Diffstat (limited to 'src/core/CL')
-rw-r--r--src/core/CL/CLCommandBuffer.cpp (renamed from src/core/CL/CLCoreRuntimeContext.cpp)46
-rw-r--r--src/core/CL/CLCommandBuffer.h163
-rw-r--r--src/core/CL/CLCompatCommandBuffer.cpp108
-rw-r--r--src/core/CL/CLCompatCommandBuffer.h94
-rw-r--r--src/core/CL/CLCompileContext.cpp131
-rw-r--r--src/core/CL/CLHelpers.cpp174
-rw-r--r--src/core/CL/CLKernelLibrary.cpp941
-rw-r--r--src/core/CL/CLKernels.h49
-rw-r--r--src/core/CL/CLMutableCommandBuffer.cpp151
-rw-r--r--src/core/CL/CLMutableCommandBuffer.h85
-rw-r--r--src/core/CL/CLTracePoint.cpp85
-rw-r--r--src/core/CL/CLUtils.cpp81
-rw-r--r--src/core/CL/CLUtils.h47
-rw-r--r--src/core/CL/CLValidate.h18
-rw-r--r--src/core/CL/DefaultLWSHeuristics.cpp127
-rw-r--r--src/core/CL/DefaultLWSHeuristics.h (renamed from src/core/CL/ICLMultiImage.cpp)23
-rw-r--r--src/core/CL/ICLDistribution1D.cpp51
-rw-r--r--src/core/CL/ICLGEMMKernelConfiguration.h68
-rw-r--r--src/core/CL/ICLHOG.cpp47
-rw-r--r--src/core/CL/ICLKernel.cpp118
-rw-r--r--src/core/CL/ICLKernel.h178
-rw-r--r--src/core/CL/ICLLut.cpp47
-rw-r--r--src/core/CL/ICLMultiHOG.cpp38
-rw-r--r--src/core/CL/ICLSimple2DKernel.cpp3
-rw-r--r--src/core/CL/ICLSimple2DKernel.h2
-rw-r--r--src/core/CL/ICLSimple3DKernel.cpp3
-rw-r--r--src/core/CL/ICLSimple3DKernel.h2
-rw-r--r--src/core/CL/ICLSimpleKernel.cpp17
-rw-r--r--src/core/CL/ICLSimpleKernel.h9
-rw-r--r--src/core/CL/ICLTensor.cpp3
-rw-r--r--src/core/CL/OpenCL.cpp839
-rw-r--r--src/core/CL/cl_kernels/activation_float_helpers.h16
-rw-r--r--src/core/CL/cl_kernels/activation_quant_helpers.h24
-rw-r--r--src/core/CL/cl_kernels/batch_to_space.cl232
-rw-r--r--src/core/CL/cl_kernels/batchnormalization_layer.cl418
-rw-r--r--src/core/CL/cl_kernels/channel_shuffle.cl181
-rw-r--r--src/core/CL/cl_kernels/common/activation_layer.cl (renamed from src/core/CL/cl_kernels/activation_layer.cl)2
-rw-r--r--src/core/CL/cl_kernels/common/activation_layer_quant.cl (renamed from src/core/CL/cl_kernels/activation_layer_quant.cl)2
-rw-r--r--src/core/CL/cl_kernels/common/arg_min_max.cl (renamed from src/core/CL/cl_kernels/arg_min_max.cl)371
-rw-r--r--src/core/CL/cl_kernels/common/batchnormalization_layer.cl183
-rw-r--r--src/core/CL/cl_kernels/common/bitwise_op.cl (renamed from src/core/CL/cl_kernels/bitwise_op.cl)2
-rw-r--r--src/core/CL/cl_kernels/common/bounding_box_transform.cl (renamed from src/core/CL/cl_kernels/bounding_box_transform.cl)4
-rw-r--r--src/core/CL/cl_kernels/common/bounding_box_transform_quantized.cl (renamed from src/core/CL/cl_kernels/bounding_box_transform_quantized.cl)4
-rw-r--r--src/core/CL/cl_kernels/common/cast.cl (renamed from src/core/CL/cl_kernels/depth_convert.cl)20
-rw-r--r--src/core/CL/cl_kernels/common/col2im.cl (renamed from src/core/CL/cl_kernels/col2im.cl)4
-rw-r--r--src/core/CL/cl_kernels/common/comparisons.cl123
-rw-r--r--src/core/CL/cl_kernels/common/concatenate.cl (renamed from src/core/CL/cl_kernels/concatenate.cl)64
-rw-r--r--src/core/CL/cl_kernels/common/convert_fc_weights.cl (renamed from src/core/CL/cl_kernels/convert_fc_weights.cl)2
-rw-r--r--src/core/CL/cl_kernels/common/convolution_layer.cl (renamed from src/core/CL/cl_kernels/convolution_layer.cl)2
-rw-r--r--src/core/CL/cl_kernels/common/copy_tensor.cl (renamed from src/core/CL/cl_kernels/copy_tensor.cl)2
-rw-r--r--src/core/CL/cl_kernels/common/crop_tensor.cl (renamed from src/core/CL/cl_kernels/crop_tensor.cl)4
-rw-r--r--src/core/CL/cl_kernels/common/deconvolution_layer.cl (renamed from src/core/CL/cl_kernels/deconvolution_layer.cl)2
-rw-r--r--src/core/CL/cl_kernels/common/dequantization_layer.cl90
-rw-r--r--src/core/CL/cl_kernels/common/elementwise_operation.cl (renamed from src/core/CL/cl_kernels/elementwise_operation.cl)62
-rw-r--r--src/core/CL/cl_kernels/common/elementwise_operation_quantized.cl (renamed from src/core/CL/cl_kernels/elementwise_operation_quantized.cl)43
-rw-r--r--src/core/CL/cl_kernels/common/elementwise_unary.cl (renamed from src/core/CL/cl_kernels/elementwise_unary.cl)9
-rw-r--r--src/core/CL/cl_kernels/common/elementwise_unary_quantized.cl77
-rw-r--r--src/core/CL/cl_kernels/common/fft.cl (renamed from src/core/CL/cl_kernels/fft.cl)2
-rw-r--r--src/core/CL/cl_kernels/common/fft_digit_reverse.cl (renamed from src/core/CL/cl_kernels/fft_digit_reverse.cl)2
-rw-r--r--src/core/CL/cl_kernels/common/fft_scale.cl (renamed from src/core/CL/cl_kernels/fft_scale.cl)2
-rw-r--r--src/core/CL/cl_kernels/common/fill_border.cl (renamed from src/core/CL/cl_kernels/fill_border.cl)2
-rw-r--r--src/core/CL/cl_kernels/common/floor.cl (renamed from src/core/CL/cl_kernels/floor.cl)0
-rw-r--r--src/core/CL/cl_kernels/common/gather.cl (renamed from src/core/CL/cl_kernels/gather.cl)71
-rw-r--r--src/core/CL/cl_kernels/common/gemm.cl (renamed from src/core/CL/cl_kernels/gemm.cl)1206
-rw-r--r--src/core/CL/cl_kernels/common/gemm_reshaped_only_rhs_mmul.cl556
-rw-r--r--src/core/CL/cl_kernels/common/gemm_utils.cl458
-rw-r--r--src/core/CL/cl_kernels/common/gemmlowp.cl (renamed from src/core/CL/cl_kernels/gemmlowp.cl)551
-rw-r--r--src/core/CL/cl_kernels/common/gemmlowp_reshaped_only_rhs_mmul.cl309
-rw-r--r--src/core/CL/cl_kernels/common/gemv.cl (renamed from src/core/CL/cl_kernels/gemv.cl)2
-rw-r--r--src/core/CL/cl_kernels/common/generate_proposals.cl (renamed from src/core/CL/cl_kernels/generate_proposals.cl)14
-rw-r--r--src/core/CL/cl_kernels/common/generate_proposals_quantized.cl (renamed from src/core/CL/cl_kernels/generate_proposals_quantized.cl)2
-rw-r--r--src/core/CL/cl_kernels/common/instance_normalization.cl (renamed from src/core/CL/cl_kernels/instance_normalization.cl)128
-rw-r--r--src/core/CL/cl_kernels/common/l2_normalize.cl189
-rw-r--r--src/core/CL/cl_kernels/common/mat_mul.cl708
-rw-r--r--src/core/CL/cl_kernels/common/mat_mul_mmul.cl946
-rw-r--r--src/core/CL/cl_kernels/common/mat_mul_quantized.cl833
-rw-r--r--src/core/CL/cl_kernels/common/mat_mul_quantized_mmul.cl832
-rw-r--r--src/core/CL/cl_kernels/common/mean_stddev_normalization.cl (renamed from src/core/CL/cl_kernels/mean_stddev_normalization.cl)42
-rw-r--r--src/core/CL/cl_kernels/common/memset.cl (renamed from src/core/CL/cl_kernels/memset.cl)2
-rw-r--r--src/core/CL/cl_kernels/common/minmax_layer.cl (renamed from src/core/CL/cl_kernels/minmax_layer.cl)2
-rw-r--r--src/core/CL/cl_kernels/common/nonmax.cl (renamed from src/core/CL/cl_kernels/nonmax.cl)2
-rw-r--r--src/core/CL/cl_kernels/common/pad_layer.cl (renamed from src/core/CL/cl_kernels/pad_layer.cl)2
-rw-r--r--src/core/CL/cl_kernels/common/permute.cl (renamed from src/core/CL/cl_kernels/permute.cl)4
-rw-r--r--src/core/CL/cl_kernels/common/pixelwise_mul_float.cl (renamed from src/core/CL/cl_kernels/pixelwise_mul_float.cl)48
-rw-r--r--src/core/CL/cl_kernels/common/pixelwise_mul_int.cl (renamed from src/core/CL/cl_kernels/pixelwise_mul_int.cl)79
-rw-r--r--src/core/CL/cl_kernels/common/qlstm_layer_normalization.cl (renamed from src/core/CL/cl_kernels/qlstm_layer_normalization.cl)2
-rw-r--r--src/core/CL/cl_kernels/common/quantization_layer.cl (renamed from src/core/CL/cl_kernels/quantization_layer.cl)6
-rw-r--r--src/core/CL/cl_kernels/common/range.cl (renamed from src/core/CL/cl_kernels/range.cl)0
-rw-r--r--src/core/CL/cl_kernels/common/reduction_operation.cl (renamed from src/core/CL/cl_kernels/reduction_operation.cl)372
-rw-r--r--src/core/CL/cl_kernels/common/reshape_layer.cl (renamed from src/core/CL/cl_kernels/reshape_layer.cl)26
-rw-r--r--src/core/CL/cl_kernels/common/reverse.cl (renamed from src/core/CL/cl_kernels/reverse.cl)30
-rw-r--r--src/core/CL/cl_kernels/common/roi_align_layer.cl (renamed from src/core/CL/cl_kernels/roi_align_layer.cl)6
-rw-r--r--src/core/CL/cl_kernels/common/roi_align_layer_quantized.cl (renamed from src/core/CL/cl_kernels/roi_align_layer_quantized.cl)2
-rw-r--r--src/core/CL/cl_kernels/common/roi_pooling_layer.cl (renamed from src/core/CL/cl_kernels/roi_pooling_layer.cl)56
-rw-r--r--src/core/CL/cl_kernels/common/scatter.cl173
-rw-r--r--src/core/CL/cl_kernels/common/select.cl (renamed from src/core/CL/cl_kernels/select.cl)8
-rw-r--r--src/core/CL/cl_kernels/common/slice_ops.cl (renamed from src/core/CL/cl_kernels/slice_ops.cl)6
-rw-r--r--src/core/CL/cl_kernels/common/softmax_layer.cl371
-rw-r--r--src/core/CL/cl_kernels/common/stack_layer.cl (renamed from src/core/CL/cl_kernels/stack_layer.cl)2
-rw-r--r--src/core/CL/cl_kernels/common/tile.cl (renamed from src/core/CL/cl_kernels/tile.cl)33
-rw-r--r--src/core/CL/cl_kernels/common/transpose.cl (renamed from src/core/CL/cl_kernels/transpose.cl)17
-rw-r--r--src/core/CL/cl_kernels/common/unpooling_layer.cl (renamed from src/core/CL/cl_kernels/unpooling_layer.cl)2
-rw-r--r--src/core/CL/cl_kernels/comparisons.cl150
-rw-r--r--src/core/CL/cl_kernels/depthwise_convolution.cl1879
-rw-r--r--src/core/CL/cl_kernels/depthwise_convolution_quantized.cl1790
-rw-r--r--src/core/CL/cl_kernels/dequantization_layer.cl212
-rw-r--r--src/core/CL/cl_kernels/direct_convolution.cl604
-rw-r--r--src/core/CL/cl_kernels/direct_convolution1x1.cl316
-rw-r--r--src/core/CL/cl_kernels/direct_convolution3x3.cl291
-rw-r--r--src/core/CL/cl_kernels/direct_convolution5x5.cl313
-rw-r--r--src/core/CL/cl_kernels/direct_convolution_quantized.cl308
-rw-r--r--src/core/CL/cl_kernels/gemm_helpers.h557
-rw-r--r--src/core/CL/cl_kernels/gemm_v1.cl3238
-rw-r--r--src/core/CL/cl_kernels/helpers.h897
-rw-r--r--src/core/CL/cl_kernels/helpers_asymm.h367
-rw-r--r--src/core/CL/cl_kernels/l2_normalize.cl164
-rw-r--r--src/core/CL/cl_kernels/load_store_utility.h84
-rw-r--r--src/core/CL/cl_kernels/nchw/batch_to_space.cl (renamed from src/core/CL/cl_kernels/space_to_depth.cl)72
-rw-r--r--src/core/CL/cl_kernels/nchw/batchnormalization_layer.cl147
-rw-r--r--src/core/CL/cl_kernels/nchw/channel_shuffle.cl103
-rw-r--r--src/core/CL/cl_kernels/nchw/depth_to_space.cl69
-rw-r--r--src/core/CL/cl_kernels/nchw/dequantization_layer.cl86
-rw-r--r--src/core/CL/cl_kernels/nchw/direct_convolution.cl147
-rw-r--r--src/core/CL/cl_kernels/nchw/im2col.cl (renamed from src/core/CL/cl_kernels/im2col.cl)501
-rw-r--r--src/core/CL/cl_kernels/nchw/normalization_layer.cl (renamed from src/core/CL/cl_kernels/normalization_layer.cl)122
-rw-r--r--src/core/CL/cl_kernels/nchw/normalize_planar_yuv_layer.cl (renamed from src/core/CL/cl_kernels/normalize_planar_yuv_layer.cl)56
-rw-r--r--src/core/CL/cl_kernels/nchw/normalize_planar_yuv_layer_quantized.cl (renamed from src/core/CL/cl_kernels/normalize_planar_yuv_layer_quantized.cl)83
-rw-r--r--src/core/CL/cl_kernels/nchw/pooling_layer.cl285
-rw-r--r--src/core/CL/cl_kernels/nchw/prior_box_layer.cl (renamed from src/core/CL/cl_kernels/prior_box_layer.cl)2
-rw-r--r--src/core/CL/cl_kernels/nchw/reorg_layer.cl (renamed from src/core/CL/cl_kernels/reorg_layer.cl)43
-rw-r--r--src/core/CL/cl_kernels/nchw/scale.cl271
-rw-r--r--src/core/CL/cl_kernels/nchw/space_to_batch.cl156
-rw-r--r--src/core/CL/cl_kernels/nchw/space_to_depth.cl69
-rw-r--r--src/core/CL/cl_kernels/nchw/upsample_layer.cl79
-rw-r--r--src/core/CL/cl_kernels/nchw/winograd_filter_transform.cl911
-rw-r--r--src/core/CL/cl_kernels/nchw/winograd_input_transform.cl1346
-rw-r--r--src/core/CL/cl_kernels/nchw/winograd_output_transform.cl1082
-rw-r--r--src/core/CL/cl_kernels/nhwc/batch_to_space.cl (renamed from src/core/CL/cl_kernels/depth_to_space.cl)88
-rw-r--r--src/core/CL/cl_kernels/nhwc/batchnormalization_layer.cl146
-rw-r--r--src/core/CL/cl_kernels/nhwc/channel_shuffle.cl160
-rw-r--r--src/core/CL/cl_kernels/nhwc/depth_to_space.cl69
-rw-r--r--src/core/CL/cl_kernels/nhwc/dequantization_layer.cl87
-rw-r--r--src/core/CL/cl_kernels/nhwc/direct_convolution.cl295
-rw-r--r--src/core/CL/cl_kernels/nhwc/direct_convolution3d.cl281
-rw-r--r--src/core/CL/cl_kernels/nhwc/dwc_native_fp_nhwc.cl211
-rw-r--r--src/core/CL/cl_kernels/nhwc/dwc_native_quantized_nhwc.cl275
-rw-r--r--src/core/CL/cl_kernels/nhwc/im2col.cl526
-rw-r--r--src/core/CL/cl_kernels/nhwc/indirect_convolution.cl305
-rw-r--r--src/core/CL/cl_kernels/nhwc/normalization_layer.cl177
-rw-r--r--src/core/CL/cl_kernels/nhwc/normalize_planar_yuv_layer.cl81
-rw-r--r--src/core/CL/cl_kernels/nhwc/normalize_planar_yuv_layer_quantized.cl96
-rw-r--r--src/core/CL/cl_kernels/nhwc/pooling_3d_layer.cl197
-rw-r--r--src/core/CL/cl_kernels/nhwc/pooling_3d_layer_quantized.cl185
-rw-r--r--src/core/CL/cl_kernels/nhwc/pooling_layer.cl364
-rw-r--r--src/core/CL/cl_kernels/nhwc/pooling_layer_quantized.cl (renamed from src/core/CL/cl_kernels/pooling_layer_quantized.cl)104
-rw-r--r--src/core/CL/cl_kernels/nhwc/reorg_layer.cl76
-rw-r--r--src/core/CL/cl_kernels/nhwc/scale.cl245
-rw-r--r--src/core/CL/cl_kernels/nhwc/space_to_batch.cl (renamed from src/core/CL/cl_kernels/space_to_batch.cl)131
-rw-r--r--src/core/CL/cl_kernels/nhwc/space_to_depth.cl69
-rw-r--r--src/core/CL/cl_kernels/nhwc/transposed_convolution.cl297
-rw-r--r--src/core/CL/cl_kernels/nhwc/upsample_layer.cl (renamed from src/core/CL/cl_kernels/upsample_layer.cl)59
-rw-r--r--src/core/CL/cl_kernels/nhwc/winograd_filter_transform.cl (renamed from src/core/CL/cl_kernels/winograd_filter_transform.cl)965
-rw-r--r--src/core/CL/cl_kernels/nhwc/winograd_input_transform.cl1050
-rw-r--r--src/core/CL/cl_kernels/nhwc/winograd_output_transform.cl1109
-rw-r--r--src/core/CL/cl_kernels/pooling_layer.cl981
-rw-r--r--src/core/CL/cl_kernels/remap.cl132
-rw-r--r--src/core/CL/cl_kernels/repeat.h42
-rw-r--r--src/core/CL/cl_kernels/scale.cl297
-rw-r--r--src/core/CL/cl_kernels/scale_quantized.cl185
-rw-r--r--src/core/CL/cl_kernels/sobel_filter.cl541
-rw-r--r--src/core/CL/cl_kernels/softmax_layer.cl533
-rw-r--r--src/core/CL/cl_kernels/softmax_layer_quantized.cl532
-rw-r--r--src/core/CL/cl_kernels/tile_helpers.h1451
-rw-r--r--src/core/CL/cl_kernels/warp_helpers.h69
-rw-r--r--src/core/CL/cl_kernels/warp_helpers_quantized.h138
-rw-r--r--src/core/CL/cl_kernels/winograd_input_transform.cl2753
-rw-r--r--src/core/CL/cl_kernels/winograd_output_transform.cl2266
-rw-r--r--src/core/CL/cl_kernels/yolo_layer.cl172
-rw-r--r--src/core/CL/gemm/CLGEMMHelpers.cpp113
-rw-r--r--src/core/CL/gemm/CLGEMMHelpers.h92
-rw-r--r--src/core/CL/gemm/native/CLGEMMDefaultConfigNativeBifrost.cpp272
-rw-r--r--src/core/CL/gemm/native/CLGEMMDefaultConfigNativeBifrost.h56
-rw-r--r--src/core/CL/gemm/native/CLGEMMDefaultConfigNativeMidgard.cpp75
-rw-r--r--src/core/CL/gemm/native/CLGEMMDefaultConfigNativeMidgard.h51
-rw-r--r--src/core/CL/gemm/native/CLGEMMDefaultConfigNativeValhall.cpp180
-rw-r--r--src/core/CL/gemm/native/CLGEMMDefaultConfigNativeValhall.h53
-rw-r--r--src/core/CL/gemm/native/CLGEMMNativeKernelConfiguration.h65
-rw-r--r--src/core/CL/gemm/reshaped/CLGEMMDefaultConfigReshapedBifrost.cpp376
-rw-r--r--src/core/CL/gemm/reshaped/CLGEMMDefaultConfigReshapedBifrost.h58
-rw-r--r--src/core/CL/gemm/reshaped/CLGEMMDefaultConfigReshapedValhall.cpp228
-rw-r--r--src/core/CL/gemm/reshaped/CLGEMMDefaultConfigReshapedValhall.h53
-rw-r--r--src/core/CL/gemm/reshaped/CLGEMMReshapedKernelConfiguration.h63
-rw-r--r--src/core/CL/gemm/reshaped_only_rhs/CLGEMMDefaultConfigReshapedRHSOnlyBifrost.cpp560
-rw-r--r--src/core/CL/gemm/reshaped_only_rhs/CLGEMMDefaultConfigReshapedRHSOnlyBifrost.h61
-rw-r--r--src/core/CL/gemm/reshaped_only_rhs/CLGEMMDefaultConfigReshapedRHSOnlyValhall.cpp259
-rw-r--r--src/core/CL/gemm/reshaped_only_rhs/CLGEMMDefaultConfigReshapedRHSOnlyValhall.h53
-rw-r--r--src/core/CL/gemm/reshaped_only_rhs/CLGEMMReshapedOnlyRHSKernelConfiguration.h63
-rw-r--r--src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp138
-rw-r--r--src/core/CL/kernels/CLArgMinMaxLayerKernel.h41
-rw-r--r--src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp129
-rw-r--r--src/core/CL/kernels/CLBatchNormalizationLayerKernel.h35
-rw-r--r--src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp122
-rw-r--r--src/core/CL/kernels/CLBatchToSpaceLayerKernel.h36
-rw-r--r--src/core/CL/kernels/CLBitwiseKernel.cpp28
-rw-r--r--src/core/CL/kernels/CLBitwiseKernel.h6
-rw-r--r--src/core/CL/kernels/CLBoundingBoxTransformKernel.cpp48
-rw-r--r--src/core/CL/kernels/CLBoundingBoxTransformKernel.h16
-rw-r--r--src/core/CL/kernels/CLChannelShuffleLayerKernel.cpp96
-rw-r--r--src/core/CL/kernels/CLChannelShuffleLayerKernel.h5
-rw-r--r--src/core/CL/kernels/CLCol2ImKernel.cpp176
-rw-r--r--src/core/CL/kernels/CLCol2ImKernel.h106
-rw-r--r--src/core/CL/kernels/CLComparisonKernel.cpp111
-rw-r--r--src/core/CL/kernels/CLComparisonKernel.h21
-rw-r--r--src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp121
-rw-r--r--src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h90
-rw-r--r--src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.cpp30
-rw-r--r--src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h5
-rw-r--r--src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp89
-rw-r--r--src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h23
-rw-r--r--src/core/CL/kernels/CLDepthConvertLayerKernel.cpp160
-rw-r--r--src/core/CL/kernels/CLDepthConvertLayerKernel.h91
-rw-r--r--src/core/CL/kernels/CLDepthToSpaceLayerKernel.cpp33
-rw-r--r--src/core/CL/kernels/CLDepthToSpaceLayerKernel.h4
-rw-r--r--src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp434
-rw-r--r--src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h114
-rw-r--r--src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp464
-rw-r--r--src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h113
-rw-r--r--src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp416
-rw-r--r--src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h114
-rw-r--r--src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.cpp132
-rw-r--r--src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h85
-rw-r--r--src/core/CL/kernels/CLDequantizationLayerKernel.cpp159
-rw-r--r--src/core/CL/kernels/CLDequantizationLayerKernel.h79
-rw-r--r--src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp585
-rw-r--r--src/core/CL/kernels/CLDirectConvolutionLayerKernel.h126
-rw-r--r--src/core/CL/kernels/CLFFTDigitReverseKernel.cpp48
-rw-r--r--src/core/CL/kernels/CLFFTDigitReverseKernel.h18
-rw-r--r--src/core/CL/kernels/CLFFTRadixStageKernel.cpp54
-rw-r--r--src/core/CL/kernels/CLFFTRadixStageKernel.h9
-rw-r--r--src/core/CL/kernels/CLFFTScaleKernel.cpp61
-rw-r--r--src/core/CL/kernels/CLFFTScaleKernel.h9
-rw-r--r--src/core/CL/kernels/CLFillBorderKernel.cpp63
-rw-r--r--src/core/CL/kernels/CLFillBorderKernel.h18
-rw-r--r--src/core/CL/kernels/CLFuseBatchNormalizationKernel.cpp133
-rw-r--r--src/core/CL/kernels/CLFuseBatchNormalizationKernel.h41
-rw-r--r--src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp334
-rw-r--r--src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h108
-rw-r--r--src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.cpp297
-rw-r--r--src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.h123
-rw-r--r--src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp573
-rw-r--r--src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h155
-rw-r--r--src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp215
-rw-r--r--src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h116
-rw-r--r--src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp274
-rw-r--r--src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h136
-rw-r--r--src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.cpp153
-rw-r--r--src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.h89
-rw-r--r--src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp160
-rw-r--r--src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.h101
-rw-r--r--src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp158
-rw-r--r--src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.h102
-rw-r--r--src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp221
-rw-r--r--src/core/CL/kernels/CLGEMMLowpReductionKernel.h176
-rw-r--r--src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp544
-rw-r--r--src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h122
-rw-r--r--src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp422
-rw-r--r--src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.h127
-rw-r--r--src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp443
-rw-r--r--src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h188
-rw-r--r--src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp449
-rw-r--r--src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h168
-rw-r--r--src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp221
-rw-r--r--src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h105
-rw-r--r--src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp175
-rw-r--r--src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h135
-rw-r--r--src/core/CL/kernels/CLGatherKernel.cpp49
-rw-r--r--src/core/CL/kernels/CLGatherKernel.h10
-rw-r--r--src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp39
-rw-r--r--src/core/CL/kernels/CLGenerateProposalsLayerKernel.h7
-rw-r--r--src/core/CL/kernels/CLIm2ColKernel.cpp441
-rw-r--r--src/core/CL/kernels/CLIm2ColKernel.h136
-rw-r--r--src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp154
-rw-r--r--src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h70
-rw-r--r--src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp93
-rw-r--r--src/core/CL/kernels/CLL2NormalizeLayerKernel.h11
-rw-r--r--src/core/CL/kernels/CLLKTrackerKernel.cpp314
-rw-r--r--src/core/CL/kernels/CLLKTrackerKernel.h206
-rw-r--r--src/core/CL/kernels/CLMaxUnpoolingLayerKernel.cpp51
-rw-r--r--src/core/CL/kernels/CLMaxUnpoolingLayerKernel.h15
-rw-r--r--src/core/CL/kernels/CLMeanStdDevNormalizationKernel.cpp55
-rw-r--r--src/core/CL/kernels/CLMeanStdDevNormalizationKernel.h5
-rw-r--r--src/core/CL/kernels/CLMinMaxLayerKernel.cpp170
-rw-r--r--src/core/CL/kernels/CLMinMaxLayerKernel.h87
-rw-r--r--src/core/CL/kernels/CLNormalizationLayerKernel.cpp165
-rw-r--r--src/core/CL/kernels/CLNormalizationLayerKernel.h7
-rw-r--r--src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp98
-rw-r--r--src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.h9
-rw-r--r--src/core/CL/kernels/CLPadLayerKernel.cpp99
-rw-r--r--src/core/CL/kernels/CLPadLayerKernel.h20
-rw-r--r--src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp481
-rw-r--r--src/core/CL/kernels/CLPixelWiseMultiplicationKernel.h196
-rw-r--r--src/core/CL/kernels/CLPriorBoxLayerKernel.cpp86
-rw-r--r--src/core/CL/kernels/CLPriorBoxLayerKernel.h27
-rw-r--r--src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.cpp51
-rw-r--r--src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.h9
-rw-r--r--src/core/CL/kernels/CLQuantizationLayerKernel.cpp178
-rw-r--r--src/core/CL/kernels/CLQuantizationLayerKernel.h86
-rw-r--r--src/core/CL/kernels/CLROIAlignLayerKernel.cpp58
-rw-r--r--src/core/CL/kernels/CLROIAlignLayerKernel.h18
-rw-r--r--src/core/CL/kernels/CLROIPoolingLayerKernel.cpp149
-rw-r--r--src/core/CL/kernels/CLROIPoolingLayerKernel.h36
-rw-r--r--src/core/CL/kernels/CLRangeKernel.cpp45
-rw-r--r--src/core/CL/kernels/CLRangeKernel.h1
-rw-r--r--src/core/CL/kernels/CLReductionOperationKernel.cpp333
-rw-r--r--src/core/CL/kernels/CLReductionOperationKernel.h19
-rw-r--r--src/core/CL/kernels/CLRemapKernel.cpp116
-rw-r--r--src/core/CL/kernels/CLRemapKernel.h81
-rw-r--r--src/core/CL/kernels/CLReorgLayerKernel.cpp46
-rw-r--r--src/core/CL/kernels/CLReorgLayerKernel.h1
-rw-r--r--src/core/CL/kernels/CLReverseKernel.cpp48
-rw-r--r--src/core/CL/kernels/CLReverseKernel.h48
-rw-r--r--src/core/CL/kernels/CLScaleKernel.cpp286
-rw-r--r--src/core/CL/kernels/CLScaleKernel.h93
-rw-r--r--src/core/CL/kernels/CLSelectKernel.cpp37
-rw-r--r--src/core/CL/kernels/CLSelectKernel.h7
-rw-r--r--src/core/CL/kernels/CLSoftmaxLayerKernel.cpp370
-rw-r--r--src/core/CL/kernels/CLSoftmaxLayerKernel.h158
-rw-r--r--src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp104
-rw-r--r--src/core/CL/kernels/CLSpaceToBatchLayerKernel.h35
-rw-r--r--src/core/CL/kernels/CLSpaceToDepthLayerKernel.cpp27
-rw-r--r--src/core/CL/kernels/CLSpaceToDepthLayerKernel.h4
-rw-r--r--src/core/CL/kernels/CLStackLayerKernel.cpp41
-rw-r--r--src/core/CL/kernels/CLStackLayerKernel.h17
-rw-r--r--src/core/CL/kernels/CLStridedSliceKernel.cpp130
-rw-r--r--src/core/CL/kernels/CLStridedSliceKernel.h29
-rw-r--r--src/core/CL/kernels/CLTileKernel.cpp47
-rw-r--r--src/core/CL/kernels/CLTileKernel.h5
-rw-r--r--src/core/CL/kernels/CLTransposeKernel.cpp116
-rw-r--r--src/core/CL/kernels/CLTransposeKernel.h64
-rw-r--r--src/core/CL/kernels/CLWeightsReshapeKernel.cpp168
-rw-r--r--src/core/CL/kernels/CLWeightsReshapeKernel.h121
-rw-r--r--src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp157
-rw-r--r--src/core/CL/kernels/CLWinogradFilterTransformKernel.h115
-rw-r--r--src/core/CL/kernels/CLWinogradInputTransformKernel.cpp256
-rw-r--r--src/core/CL/kernels/CLWinogradInputTransformKernel.h121
-rw-r--r--src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp265
-rw-r--r--src/core/CL/kernels/CLWinogradOutputTransformKernel.h127
-rw-r--r--src/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h105
348 files changed, 25939 insertions, 44183 deletions
diff --git a/src/core/CL/CLCoreRuntimeContext.cpp b/src/core/CL/CLCommandBuffer.cpp
index 6b1b1f5e7b..d094dcdaea 100644
--- a/src/core/CL/CLCoreRuntimeContext.cpp
+++ b/src/core/CL/CLCommandBuffer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 Arm Limited.
+ * Copyright (c) 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,32 +21,46 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/core/CL/CLCoreRuntimeContext.h"
+
+#include "src/core/CL/CLCommandBuffer.h"
+
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+
+#include "src/core/CL/CLCompatCommandBuffer.h"
+#include "src/core/CL/CLMutableCommandBuffer.h"
namespace arm_compute
{
-cl::Context CLCoreRuntimeContext::context()
-{
- return _ctx;
-}
-cl::CommandQueue CLCoreRuntimeContext::queue()
+std::unique_ptr<CLCommandBuffer> CLCommandBuffer::create(cl_command_queue queue)
{
- return _queue;
-}
+ const auto &cl_device = CLKernelLibrary::get().get_device();
+ const auto has_mutable_dispatch = command_buffer_mutable_dispatch_supported(cl_device);
-CLCoreRuntimeContext::CLCoreRuntimeContext()
- : _kernel_lib(nullptr), _ctx(), _queue()
-{
+ if (has_mutable_dispatch)
+ {
+ return std::make_unique<CLMutableCommandBuffer>(queue);
+ }
+ else
+ {
+ return std::make_unique<CLCompatCommandBuffer>(queue);
+ }
}
-CLCoreRuntimeContext::CLCoreRuntimeContext(CLKernelLibrary *kernel_lib, cl::Context ctx, cl::CommandQueue queue)
- : _kernel_lib(kernel_lib), _ctx(ctx), _queue(queue)
+CLCommandBuffer::CLCommandBuffer() = default;
+CLCommandBuffer::~CLCommandBuffer() = default;
+
+CLCommandBuffer::State CLCommandBuffer::state() const
{
+ return _state;
}
-CLKernelLibrary *CLCoreRuntimeContext::kernel_library() const
+CLCommandBuffer &CLCommandBuffer::state(CLCommandBuffer::State state)
{
- return _kernel_lib;
+ _state = state;
+
+ return *this;
}
+
} // namespace arm_compute
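The create() factory above is the single entry point for obtaining a command buffer: it returns a CLMutableCommandBuffer when the device reports support for mutable-dispatch command buffers, and falls back to CLCompatCommandBuffer otherwise. A minimal call-site sketch, assuming `queue` is a valid cl_command_queue obtained elsewhere:

    #include "src/core/CL/CLCommandBuffer.h"

    #include <memory>

    // The caller never names the concrete type; create() picks it from the device capabilities.
    std::unique_ptr<arm_compute::CLCommandBuffer> make_command_buffer(cl_command_queue queue)
    {
        return arm_compute::CLCommandBuffer::create(queue);
    }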
diff --git a/src/core/CL/CLCommandBuffer.h b/src/core/CL/CLCommandBuffer.h
new file mode 100644
index 0000000000..90e434161e
--- /dev/null
+++ b/src/core/CL/CLCommandBuffer.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ACL_SRC_CORE_CL_CLCOMMANDBUFFER_H
+#define ACL_SRC_CORE_CL_CLCOMMANDBUFFER_H
+
+#include "arm_compute/core/CL/OpenCL.h"
+
+#include <cstdint>
+#include <memory>
+#include <type_traits>
+
+namespace arm_compute
+{
+
+/** A command buffer contains a list of commands that is constructed once and can later be enqueued multiple times.
+ *
+ * To prepare a command buffer:
+ * - Construct a new command buffer targeting a command queue using @ref CLCommandBuffer::create.
+ * - Add kernel enqueue commands to the buffer using @ref CLCommandBuffer::add_kernel.
+ * The kernel must be ready to be enqueued with all the arguments set.
+ * - Specify which kernel arguments remain mutable after the command buffer has been finalized, using @ref CLCommandBuffer::add_mutable_argument.
+ * - When all the kernel enqueue commands have been added, call @ref CLCommandBuffer::finalize.
+ * After this point the command buffer is ready to be executed.
+ *
+ * To execute the command buffer:
+ * - Make any changes to the values that the mutable arguments point to.
+ * - Call @ref CLCommandBuffer::update to apply the argument value changes.
+ * - Call @ref CLCommandBuffer::enqueue to enqueue the command buffer for execution.
+ */
+class CLCommandBuffer
+{
+public:
+ /** Create a new command buffer targeting the specified command queue.
+ *
+ * @param[in] queue The command queue to execute the command buffer.
+ *
+ * @return A unique pointer to the newly created command buffer.
+ */
+ static std::unique_ptr<CLCommandBuffer> create(cl_command_queue queue);
+
+ /** Constructor. */
+ CLCommandBuffer();
+
+ /** Destructor. */
+ virtual ~CLCommandBuffer();
+
+ /** Disallow copy constructor. */
+ CLCommandBuffer(const CLCommandBuffer &) = delete;
+
+ /** Disallow copy assignment. */
+ CLCommandBuffer &operator=(const CLCommandBuffer &) = delete;
+
+ /** Disallow move constructor. */
+ CLCommandBuffer(CLCommandBuffer &&other) = delete;
+
+ /** Disallow move assignment. */
+ CLCommandBuffer &operator=(CLCommandBuffer &&other) = delete;
+
+ /** Add a kernel enqueue command to the command buffer.
+ *
+ * This function must be called before the command buffer has been finalized.
+ *
+ * @param[in] kernel The CL kernel.
+ * @param[in] offset The global work offset.
+ * @param[in] global The global work size.
+ * @param[in] local The local work size.
+ */
+ virtual void
+ add_kernel(cl_kernel kernel, const cl::NDRange &offset, const cl::NDRange &global, const cl::NDRange &local) = 0;
+
+ /** Add the mutable argument to the current kernel enqueue command.
+ *
+ * This function must be called after @ref CLCommandBuffer::add_kernel but before the command buffer
+ * has been finalized.
+ *
+ * The pointer must be valid and it must point to the correct value at the time
+ * @ref CLCommandBuffer::update is called so that the value of the argument
+ * can be applied successfully to the kernel enqueue command.
+ *
+ * @param[in] arg_idx The index of the argument in the current kernel program.
+ * @param[in] value The pointer to the value of the argument.
+ */
+ template <typename T, typename = std::enable_if_t<std::is_arithmetic<T>::value || std::is_pointer<T>::value>>
+ void add_mutable_argument(cl_uint arg_idx, const T *value)
+ {
+ add_mutable_argument_generic(arg_idx, value, sizeof(T));
+ }
+
+ /** Finalize the command buffer. */
+ virtual void finalize() = 0;
+
+ /** Update the command buffer with new kernel argument values.
+ *
+ * This function must be called after the command buffer has been finalized.
+ *
+ * All the values pointed to by the mutable arguments will be applied to the command buffer.
+ */
+ virtual void update() = 0;
+
+ /** Enqueue the command buffer.
+ *
+ * This function must be called after the command buffer has been finalized.
+ */
+ virtual void enqueue() = 0;
+
+ /** Check if the command buffer has been finalized.
+ *
+ * @return true if the command buffer has been finalized.
+ */
+ virtual bool is_finalized() const = 0;
+
+protected:
+ /** Add the mutable argument to the current kernel enqueue command.
+ *
+ * @see CLCommandBuffer::add_mutable_argument for more information.
+ */
+ virtual void add_mutable_argument_generic(cl_uint arg_idx, const void *value, size_t size) = 0;
+
+ /** The state of the command buffer. */
+ enum class State : int32_t
+ {
+ /** The command buffer has been created and is being specified. */
+ Created,
+
+ /** The command buffer has been finalized and is ready to be executed. */
+ Finalized,
+ };
+
+ /** Get the state of the command buffer. */
+ State state() const;
+
+ /** Set the state of the command buffer. */
+ CLCommandBuffer &state(State state);
+
+private:
+ State _state{State::Created};
+};
+
+} // namespace arm_compute
+
+#endif // ACL_SRC_CORE_CL_CLCOMMANDBUFFER_H
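To make the workflow described in the class comment concrete, here is a minimal sketch of recording one kernel with a single mutable scalar argument and replaying it with different values. The kernel, queue, and argument index 2 are illustrative assumptions, and all other kernel arguments are assumed to have been set beforehand with clSetKernelArg:

    #include "src/core/CL/CLCommandBuffer.h"

    using namespace arm_compute;

    void replay_with_scaling(cl_command_queue queue, cl_kernel kernel)
    {
        float scale = 1.0f; // Host value backing the mutable kernel argument.

        auto cmd_buf = CLCommandBuffer::create(queue);
        cmd_buf->add_kernel(kernel, cl::NullRange, cl::NDRange(1024), cl::NullRange);
        cmd_buf->add_mutable_argument(2, &scale); // Argument 2 of this hypothetical kernel stays mutable.
        cmd_buf->finalize();

        for (float s : {0.5f, 1.0f, 2.0f})
        {
            scale = s;          // Change the value the mutable argument points to...
            cmd_buf->update();  // ...apply it to the recorded kernel command...
            cmd_buf->enqueue(); // ...and enqueue the whole buffer again.
        }
    }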
diff --git a/src/core/CL/CLCompatCommandBuffer.cpp b/src/core/CL/CLCompatCommandBuffer.cpp
new file mode 100644
index 0000000000..242fd7719c
--- /dev/null
+++ b/src/core/CL/CLCompatCommandBuffer.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "src/core/CL/CLCompatCommandBuffer.h"
+
+#include "arm_compute/core/Error.h"
+
+#include "src/core/CL/CLUtils.h"
+
+namespace arm_compute
+{
+
+CLCompatCommandBuffer::CLCompatCommandBuffer(cl_command_queue queue) : _queue(queue)
+{
+}
+
+CLCompatCommandBuffer::~CLCompatCommandBuffer()
+{
+}
+
+void CLCompatCommandBuffer::add_kernel(cl_kernel kernel,
+ const cl::NDRange &offset,
+ const cl::NDRange &global,
+ const cl::NDRange &local)
+{
+ ARM_COMPUTE_ERROR_ON(state() != State::Created);
+
+ _kernel_cmds.push_back(KernelCommand{kernel, offset, global, local, {}});
+}
+
+void CLCompatCommandBuffer::add_mutable_argument_generic(cl_uint arg_idx, const void *value, size_t size)
+{
+ ARM_COMPUTE_ERROR_ON(state() != State::Created);
+ ARM_COMPUTE_ERROR_ON(_kernel_cmds.empty());
+
+ _kernel_cmds.back().mutable_args.push_back(cl_mutable_dispatch_arg_khr{arg_idx, size, value});
+}
+
+void CLCompatCommandBuffer::finalize()
+{
+ ARM_COMPUTE_ERROR_ON(state() != State::Created);
+
+ _kernel_cmds.shrink_to_fit();
+
+ for (auto &cmd : _kernel_cmds)
+ {
+ cmd.mutable_args.shrink_to_fit();
+ }
+
+ state(State::Finalized);
+}
+
+void CLCompatCommandBuffer::update()
+{
+ ARM_COMPUTE_ERROR_ON(state() != State::Finalized);
+
+ // Nothing to do here - The kernel arguments will be updated when each command is enqueued.
+}
+
+void CLCompatCommandBuffer::enqueue()
+{
+ ARM_COMPUTE_ERROR_ON(state() != State::Finalized);
+
+ for (const auto &cmd : _kernel_cmds)
+ {
+ for (const auto &arg : cmd.mutable_args)
+ {
+ const auto error = clSetKernelArg(cmd.kernel, arg.arg_index, arg.arg_size, arg.arg_value);
+
+ handle_cl_error("clSetKernelArg", error);
+ }
+
+ const auto error =
+ clEnqueueNDRangeKernel(_queue, cmd.kernel, static_cast<cl_uint>(cmd.global.dimensions()),
+ cmd.offset.dimensions() != 0 ? cmd.offset.get() : nullptr, cmd.global.get(),
+ cmd.local.dimensions() != 0 ? cmd.local.get() : nullptr, 0, nullptr, nullptr);
+
+ handle_cl_error("clEnqueueNDRangeKernel", error);
+ }
+}
+
+bool CLCompatCommandBuffer::is_finalized() const
+{
+ return state() == State::Finalized;
+}
+
+} // namespace arm_compute
diff --git a/src/core/CL/CLCompatCommandBuffer.h b/src/core/CL/CLCompatCommandBuffer.h
new file mode 100644
index 0000000000..d5df106425
--- /dev/null
+++ b/src/core/CL/CLCompatCommandBuffer.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ACL_SRC_CORE_CL_CLCOMPATCOMMANDBUFFER_H
+#define ACL_SRC_CORE_CL_CLCOMPATCOMMANDBUFFER_H
+
+#include "src/core/CL/CLCommandBuffer.h"
+
+#include <vector>
+
+namespace arm_compute
+{
+
+/** Command buffer implementation for platforms without the mutable-dispatch command buffer extension. */
+class CLCompatCommandBuffer final : public CLCommandBuffer
+{
+public:
+ /** Create a new command buffer targeting the specified command queue.
+ *
+ * @param[in] queue The command queue to execute the command buffer.
+ */
+ CLCompatCommandBuffer(cl_command_queue queue);
+
+ /** Destructor. */
+ virtual ~CLCompatCommandBuffer();
+
+ /** Disallow copy constructor. */
+ CLCompatCommandBuffer(const CLCompatCommandBuffer &) = delete;
+
+ /** Disallow copy assignment. */
+ CLCompatCommandBuffer &operator=(const CLCompatCommandBuffer &) = delete;
+
+ /** Disallow move constructor. */
+ CLCompatCommandBuffer(CLCompatCommandBuffer &&) = delete;
+
+ /** Disallow move assignment. */
+ CLCompatCommandBuffer &operator=(CLCompatCommandBuffer &&) = delete;
+
+ void add_kernel(cl_kernel kernel,
+ const cl::NDRange &offset,
+ const cl::NDRange &global,
+ const cl::NDRange &local) override;
+
+ void finalize() override;
+
+ void update() override;
+
+ void enqueue() override;
+
+ bool is_finalized() const override;
+
+protected:
+ void add_mutable_argument_generic(cl_uint arg_idx, const void *value, size_t size) override;
+
+private:
+ struct KernelCommand
+ {
+ cl_kernel kernel;
+ cl::NDRange offset;
+ cl::NDRange global;
+ cl::NDRange local;
+
+ std::vector<cl_mutable_dispatch_arg_khr> mutable_args;
+ };
+
+private:
+ cl_command_queue _queue{};
+ std::vector<KernelCommand> _kernel_cmds{};
+};
+
+} // namespace arm_compute
+
+#endif // ACL_SRC_CORE_CL_CLCOMPATCOMMANDBUFFER_H
diff --git a/src/core/CL/CLCompileContext.cpp b/src/core/CL/CLCompileContext.cpp
index 3db0fe515a..9bbc32657e 100644
--- a/src/core/CL/CLCompileContext.cpp
+++ b/src/core/CL/CLCompileContext.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021 Arm Limited.
+ * Copyright (c) 2020-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,17 +22,19 @@
* SOFTWARE.
*/
#include "arm_compute/core/CL/CLCompileContext.h"
-#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Utils.h"
+
#include "support/StringSupport.h"
+#include <regex>
+
namespace arm_compute
{
-CLBuildOptions::CLBuildOptions()
- : _build_opts()
+CLBuildOptions::CLBuildOptions() : _build_opts()
{
}
@@ -43,7 +45,7 @@ void CLBuildOptions::add_option(std::string option)
void CLBuildOptions::add_option_if(bool cond, std::string option)
{
- if(cond)
+ if (cond)
{
add_option(std::move(option));
}
@@ -61,7 +63,7 @@ void CLBuildOptions::add_options(const StringSet &options)
void CLBuildOptions::add_options_if(bool cond, const StringSet &options)
{
- if(cond)
+ if (cond)
{
add_options(options);
}
@@ -72,26 +74,40 @@ const CLBuildOptions::StringSet &CLBuildOptions::options() const
return _build_opts;
}
-Program::Program()
- : _context(), _device(), _is_binary(false), _name(), _source(), _binary()
+bool CLBuildOptions::operator==(const CLBuildOptions &other) const
+{
+ return _build_opts == other._build_opts;
+}
+
+Program::Program() : _context(), _device(), _is_binary(false), _name(), _source(), _binary()
{
}
Program::Program(cl::Context context, std::string name, std::string source)
- : _context(std::move(context)), _device(), _is_binary(false), _name(std::move(name)), _source(std::move(source)), _binary()
+ : _context(std::move(context)),
+ _device(),
+ _is_binary(false),
+ _name(std::move(name)),
+ _source(std::move(source)),
+ _binary()
{
}
Program::Program(cl::Context context, cl::Device device, std::string name, std::vector<unsigned char> binary)
- : _context(std::move(context)), _device(std::move(device)), _is_binary(true), _name(std::move(name)), _source(), _binary(std::move(binary))
+ : _context(std::move(context)),
+ _device(std::move(device)),
+ _is_binary(true),
+ _name(std::move(name)),
+ _source(),
+ _binary(std::move(binary))
{
}
Program::operator cl::Program() const
{
- if(_is_binary)
+ if (_is_binary)
{
- return cl::Program(_context, { _device }, { _binary });
+ return cl::Program(_context, {_device}, {_binary});
}
else
{
@@ -105,12 +121,12 @@ bool Program::build(const cl::Program &program, const std::string &build_options
{
return program.build(build_options.c_str()) == CL_SUCCESS;
}
- catch(const cl::Error &e)
+ catch (const cl::Error &e)
{
cl_int err = CL_SUCCESS;
const auto build_info = program.getBuildInfo<CL_PROGRAM_BUILD_LOG>(&err);
- for(auto &pair : build_info)
+ for (auto &pair : build_info)
{
std::cerr << pair.second << std::endl;
}
@@ -126,14 +142,12 @@ cl::Program Program::build(const std::string &build_options) const
return cl_program;
}
-Kernel::Kernel()
- : _name(), _kernel()
+Kernel::Kernel() : _name(), _kernel()
{
}
Kernel::Kernel(std::string name, const cl::Program &program)
- : _name(std::move(name)),
- _kernel(cl::Kernel(program, _name.c_str()))
+ : _name(std::move(name)), _kernel(cl::Kernel(program, _name.c_str()))
{
}
CLCompileContext::CLCompileContext()
@@ -149,15 +163,19 @@ CLCompileContext::CLCompileContext(cl::Context context, const cl::Device &device
_is_wbsm_supported = get_wbsm_support_info(device);
}
-Kernel CLCompileContext::create_kernel(const std::string &kernel_name, const std::string &program_name, const std::string &program_source,
- const std::string &kernel_path, const StringSet &build_options_set, bool is_binary) const
+Kernel CLCompileContext::create_kernel(const std::string &kernel_name,
+ const std::string &program_name,
+ const std::string &program_source,
+ const std::string &kernel_path,
+ const StringSet &build_options_set,
+ bool is_binary) const
{
const std::string build_options = generate_build_options(build_options_set, kernel_path);
const std::string built_program_name = program_name + "_" + build_options;
auto built_program_it = _built_programs_map.find(built_program_name);
cl::Program cl_program;
- if(_built_programs_map.end() != built_program_it)
+ if (_built_programs_map.end() != built_program_it)
{
// If program has been built, retrieve to create kernel from it
cl_program = built_program_it->second;
@@ -177,11 +195,12 @@ Kernel CLCompileContext::create_kernel(const std::string &kernel_name, const std
return Kernel(kernel_name, cl_program);
}
-const Program &CLCompileContext::load_program(const std::string &program_name, const std::string &program_source, bool is_binary) const
+const Program &
+CLCompileContext::load_program(const std::string &program_name, const std::string &program_source, bool is_binary) const
{
const auto program_it = _programs_map.find(program_name);
- if(program_it != _programs_map.end())
+ if (program_it != _programs_map.end())
{
return program_it->second;
}
@@ -192,9 +211,10 @@ const Program &CLCompileContext::load_program(const std::string &program_name, c
ARM_COMPUTE_UNUSED(is_binary);
program = Program(_context, program_name, program_source);
#else /* EMBEDDED_KERNELS */
- if(is_binary)
+ if (is_binary)
{
- program = Program(_context, _device.cl_device(), program_name, std::vector<unsigned char>(program_source.begin(), program_source.end()));
+ program = Program(_context, _device.cl_device(), program_name,
+ std::vector<unsigned char>(program_source.begin(), program_source.end()));
}
else
{
@@ -211,20 +231,23 @@ const Program &CLCompileContext::load_program(const std::string &program_name, c
void CLCompileContext::set_context(cl::Context context)
{
_context = std::move(context);
- if(_context.get() != nullptr)
+ if (_context.get() != nullptr)
{
const auto cl_devices = _context.getInfo<CL_CONTEXT_DEVICES>();
- if(!cl_devices.empty())
+ if (!cl_devices.empty())
{
_device = CLDevice(cl_devices[0]);
}
}
}
-std::string CLCompileContext::generate_build_options(const StringSet &build_options_set, const std::string &kernel_path) const
+std::string CLCompileContext::generate_build_options(const StringSet &build_options_set,
+ const std::string &kernel_path) const
{
std::string concat_str;
+ bool ext_supported = false;
+ std::string ext_buildopts;
#if defined(ARM_COMPUTE_DEBUG_ENABLED)
// Enable debug properties in CL kernels
@@ -232,37 +255,40 @@ std::string CLCompileContext::generate_build_options(const StringSet &build_opti
#endif // defined(ARM_COMPUTE_DEBUG_ENABLED)
GPUTarget gpu_arch = get_arch_from_target(_device.target());
- concat_str += " -DGPU_ARCH=" + support::cpp11::to_string(
- static_cast<std::underlying_type<GPUTarget>::type>(gpu_arch));
+ concat_str +=
+ " -DGPU_ARCH=" + support::cpp11::to_string(static_cast<std::underlying_type<GPUTarget>::type>(gpu_arch));
- if(_device.supported("cl_khr_fp16"))
+ if (_device.supported("cl_khr_fp16"))
{
concat_str += " -DARM_COMPUTE_OPENCL_FP16_ENABLED=1 ";
}
- if(_device.supported("cl_arm_integer_dot_product_int8"))
+ if (_device.supported("cl_arm_integer_dot_product_int8") || _device.supported("cl_khr_integer_dot_product"))
{
concat_str += " -DARM_COMPUTE_OPENCL_DOT8_ENABLED=1 ";
}
- if(_device.supported("cl_arm_integer_dot_product_accumulate_int8"))
+ if (_device.supported("cl_arm_integer_dot_product_accumulate_int8"))
{
concat_str += " -DARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED=1 ";
}
- if(_device.version() == CLVersion::CL20)
- {
- concat_str += " -cl-std=CL2.0 ";
- }
- else if(_device.supported("cl_arm_non_uniform_work_group_size"))
+ std::tie(ext_supported, ext_buildopts) = _device.is_non_uniform_workgroup_supported();
+
+ if (ext_supported)
{
- concat_str += " -cl-arm-non-uniform-work-group-size ";
+ concat_str += ext_buildopts;
}
else
{
ARM_COMPUTE_ERROR("Non uniform workgroup size is not supported!!");
}
+ if (gpu_arch != GPUTarget::UNKNOWN && gpu_arch != GPUTarget::MIDGARD && get_ddk_version() >= 11)
+ {
+ concat_str += " -DUNROLL_WITH_PRAGMA ";
+ }
+
std::string build_options = stringify_set(build_options_set, kernel_path) + concat_str;
return build_options;
@@ -283,7 +309,7 @@ std::string CLCompileContext::stringify_set(const StringSet &s, const std::strin
#endif /* EMBEDDED_KERNELS */
// Concatenate set
- for(const auto &el : s)
+ for (const auto &el : s)
{
concat_set += " " + el;
}
@@ -319,8 +345,8 @@ const cl::Device &CLCompileContext::get_device() const
void CLCompileContext::set_device(cl::Device device)
{
- _device = std::move(device);
_is_wbsm_supported = get_wbsm_support_info(device);
+ _device = std::move(device);
}
cl::NDRange CLCompileContext::default_ndrange() const
@@ -328,7 +354,7 @@ cl::NDRange CLCompileContext::default_ndrange() const
GPUTarget _target = get_target_from_device(_device.cl_device());
cl::NDRange default_range;
- switch(_target)
+ switch (_target)
{
case GPUTarget::MIDGARD:
case GPUTarget::T600:
@@ -358,7 +384,8 @@ size_t CLCompileContext::max_local_workgroup_size(const cl::Kernel &kernel) cons
size_t result;
size_t err = kernel.getWorkGroupInfo(_device.cl_device(), CL_KERNEL_WORK_GROUP_SIZE, &result);
- ARM_COMPUTE_ERROR_ON_MSG(err != 0, "clGetKernelWorkGroupInfo failed to return the maximum workgroup size for the kernel");
+ ARM_COMPUTE_ERROR_ON_MSG(err != 0,
+ "clGetKernelWorkGroupInfo failed to return the maximum workgroup size for the kernel");
ARM_COMPUTE_UNUSED(err);
return result;
@@ -373,4 +400,22 @@ cl_uint CLCompileContext::get_num_compute_units() const
{
return _device.compute_units();
}
+
+int32_t CLCompileContext::get_ddk_version() const
+{
+ const std::string device_version = _device.device_version();
+ const std::regex ddk_regex("r([0-9]*)p[0-9]");
+ std::smatch ddk_match;
+
+ if (std::regex_search(device_version, ddk_match, ddk_regex))
+ {
+ return std::stoi(ddk_match[1]);
+ }
+
+ return -1;
+}
+GPUTarget CLCompileContext::get_gpu_target() const
+{
+ return _device.target();
+}
} // namespace arm_compute
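The new get_ddk_version() helper, used above to gate the -DUNROLL_WITH_PRAGMA build option, extracts the major DDK revision from the device version string with the regex r([0-9]*)p[0-9]. A small standalone sketch of what that parse yields, using a made-up version string for illustration:

    #include <iostream>
    #include <regex>
    #include <string>

    int main()
    {
        // Hypothetical Mali driver version string; real strings vary by driver release.
        const std::string device_version = "OpenCL 3.0 v1.r38p1-01eac0";

        const std::regex ddk_regex("r([0-9]*)p[0-9]");
        std::smatch      ddk_match;

        if (std::regex_search(device_version, ddk_match, ddk_regex))
        {
            std::cout << "DDK major revision: " << std::stoi(ddk_match[1]) << "\n"; // prints 38
        }
        else
        {
            std::cout << "No DDK revision found; get_ddk_version() would return -1\n";
        }
        return 0;
    }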
diff --git a/src/core/CL/CLHelpers.cpp b/src/core/CL/CLHelpers.cpp
index aff897738a..5ea99d360a 100644
--- a/src/core/CL/CLHelpers.cpp
+++ b/src/core/CL/CLHelpers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2021 Arm Limited.
+ * Copyright (c) 2016-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,12 +22,16 @@
* SOFTWARE.
*/
#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLCoreRuntimeContext.h"
+
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/CLTypes.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Log.h"
#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/DataTypeUtils.h"
+
+#include "src/gpu/cl/ClCompileContext.h"
+#include "src/gpu/cl/ClKernelLibrary.h"
#include <utility>
#include <vector>
@@ -36,7 +40,7 @@ namespace arm_compute
{
std::string get_cl_type_from_data_type(const DataType &dt)
{
- switch(dt)
+ switch (dt)
{
case DataType::U8:
case DataType::QASYMM8:
@@ -72,7 +76,7 @@ std::string get_cl_type_from_data_type(const DataType &dt)
std::string get_cl_promoted_type_from_data_type(const DataType &dt)
{
- switch(dt)
+ switch (dt)
{
case DataType::U8:
case DataType::QASYMM8:
@@ -102,7 +106,7 @@ std::string get_cl_promoted_type_from_data_type(const DataType &dt)
std::string get_cl_unsigned_type_from_element_size(size_t element_size)
{
- switch(element_size)
+ switch (element_size)
{
case 1:
return "uchar";
@@ -120,7 +124,7 @@ std::string get_cl_unsigned_type_from_element_size(size_t element_size)
std::string get_cl_signed_type_from_element_size(size_t element_size)
{
- switch(element_size)
+ switch (element_size)
{
case 1:
return "char";
@@ -138,11 +142,10 @@ std::string get_cl_signed_type_from_element_size(size_t element_size)
std::string get_cl_select_type_from_data_type(const DataType &dt)
{
- switch(dt)
+ switch (dt)
{
case DataType::U8:
case DataType::QASYMM8:
- return "uchar";
case DataType::S8:
case DataType::QASYMM8_SIGNED:
case DataType::QSYMM8:
@@ -172,7 +175,7 @@ std::string get_cl_select_type_from_data_type(const DataType &dt)
std::string get_cl_dot8_acc_type_from_data_type(const DataType &dt)
{
- switch(dt)
+ switch (dt)
{
case DataType::U8:
case DataType::QASYMM8:
@@ -190,7 +193,7 @@ std::string get_cl_dot8_acc_type_from_data_type(const DataType &dt)
std::string get_data_size_from_data_type(const DataType &dt)
{
- switch(dt)
+ switch (dt)
{
case DataType::U8:
case DataType::S8:
@@ -242,8 +245,9 @@ bool dot8_supported(const cl::Device &device)
const GPUTarget gpu_target = get_target_from_name(device_name);
// SW_WORKAROUND: Workaround for DDK revision r14p0.to enable cl_arm_integer_dot_product_int8
- std::set<GPUTarget> sw_workaround_issue = { GPUTarget::G76 };
- return (device_supports_extension(device, "cl_arm_integer_dot_product_int8") || sw_workaround_issue.count(gpu_target) != 0);
+ std::set<GPUTarget> sw_workaround_issue = {GPUTarget::G76};
+ return (device_supports_extension(device, "cl_arm_integer_dot_product_int8") ||
+ sw_workaround_issue.count(gpu_target) != 0);
}
bool dot8_acc_supported(const cl::Device &device)
@@ -254,19 +258,23 @@ bool dot8_acc_supported(const cl::Device &device)
CLVersion get_cl_version(const cl::Device &device)
{
std::string version_str = device.getInfo<CL_DEVICE_VERSION>();
- if(version_str.find("OpenCL 2") != std::string::npos)
+ if (version_str.find("OpenCL 3") != std::string::npos)
+ {
+ return CLVersion::CL30;
+ }
+ else if (version_str.find("OpenCL 2") != std::string::npos)
{
return CLVersion::CL20;
}
- else if(version_str.find("OpenCL 1.2") != std::string::npos)
+ else if (version_str.find("OpenCL 1.2") != std::string::npos)
{
return CLVersion::CL12;
}
- else if(version_str.find("OpenCL 1.1") != std::string::npos)
+ else if (version_str.find("OpenCL 1.1") != std::string::npos)
{
return CLVersion::CL11;
}
- else if(version_str.find("OpenCL 1.0") != std::string::npos)
+ else if (version_str.find("OpenCL 1.0") != std::string::npos)
{
return CLVersion::CL10;
}
@@ -281,14 +289,15 @@ bool device_supports_extension(const cl::Device &device, const char *extension_n
return (pos != std::string::npos);
}
-bool cl_winograd_convolution_layer_supported(const Size2D &output_tile, const Size2D &kernel_size, DataLayout data_layout)
+bool cl_winograd_convolution_layer_supported(const Size2D &output_tile,
+ const Size2D &kernel_size,
+ DataLayout data_layout)
{
ARM_COMPUTE_ERROR_ON(data_layout == DataLayout::UNKNOWN);
using WinogradConfiguration = std::pair<std::pair<int, int>, std::pair<int, int>>;
- std::vector<WinogradConfiguration> winograd_configs_nchw =
- {
+ std::vector<WinogradConfiguration> winograd_configs_nchw = {
WinogradConfiguration(std::pair<int, int>(1, 2), std::pair<int, int>(1, 3)),
WinogradConfiguration(std::pair<int, int>(1, 4), std::pair<int, int>(1, 3)),
WinogradConfiguration(std::pair<int, int>(2, 1), std::pair<int, int>(3, 1)),
@@ -297,11 +306,9 @@ bool cl_winograd_convolution_layer_supported(const Size2D &output_tile, const Si
WinogradConfiguration(std::pair<int, int>(4, 4), std::pair<int, int>(3, 3)),
WinogradConfiguration(std::pair<int, int>(4, 4), std::pair<int, int>(5, 5)),
WinogradConfiguration(std::pair<int, int>(4, 1), std::pair<int, int>(5, 1)),
- WinogradConfiguration(std::pair<int, int>(1, 4), std::pair<int, int>(1, 5))
- };
+ WinogradConfiguration(std::pair<int, int>(1, 4), std::pair<int, int>(1, 5))};
- std::vector<WinogradConfiguration> winograd_configs_nhwc =
- {
+ std::vector<WinogradConfiguration> winograd_configs_nhwc = {
WinogradConfiguration(std::pair<int, int>(2, 2), std::pair<int, int>(3, 3)),
WinogradConfiguration(std::pair<int, int>(1, 4), std::pair<int, int>(1, 3)),
WinogradConfiguration(std::pair<int, int>(4, 1), std::pair<int, int>(3, 1)),
@@ -318,19 +325,21 @@ bool cl_winograd_convolution_layer_supported(const Size2D &output_tile, const Si
std::pair<int, int>(kernel_size.width, kernel_size.height));
// Return true if supported
- if(data_layout == DataLayout::NCHW)
+ if (data_layout == DataLayout::NCHW)
{
- return (std::find(winograd_configs_nchw.begin(), winograd_configs_nchw.end(), p) != winograd_configs_nchw.end());
+ return (std::find(winograd_configs_nchw.begin(), winograd_configs_nchw.end(), p) !=
+ winograd_configs_nchw.end());
}
else
{
- return (std::find(winograd_configs_nhwc.begin(), winograd_configs_nhwc.end(), p) != winograd_configs_nhwc.end());
+ return (std::find(winograd_configs_nhwc.begin(), winograd_configs_nhwc.end(), p) !=
+ winograd_configs_nhwc.end());
}
}
size_t preferred_vector_width(const cl::Device &device, const DataType dt)
{
- switch(dt)
+ switch (dt)
{
case DataType::U8:
case DataType::S8:
@@ -376,7 +385,7 @@ size_t get_cl_image_pitch_alignment(const cl::Device &device)
cl_int err = clGetDeviceInfo(device(), CL_DEVICE_IMAGE_PITCH_ALIGNMENT, sizeof(cl_uint), &pixel_aligment, nullptr);
- if(err == CL_SUCCESS)
+ if (err == CL_SUCCESS)
{
return pixel_aligment;
}
@@ -386,26 +395,27 @@ size_t get_cl_image_pitch_alignment(const cl::Device &device)
}
}
-cl::Kernel create_opencl_kernel(CLCoreRuntimeContext *ctx, const std::string &kernel_name, const CLBuildOptions &build_opts)
+bool get_cl_non_uniform_work_group_supported(const cl::Device &device)
{
- if(ctx && ctx->kernel_library())
- {
- // New api going through the core context
- return static_cast<cl::Kernel>(ctx->kernel_library()->create_kernel(kernel_name, build_opts.options()));
- }
- else
- {
- // Legacy code through the singleton
- return static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
- }
+ cl_bool supported = CL_FALSE;
+
+ cl_int err =
+ clGetDeviceInfo(device(), CL_DEVICE_NON_UNIFORM_WORK_GROUP_SUPPORT, sizeof(cl_bool), &supported, nullptr);
+
+ return (err == CL_SUCCESS && supported == CL_TRUE);
}
-cl::Kernel create_kernel(const CLCompileContext &ctx, const std::string &kernel_name, const std::set<std::string> &build_opts)
+cl::Kernel
+create_kernel(const CLCompileContext &ctx, const std::string &kernel_name, const std::set<std::string> &build_opts)
{
- const std::string program_name = CLKernelLibrary::get().get_program_name(kernel_name);
- std::pair<std::string, bool> kernel_src = CLKernelLibrary::get().get_program(program_name);
- const std::string kernel_path = CLKernelLibrary::get().get_kernel_path();
- return static_cast<cl::Kernel>(ctx.create_kernel(kernel_name, program_name, kernel_src.first, kernel_path, build_opts, kernel_src.second));
+ opencl::ClKernelLibrary &klib = opencl::ClKernelLibrary::get();
+
+ const std::string program_name = klib.program_name(kernel_name);
+ auto kernel_src = klib.program(program_name);
+ const std::string kernel_path = klib.kernel_path();
+
+ return static_cast<cl::Kernel>(ctx.create_kernel(kernel_name, program_name, kernel_src.program, kernel_path,
+ build_opts, kernel_src.is_binary));
}
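A sketch of the updated entry point with an illustrative kernel name and build options; program lookup is now delegated to opencl::ClKernelLibrary as shown above:
    // Build a compile context from the library's context/device, then compile a kernel.
    CLCompileContext      ctx(CLKernelLibrary::get().context(), CLKernelLibrary::get().get_device());
    std::set<std::string> build_opts{"-DDATA_TYPE=float", "-DVEC_SIZE=4"}; // illustrative options
    cl::Kernel            kernel = create_kernel(ctx, "activation_layer", build_opts);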
cl::NDRange create_lws_hint_parallel_implementations(unsigned int input_dimension, unsigned int vector_size)
@@ -419,8 +429,9 @@ cl::NDRange create_lws_hint_parallel_implementations(unsigned int input_dimensio
bool get_wbsm_support_info(const cl::Device &device)
{
cl_bitfield capabilities = 0;
- cl_int err = clGetDeviceInfo(device.get(), ARM_COMPUTE_LIBRARY_OPENCL_DEVICE_CAPABILITIES_ARM, sizeof(cl_bitfield), &capabilities, nullptr);
- if((err == CL_SUCCESS) && (capabilities & ARM_COMPUTE_LIBRARY_OPENCL_EXEC_WBSM_ARM))
+ cl_int err = clGetDeviceInfo(device.get(), CL_DEVICE_SCHEDULING_CONTROLS_CAPABILITIES_ARM, sizeof(cl_bitfield),
+ &capabilities, nullptr);
+ if ((err == CL_SUCCESS) && (capabilities & CL_KERNEL_EXEC_INFO_WORKGROUP_BATCH_SIZE_MODIFIER_ARM))
{
return true;
}
@@ -429,12 +440,75 @@ bool get_wbsm_support_info(const cl::Device &device)
void set_wbsm(cl::Kernel &kernel, cl_int wbsm_hint)
{
- cl_int err = clSetKernelExecInfo(kernel.get(),
- ARM_COMPUTE_LIBRARY_OPENCL_EXEC_WBSM_ARM,
- sizeof(cl_int),
- &wbsm_hint);
+ cl_int err = clSetKernelExecInfo(kernel.get(), CL_KERNEL_EXEC_INFO_WORKGROUP_BATCH_SIZE_MODIFIER_ARM,
+ sizeof(cl_int), &wbsm_hint);
ARM_COMPUTE_UNUSED(err);
ARM_COMPUTE_ERROR_ON(err != CL_SUCCESS);
}
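A hedged sketch pairing the two helpers, assuming an already-built cl::Kernel named kernel (the hint value is an arbitrary example):
    // Only pass a workgroup batch size modifier when the ARM scheduling-controls
    // capability advertises support for it.
    if (get_wbsm_support_info(CLKernelLibrary::get().get_device()))
    {
        set_wbsm(kernel, 4); // 4 is an illustrative hint value
    }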
+bool export_to_cl_image(const ITensorInfo *tensor)
+{
+ if (tensor->tensor_shape()[0] % 4 != 0)
+ {
+ return false;
+ }
+
+ // If not floating point
+ if (!is_data_type_float(tensor->data_type()))
+ {
+ return false;
+ }
+
+ // Check if the cl_khr_image2d_from_buffer extension is supported on the target platform
+ if (!image2d_from_buffer_supported(CLKernelLibrary::get().get_device()))
+ {
+ return false;
+ }
+
+ // Check cl image pitch alignment
+ if (get_cl_image_pitch_alignment(CLKernelLibrary::get().get_device()) == 0)
+ {
+ return false;
+ }
+
+ const size_t image_w = tensor->tensor_shape()[0] / 4;
+ const size_t image_h = tensor->tensor_shape().total_size() / tensor->tensor_shape()[0];
+ const size_t max_image_w = CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_IMAGE2D_MAX_WIDTH>();
+ const size_t max_image_h = CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_IMAGE2D_MAX_HEIGHT>();
+
+ if (image_w > max_image_w || image_h > max_image_h)
+ {
+ return false;
+ }
+
+ return true;
+}
+
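A minimal sketch of gating a kernel's cl_image path on the new helper; the -DEXPORT_TO_CL_IMAGE define and the src tensor are illustrative assumptions, not part of this change:
    // Add the define only when shape, data type, extension and pitch/size limits all allow it.
    CLBuildOptions build_opts;
    build_opts.add_option_if(export_to_cl_image(src->info()), "-DEXPORT_TO_CL_IMAGE");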
+void set_unroll_with_pragma(CLBuildOptions &built_opts, std::initializer_list<int> values)
+{
+ for (const int value : values)
+ {
+ if (value > max_manual_loop_unrolling)
+ {
+ built_opts.add_option("-DUNROLL_WITH_PRAGMA");
+ return;
+ }
+ }
+}
+
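A sketch of the intended call pattern, assuming illustrative filter dimensions kernel_h and kernel_w:
    // When any candidate loop count exceeds max_manual_loop_unrolling, the helper switches
    // the kernel to "#pragma unroll"-style unrolling via -DUNROLL_WITH_PRAGMA.
    CLBuildOptions build_opts;
    set_unroll_with_pragma(build_opts, {static_cast<int>(kernel_h), static_cast<int>(kernel_w)});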
+bool arm_matrix_multiply_supported(const cl::Device &device)
+{
+ return device_supports_extension(device, "cl_arm_matrix_multiply");
+}
+
+bool command_buffer_supported(const cl::Device &device)
+{
+ return device_supports_extension(device, "cl_khr_command_buffer");
+}
+
+bool command_buffer_mutable_dispatch_supported(const cl::Device &device)
+{
+ return device_supports_extension(device, "cl_khr_command_buffer_mutable_dispatch");
+}
+
} // namespace arm_compute
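A sketch of how the new extension queries might guard the command-buffer code paths added in this change (the control flow is illustrative):
    const cl::Device &device = CLKernelLibrary::get().get_device();
    if (command_buffer_supported(device) && command_buffer_mutable_dispatch_supported(device))
    {
        // cl_khr_command_buffer and its mutable-dispatch extension are both available,
        // so a CLMutableCommandBuffer can be used; otherwise fall back to regular enqueues.
    }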
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 14d3a2cad5..e69d006750 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -23,1019 +23,116 @@
*/
#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Utils.h"
-#include "support/StringSupport.h"
+
+#include "src/gpu/cl/ClKernelLibrary.h"
#include <algorithm>
#include <array>
#include <fstream>
#include <utility>
#include <vector>
-
-#ifdef ARM_COMPUTE_COMPRESSED_KERNELS
-#include <zlib.h>
-
-namespace
-{
-/* Decoding table */
-constexpr std::array<uint8_t, 256> b64_invtab =
-{
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 62, 0, 0, 0, 63,
- 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 0, 0, 0, 0, 0, 0,
- 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
- 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 0, 0, 0, 0, 0,
- 0, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
- 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-};
-
-/** Decode a base64 encoded string
- *
- * @param[in] str Base64 encoded string to decode
- *
- * @return The decode string in case of a valid, non-empty string otherwise an empty string
- */
-std::string decode_base64(const std::string &str)
-{
- constexpr const char pad_char = '=';
-
- // Handle empty string
- if(str.empty())
- {
- return {};
- }
-
- // Base64 encoded string has size multiple of 4
- if(str.length() % 4)
- {
- return {};
- }
-
- //
- // Check encoded string padding
- std::size_t padding = (str.rbegin()[0] == pad_char) + (str.rbegin()[1] == pad_char);
- const int str_len = str.size();
-
- // Reserve memory for the decoded string
- // Note each 4 consecutive elements of 6-bit encode 3 bytes
- std::string dec_b64;
- dec_b64.reserve(((str_len / 4) * 3));
-
- // Block decoding function (exclude padding)
- int c = 0;
- const int end = str_len - 4 - padding;
- for(; c <= end; c += 4)
- {
- const int byte0 = b64_invtab[str[c]];
- const int byte1 = b64_invtab[str[c + 1]];
- const int byte2 = b64_invtab[str[c + 2]];
- const int byte3 = b64_invtab[str[c + 3]];
-
- dec_b64.push_back((byte0 << 2) | (byte1 >> 4));
- dec_b64.push_back((byte1 << 4) | (byte2 >> 2));
- dec_b64.push_back((byte2 << 6) | (byte3));
- }
-
- // Last step that might contain padding symbols
- if(padding == 1)
- {
- const int byte0 = b64_invtab[str[c]];
- const int byte1 = b64_invtab[str[c + 1]];
- const int byte2 = b64_invtab[str[c + 2]];
-
- dec_b64.push_back((byte0 << 2) | (byte1 >> 4));
- dec_b64.push_back((byte1 << 4) | (byte2 >> 2));
- }
- else if(padding == 2)
- {
- const int byte0 = b64_invtab[str[c]];
- const int byte1 = b64_invtab[str[c + 1]];
-
- dec_b64.push_back((byte0 << 2) | (byte1 >> 4));
- }
-
- return dec_b64;
-}
-
-/** Decompress a zlib compressed string
- *
- * @param[in] str ZLib compressed string
- *
- * @return The decompressed string if successful, otherwise false.
- */
-std::string decompress_zlib(const std::string &str)
-{
- // Create and initialize decompression stream
- z_stream ds{};
- if(inflateInit(&ds) != Z_OK)
- {
- return std::string();
- }
- ds.avail_in = str.size();
- ds.next_in = (Bytef *)str.data();
-
- // Roll-over the string using a buffer and decompress
- int status = Z_OK;
- char roll_buff[16384];
- std::string inflated_str;
- do
- {
- ds.avail_out = sizeof(roll_buff);
- ds.next_out = reinterpret_cast<Bytef *>(roll_buff);
-
- status = inflate(&ds, 0);
- if(inflated_str.size() < ds.total_out)
- {
- inflated_str.append(roll_buff, ds.total_out - inflated_str.size());
- }
- }
- while(status == Z_OK);
-
- // Finalize decompression stream
- inflateEnd(&ds);
- if(status != Z_STREAM_END)
- {
- return std::string();
- }
-
- return inflated_str;
-}
-} // namespace
-#endif /* ARM_COMPUTE_COMPRESSED_KERNELS */
-
-using namespace arm_compute;
-const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
-{
- { "activation_layer", "activation_layer.cl" },
- { "activation_layer_quant", "activation_layer_quant.cl" },
- { "activation_layer_quant_f32", "activation_layer_quant.cl" },
- { "arg_min_max_x", "arg_min_max.cl" },
- { "arg_min_max_y", "arg_min_max.cl" },
- { "arg_min_max_z", "arg_min_max.cl" },
- { "arg_min_max_w", "arg_min_max.cl" },
- { "batch_to_space_nchw", "batch_to_space.cl" },
- { "batch_to_space_static_nchw", "batch_to_space.cl" },
- { "batch_to_space_nhwc", "batch_to_space.cl" },
- { "batch_to_space_static_nhwc", "batch_to_space.cl" },
- { "batchnormalization_layer_nchw", "batchnormalization_layer.cl" },
- { "batchnormalization_layer_nhwc", "batchnormalization_layer.cl" },
- { "bitwise_or", "bitwise_op.cl" },
- { "bitwise_and", "bitwise_op.cl" },
- { "bitwise_xor", "bitwise_op.cl" },
- { "bitwise_not", "bitwise_op.cl" },
- { "bounding_box_transform", "bounding_box_transform.cl" },
- { "bounding_box_transform_quantized", "bounding_box_transform_quantized.cl" },
- { "channel_shuffle_nchw", "channel_shuffle.cl" },
- { "channel_shuffle_nhwc", "channel_shuffle.cl" },
- { "compare_equal", "comparisons.cl" },
- { "compare_equal_quantized", "comparisons.cl" },
- { "compare_notequal", "comparisons.cl" },
- { "compare_notequal_quantized", "comparisons.cl" },
- { "compare_greater", "comparisons.cl" },
- { "compare_greater_quantized", "comparisons.cl" },
- { "compare_greaterequal", "comparisons.cl" },
- { "compare_greaterequal_quantized", "comparisons.cl" },
- { "compare_less", "comparisons.cl" },
- { "compare_less_quantized", "comparisons.cl" },
- { "compare_lessequal", "comparisons.cl" },
- { "compare_lessequal_quantized", "comparisons.cl" },
- { "concatenate", "concatenate.cl" },
- { "concatenate_width", "concatenate.cl" },
- { "concatenate_height", "concatenate.cl" },
- { "concatenate_width_x2", "concatenate.cl" },
- { "concatenate_width_x4", "concatenate.cl" },
- { "col2im", "col2im.cl" },
- { "convert_depth_down", "depth_convert.cl" },
- { "convert_depth_up", "depth_convert.cl" },
- { "convert_fc_weights", "convert_fc_weights.cl" },
- { "copy_tensor", "copy_tensor.cl" },
- { "crop_tensor", "crop_tensor.cl" },
- { "deconvolution_reshape", "deconvolution_layer.cl" },
- { "deconvolution_upsample", "deconvolution_layer.cl" },
- { "depthwise_convolution_3x3", "depthwise_convolution.cl" },
- { "depthwise_convolution_3x3_f16", "depthwise_convolution.cl" },
- { "depthwise_convolution_3x3_nhwc", "depthwise_convolution.cl" },
- { "depthwise_convolution_3x3_nhwc_stride1", "depthwise_convolution.cl" },
- { "dwc_MxN_native_fp_nhwc", "depthwise_convolution.cl" },
- { "dwc_MxN_native_quantized8_nhwc", "depthwise_convolution_quantized.cl" },
- { "dwc_3x3_native_quantized8_nchw", "depthwise_convolution_quantized.cl" },
- { "dwc_3x3_native_quantized8_dot8_nchw", "depthwise_convolution_quantized.cl" },
- { "dwc_3x3_reshaped_quantized8_nhwc", "depthwise_convolution_quantized.cl" },
- { "dwc_3x3_reshaped_quantized8_stride1_nhwc", "depthwise_convolution_quantized.cl" },
- { "dwc_3x3_reshaped_quantized8_dot8_stride1_nhwc", "depthwise_convolution_quantized.cl" },
- { "depth_to_space_nchw", "depth_to_space.cl" },
- { "depth_to_space_nhwc", "depth_to_space.cl" },
- { "depthwise_convolution_3x3_stridex1_stridey1_bifrost_f16", "depthwise_convolution.cl" },
- { "depthwise_convolution_3x3_stridex2_stridey2_bifrost_f16", "depthwise_convolution.cl" },
- { "depthwise_convolution_3x3_stridex1_stridey1_bifrost_f32", "depthwise_convolution.cl" },
- { "depthwise_convolution_3x3_stridex2_stridey2_bifrost_f32", "depthwise_convolution.cl" },
- { "depthwise_convolution_reshape_weights", "depthwise_convolution.cl" },
- { "dequantization_layer", "dequantization_layer.cl" },
- { "dequantization_layer_per_channel_nhwc", "dequantization_layer.cl" },
- { "dequantization_layer_per_channel_nchw", "dequantization_layer.cl" },
- { "direct_convolution_nhwc", "direct_convolution.cl" },
- { "direct_convolution1x1", "direct_convolution1x1.cl" },
- { "direct_convolution1x1_f32_bifrost", "direct_convolution1x1.cl" },
- { "direct_convolution3x3", "direct_convolution3x3.cl" },
- { "direct_convolution3x3_f32_bifrost", "direct_convolution3x3.cl" },
- { "direct_convolution5x5", "direct_convolution5x5.cl" },
- { "direct_convolution5x5_f32_bifrost", "direct_convolution5x5.cl" },
- { "direct_convolution_quantized", "direct_convolution_quantized.cl" },
- { "elementwise_operation_ADD", "elementwise_operation.cl" },
- { "elementwise_operation_SUB", "elementwise_operation.cl" },
- { "elementwise_operation_MAX", "elementwise_operation.cl" },
- { "elementwise_operation_MIN", "elementwise_operation.cl" },
- { "elementwise_operation_DIV", "elementwise_operation.cl" },
- { "elementwise_operation_SQUARED_DIFF", "elementwise_operation.cl" },
- { "elementwise_operation_POWER", "elementwise_operation.cl" },
- { "elementwise_operation_PRELU", "elementwise_operation.cl" },
- { "elementwise_operation_AND", "elementwise_operation.cl" },
- { "elementwise_operation_OR", "elementwise_operation.cl" },
- { "elementwise_operation_ADD_quantized", "elementwise_operation_quantized.cl" },
- { "elementwise_operation_SUB_quantized", "elementwise_operation_quantized.cl" },
- { "elementwise_operation_MAX_quantized", "elementwise_operation_quantized.cl" },
- { "elementwise_operation_MIN_quantized", "elementwise_operation_quantized.cl" },
- { "elementwise_operation_DIV_quantized", "elementwise_operation_quantized.cl" },
- { "elementwise_operation_SQUARED_DIFF_quantized", "elementwise_operation_quantized.cl" },
- { "elementwise_operation_PRELU_quantized", "elementwise_operation_quantized.cl" },
- { "elementwise_unary", "elementwise_unary.cl" },
- { "fft_digit_reverse_axis_0", "fft_digit_reverse.cl" },
- { "fft_digit_reverse_axis_1", "fft_digit_reverse.cl" },
- { "fft_radix_2_first_stage_axis_0", "fft.cl" },
- { "fft_radix_2_first_stage_axis_1", "fft.cl" },
- { "fft_radix_2_axis_0", "fft.cl" },
- { "fft_radix_2_axis_1", "fft.cl" },
- { "fft_radix_3_first_stage_axis_0", "fft.cl" },
- { "fft_radix_3_first_stage_axis_1", "fft.cl" },
- { "fft_radix_3_axis_0", "fft.cl" },
- { "fft_radix_3_axis_1", "fft.cl" },
- { "fft_radix_4_first_stage_axis_0", "fft.cl" },
- { "fft_radix_4_first_stage_axis_1", "fft.cl" },
- { "fft_radix_4_axis_0", "fft.cl" },
- { "fft_radix_4_axis_1", "fft.cl" },
- { "fft_radix_5_first_stage_axis_0", "fft.cl" },
- { "fft_radix_5_first_stage_axis_1", "fft.cl" },
- { "fft_radix_5_axis_0", "fft.cl" },
- { "fft_radix_5_axis_1", "fft.cl" },
- { "fft_radix_7_first_stage_axis_0", "fft.cl" },
- { "fft_radix_7_first_stage_axis_1", "fft.cl" },
- { "fft_radix_7_axis_0", "fft.cl" },
- { "fft_radix_7_axis_1", "fft.cl" },
- { "fft_radix_8_first_stage_axis_0", "fft.cl" },
- { "fft_radix_8_first_stage_axis_1", "fft.cl" },
- { "fft_radix_8_axis_0", "fft.cl" },
- { "fft_radix_8_axis_1", "fft.cl" },
- { "fft_scale_conj", "fft_scale.cl" },
- { "fill_image_borders_constant", "fill_border.cl" },
- { "fill_image_borders_replicate", "fill_border.cl" },
- { "floor_layer", "floor.cl" },
- { "fuse_batchnormalization_layer", "batchnormalization_layer.cl" },
- { "gather", "gather.cl" },
- { "gemm_ma_f16", "gemm.cl" },
- { "gemm_ma_f32", "gemm.cl" },
- { "gemm_mv", "gemv.cl" },
- { "gemm_mv_quantized", "gemv.cl" },
- { "gemm_mm_interleaved_transposed_f16", "gemm_v1.cl" },
- { "gemm_mm_interleaved_transposed_f16_acc32", "gemm_v1.cl" },
- { "gemm_mm_interleaved_transposed_f16_bifrost", "gemm_v1.cl" },
- { "gemm_mm_interleaved_transposed_f32", "gemm_v1.cl" },
- { "gemm_mm_interleaved_transposed_f32_bifrost", "gemm_v1.cl" },
- { "gemm_mm_floating_point", "gemm_v1.cl" },
- { "gemm_mm_floating_point_f16_bifrost", "gemm_v1.cl" },
- { "gemm_mm_floating_point_f16_bifrost_acc32", "gemm_v1.cl" },
- { "gemm_mm_floating_point_f32_bifrost", "gemm_v1.cl" },
- { "gemm_mm_floating_point_f32_bifrost_1000", "gemm_v1.cl" },
- { "gemm_mm_native", "gemm.cl" },
- { "gemm_mm_reshaped_lhs_nt_rhs_t", "gemm.cl" },
- { "gemm_mm_reshaped_lhs_nt_rhs_t_texture", "gemm.cl" },
- { "gemm_mm_reshaped_lhs_t_rhs_nt", "gemm.cl" },
- { "gemm_mm_reshaped_lhs_t_rhs_nt_texture", "gemm.cl" },
- { "gemm_mm_reshaped_only_rhs_nt", "gemm.cl" },
- { "gemm_mm_reshaped_only_rhs_nt_texture", "gemm.cl" },
- { "gemm_mm_reshaped_only_rhs_t", "gemm.cl" },
- { "gemm_mm_reshaped_only_rhs_t_texture", "gemm.cl" },
- { "gemm_lc_vm_f32", "gemm.cl" },
- { "gemm_reshape_lhs_matrix_nt", "gemm.cl" },
- { "gemm_reshape_lhs_matrix_t", "gemm.cl" },
- { "gemm_reshape_rhs_matrix_nt", "gemm.cl" },
- { "gemm_reshape_rhs_matrix_t", "gemm.cl" },
- { "gemmlowp_matrix_a_reduction", "gemmlowp.cl" },
- { "gemmlowp_matrix_a_reduction_dot8", "gemmlowp.cl" },
- { "gemmlowp_matrix_b_reduction", "gemmlowp.cl" },
- { "gemmlowp_mm_native", "gemmlowp.cl" },
- { "gemmlowp_mm_reshaped_lhs_nt_rhs_t", "gemmlowp.cl" },
- { "gemmlowp_mm_reshaped_only_rhs_t", "gemmlowp.cl" },
- { "gemmlowp_mm_reshaped_only_rhs_t_fused_output_stage_fixedpoint", "gemmlowp.cl" },
- { "gemmlowp_offset_contribution", "gemmlowp.cl" },
- { "gemmlowp_offset_contribution_quantize_down", "gemmlowp.cl" },
- { "gemmlowp_offset_contribution_quantize_down_fixedpoint", "gemmlowp.cl" },
- { "gemmlowp_output_stage_quantize_down", "gemmlowp.cl" },
- { "gemmlowp_output_stage_quantize_down_fixedpoint", "gemmlowp.cl" },
- { "gemmlowp_output_stage_quantize_down_fixedpoint_qsymm16", "gemmlowp.cl" },
- { "gemmlowp_output_stage_quantize_down_float", "gemmlowp.cl" },
- { "generate_proposals_compute_all_anchors", "generate_proposals.cl" },
- { "generate_proposals_compute_all_anchors_quantized", "generate_proposals_quantized.cl" },
- { "im2col1x1_stridex1_nchw", "im2col.cl" },
- { "im2col3x3_nchw", "im2col.cl" },
- { "im2col5x5_nchw", "im2col.cl" },
- { "im2col11x11_padx0_pady0_nchw", "im2col.cl" },
- { "im2col_generic_nchw", "im2col.cl" },
- { "im2col_generic_padx0_pady0_nchw", "im2col.cl" },
- { "im2col3x3_nhwc", "im2col.cl" },
- { "im2col9x9_nhwc", "im2col.cl" },
- { "im2col_generic_nhwc", "im2col.cl" },
- { "instance_normalization", "instance_normalization.cl" },
- { "l2_normalize_x", "l2_normalize.cl" },
- { "l2_normalize_y", "l2_normalize.cl" },
- { "l2_normalize_z", "l2_normalize.cl" },
- { "max_unpooling_layer_2", "unpooling_layer.cl" },
- { "mean_stddev_normalization", "mean_stddev_normalization.cl" },
- { "memset", "memset.cl" },
- { "minmax_layer", "minmax_layer.cl" },
- { "non_max_suppression", "nonmax.cl" },
- { "normalization_layer_cross_map", "normalization_layer.cl" },
- { "normalization_layer_in_map_nchw", "normalization_layer.cl" },
- { "normalization_layer_in_map_nhwc", "normalization_layer.cl" },
- { "normalize_planar_yuv_layer_nchw", "normalize_planar_yuv_layer.cl" },
- { "normalize_planar_yuv_layer_nhwc", "normalize_planar_yuv_layer.cl" },
- { "normalize_planar_yuv_layer_q8_nchw", "normalize_planar_yuv_layer_quantized.cl" },
- { "normalize_planar_yuv_layer_q8_nhwc", "normalize_planar_yuv_layer_quantized.cl" },
- { "pad_layer_constant", "pad_layer.cl" },
- { "pad_layer_symmetric_reflect", "pad_layer.cl" },
- { "permute", "permute.cl" },
- { "pixelwise_mul_complex", "pixelwise_mul_float.cl" },
- { "pixelwise_mul_float", "pixelwise_mul_float.cl" },
- { "pixelwise_mul_int", "pixelwise_mul_int.cl" },
- { "pixelwise_mul_quantized", "pixelwise_mul_int.cl" },
- { "pooling_layer_2", "pooling_layer.cl" },
- { "pooling_layer_3", "pooling_layer.cl" },
- { "pooling_layer_optimized_3", "pooling_layer.cl" },
- { "pooling_layer_7", "pooling_layer.cl" },
- { "pooling_layer_MxN_nchw", "pooling_layer.cl" },
- { "pooling_layer_MxN_nhwc", "pooling_layer.cl" },
- { "pooling_layer_2x2_nhwc", "pooling_layer.cl" },
- { "pooling_layer_2_nchw_indices_fp32", "pooling_layer.cl" },
- { "pooling_layer_2_nchw_indices_fp16", "pooling_layer.cl" },
- { "pooling_layer_MxN_quantized_nhwc", "pooling_layer_quantized.cl" },
- { "pooling_layer_MxN_quantized_nchw", "pooling_layer_quantized.cl" },
- { "prior_box_layer_nchw", "prior_box_layer.cl" },
- { "qlstm_layer_normalization", "qlstm_layer_normalization.cl" },
- { "quantization_layer", "quantization_layer.cl" },
- { "range", "range.cl" },
- { "range_quantized", "range.cl" },
- { "reduction_operation_x", "reduction_operation.cl" },
- { "reduction_operation_non_parallel_x", "reduction_operation.cl" },
- { "reduction_operation_y", "reduction_operation.cl" },
- { "reduction_operation_z", "reduction_operation.cl" },
- { "reduction_operation_w", "reduction_operation.cl" },
- { "remap_nearest_neighbour", "remap.cl" },
- { "remap_bilinear", "remap.cl" },
- { "reorg_layer_nchw", "reorg_layer.cl" },
- { "reorg_layer_nhwc", "reorg_layer.cl" },
- { "reshape_layer", "reshape_layer.cl" },
- { "reshape_to_columns", "convolution_layer.cl" },
- { "reverse", "reverse.cl" },
- { "roi_align_layer", "roi_align_layer.cl" },
- { "roi_align_layer_quantized", "roi_align_layer_quantized.cl" },
- { "roi_pooling_layer", "roi_pooling_layer.cl" },
- { "scale_nearest_neighbour_nchw", "scale.cl" },
- { "scale_nearest_neighbour_nhwc", "scale.cl" },
- { "scale_bilinear_nchw", "scale.cl" },
- { "scale_bilinear_nhwc", "scale.cl" },
- { "scale_bilinear_quantized_nchw", "scale_quantized.cl" },
- { "scale_bilinear_quantized_nhwc", "scale_quantized.cl" },
- { "select_same_rank", "select.cl" },
- { "select_different_rank_2", "select.cl" },
- { "select_different_rank_n", "select.cl" },
- { "softmax_layer_norm", "softmax_layer.cl" },
- { "softmax_layer_norm_quantized", "softmax_layer_quantized.cl" },
- { "softmax_layer_max_shift_exp_sum_quantized_serial", "softmax_layer_quantized.cl" },
- { "softmax_layer_max_shift_exp_sum_quantized_parallel", "softmax_layer_quantized.cl" },
- { "softmax_layer_max_shift_exp_sum_serial", "softmax_layer.cl" },
- { "space_to_batch_nchw", "space_to_batch.cl" },
- { "space_to_batch_static_nchw", "space_to_batch.cl" },
- { "space_to_batch_nhwc", "space_to_batch.cl" },
- { "space_to_batch_static_nhwc", "space_to_batch.cl" },
- { "space_to_depth_nchw", "space_to_depth.cl" },
- { "space_to_depth_nhwc", "space_to_depth.cl" },
- { "softmax_layer_max_shift_exp_sum_parallel", "softmax_layer.cl" },
- { "stack_layer", "stack_layer.cl" },
- { "strided_slice", "slice_ops.cl" },
- { "tile", "tile.cl" },
- { "transpose", "transpose.cl" },
- { "upsample_layer_nchw", "upsample_layer.cl" },
- { "upsample_layer_nhwc", "upsample_layer.cl" },
- { "winograd_filter_transform_2x2_3x3_nchw", "winograd_filter_transform.cl" },
- { "winograd_filter_transform_2x1_3x1_nchw", "winograd_filter_transform.cl" },
- { "winograd_filter_transform_1x2_1x3_nchw", "winograd_filter_transform.cl" },
- { "winograd_filter_transform_4x4_3x3_nchw", "winograd_filter_transform.cl" },
- { "winograd_filter_transform_4x1_3x1_nchw", "winograd_filter_transform.cl" },
- { "winograd_filter_transform_1x4_1x3_nchw", "winograd_filter_transform.cl" },
- { "winograd_filter_transform_4x4_5x5_nchw", "winograd_filter_transform.cl" },
- { "winograd_filter_transform_4x1_5x1_nchw", "winograd_filter_transform.cl" },
- { "winograd_filter_transform_1x4_1x5_nchw", "winograd_filter_transform.cl" },
- { "winograd_filter_transform_4x1_3x1_nhwc", "winograd_filter_transform.cl" },
- { "winograd_filter_transform_1x4_1x3_nhwc", "winograd_filter_transform.cl" },
- { "winograd_filter_transform_4x4_3x3_nhwc", "winograd_filter_transform.cl" },
- { "winograd_filter_transform_4x4_5x5_nhwc", "winograd_filter_transform.cl" },
- { "winograd_filter_transform_4x1_5x1_nhwc", "winograd_filter_transform.cl" },
- { "winograd_filter_transform_1x4_1x5_nhwc", "winograd_filter_transform.cl" },
- { "winograd_filter_transform_2x2_7x7_nhwc", "winograd_filter_transform.cl" },
- { "winograd_filter_transform_2x1_7x1_nhwc", "winograd_filter_transform.cl" },
- { "winograd_filter_transform_1x2_1x7_nhwc", "winograd_filter_transform.cl" },
- { "winograd_input_transform_2x2_3x3_stepz1_nchw", "winograd_input_transform.cl" },
- { "winograd_input_transform_2x2_3x3_stepz2_nchw", "winograd_input_transform.cl" },
- { "winograd_input_transform_2x1_3x1_stepz1_nchw", "winograd_input_transform.cl" },
- { "winograd_input_transform_2x1_3x1_stepz2_nchw", "winograd_input_transform.cl" },
- { "winograd_input_transform_1x2_1x3_stepz1_nchw", "winograd_input_transform.cl" },
- { "winograd_input_transform_1x2_1x3_stepz2_nchw", "winograd_input_transform.cl" },
- { "winograd_input_transform_4x4_3x3_stepz1_nchw", "winograd_input_transform.cl" },
- { "winograd_input_transform_4x1_3x1_stepz1_nchw", "winograd_input_transform.cl" },
- { "winograd_input_transform_1x4_1x3_stepz1_nchw", "winograd_input_transform.cl" },
- { "winograd_input_transform_4x4_5x5_stepz1_nchw", "winograd_input_transform.cl" },
- { "winograd_input_transform_4x1_5x1_stepz1_nchw", "winograd_input_transform.cl" },
- { "winograd_input_transform_1x4_1x5_stepz1_nchw", "winograd_input_transform.cl" },
- { "winograd_input_transform_4x1_3x1_stepz1_nhwc", "winograd_input_transform.cl" },
- { "winograd_input_transform_1x4_1x3_stepz1_nhwc", "winograd_input_transform.cl" },
- { "winograd_input_transform_4x4_3x3_stepz1_nhwc", "winograd_input_transform.cl" },
- { "winograd_input_transform_4x4_5x5_stepz1_nhwc", "winograd_input_transform.cl" },
- { "winograd_input_transform_4x1_5x1_stepz1_nhwc", "winograd_input_transform.cl" },
- { "winograd_input_transform_1x4_1x5_stepz1_nhwc", "winograd_input_transform.cl" },
- { "winograd_input_transform_2x2_7x7_stepz1_nhwc", "winograd_input_transform.cl" },
- { "winograd_input_transform_2x1_7x1_stepz1_nhwc", "winograd_input_transform.cl" },
- { "winograd_input_transform_1x2_1x7_stepz1_nhwc", "winograd_input_transform.cl" },
- { "winograd_output_transform_2x2_3x3_nchw", "winograd_output_transform.cl" },
- { "winograd_output_transform_2x1_3x1_nchw", "winograd_output_transform.cl" },
- { "winograd_output_transform_1x2_1x3_nchw", "winograd_output_transform.cl" },
- { "winograd_output_transform_4x4_3x3_nchw", "winograd_output_transform.cl" },
- { "winograd_output_transform_4x1_3x1_nchw", "winograd_output_transform.cl" },
- { "winograd_output_transform_1x4_1x3_nchw", "winograd_output_transform.cl" },
- { "winograd_output_transform_4x4_5x5_nchw", "winograd_output_transform.cl" },
- { "winograd_output_transform_4x1_5x1_nchw", "winograd_output_transform.cl" },
- { "winograd_output_transform_1x4_1x5_nchw", "winograd_output_transform.cl" },
- { "winograd_output_transform_4x1_3x1_nhwc", "winograd_output_transform.cl" },
- { "winograd_output_transform_1x4_1x3_nhwc", "winograd_output_transform.cl" },
- { "winograd_output_transform_4x4_3x3_nhwc", "winograd_output_transform.cl" },
- { "winograd_output_transform_4x4_5x5_nhwc", "winograd_output_transform.cl" },
- { "winograd_output_transform_4x1_5x1_nhwc", "winograd_output_transform.cl" },
- { "winograd_output_transform_1x4_1x5_nhwc", "winograd_output_transform.cl" },
- { "winograd_output_transform_2x2_7x7_nhwc", "winograd_output_transform.cl" },
- { "winograd_output_transform_2x1_7x1_nhwc", "winograd_output_transform.cl" },
- { "winograd_output_transform_1x2_1x7_nhwc", "winograd_output_transform.cl" },
- { "yolo_layer_nchw", "yolo_layer.cl" },
- { "yolo_layer_nhwc", "yolo_layer.cl" },
-};
-
-const std::map<std::string, std::string> CLKernelLibrary::_program_source_map =
+namespace arm_compute
{
-#ifdef EMBEDDED_KERNELS
- {
- "activation_layer.cl",
-#include "./cl_kernels/activation_layer.clembed"
- },
- {
- "activation_layer_quant.cl",
-#include "./cl_kernels/activation_layer_quant.clembed"
- },
- {
- "arg_min_max.cl",
-#include "./cl_kernels/arg_min_max.clembed"
- },
- {
- "batch_to_space.cl",
-#include "./cl_kernels/batch_to_space.clembed"
- },
- {
- "bitwise_op.cl",
-#include "./cl_kernels/bitwise_op.clembed"
- },
- {
- "bounding_box_transform.cl",
-#include "./cl_kernels/bounding_box_transform.clembed"
- },
- {
- "bounding_box_transform_quantized.cl",
-#include "./cl_kernels/bounding_box_transform_quantized.clembed"
- },
- {
- "channel_shuffle.cl",
-#include "./cl_kernels/channel_shuffle.clembed"
- },
- {
- "col2im.cl",
-#include "./cl_kernels/col2im.clembed"
- },
- {
- "comparisons.cl",
-#include "./cl_kernels/comparisons.clembed"
- },
- {
- "concatenate.cl",
-#include "./cl_kernels/concatenate.clembed"
- },
- {
- "convert_fc_weights.cl",
-#include "./cl_kernels/convert_fc_weights.clembed"
- },
- {
- "convolution_layer.cl",
-#include "./cl_kernels/convolution_layer.clembed"
- },
- {
- "copy_tensor.cl",
-#include "./cl_kernels/copy_tensor.clembed"
- },
- {
- "crop_tensor.cl",
-#include "./cl_kernels/crop_tensor.clembed"
- },
- {
- "upsample_layer.cl",
-#include "./cl_kernels/upsample_layer.clembed"
- },
- {
- "deconvolution_layer.cl",
-#include "./cl_kernels/deconvolution_layer.clembed"
- },
- {
- "depth_convert.cl",
-#include "./cl_kernels/depth_convert.clembed"
- },
- {
- "depth_to_space.cl",
-#include "./cl_kernels/depth_to_space.clembed"
- },
- {
- "depthwise_convolution.cl",
-#include "./cl_kernels/depthwise_convolution.clembed"
- },
- {
- "depthwise_convolution_quantized.cl",
-#include "./cl_kernels/depthwise_convolution_quantized.clembed"
- },
- {
- "dequantization_layer.cl",
-#include "./cl_kernels/dequantization_layer.clembed"
- },
- {
- "direct_convolution1x1.cl",
-#include "./cl_kernels/direct_convolution1x1.clembed"
- },
- {
- "direct_convolution3x3.cl",
-#include "./cl_kernels/direct_convolution3x3.clembed"
- },
- {
- "direct_convolution5x5.cl",
-#include "./cl_kernels/direct_convolution5x5.clembed"
- },
- {
- "direct_convolution_quantized.cl",
-#include "./cl_kernels/direct_convolution_quantized.clembed"
- },
- {
- "direct_convolution.cl",
-#include "./cl_kernels/direct_convolution.clembed"
- },
- {
- "elementwise_operation.cl",
-#include "./cl_kernels/elementwise_operation.clembed"
- },
- {
- "elementwise_operation_quantized.cl",
-#include "./cl_kernels/elementwise_operation_quantized.clembed"
- },
- {
- "elementwise_unary.cl",
-#include "./cl_kernels/elementwise_unary.clembed"
- },
- {
- "fft.cl",
-#include "./cl_kernels/fft.clembed"
- },
- {
- "fft_digit_reverse.cl",
-#include "./cl_kernels/fft_digit_reverse.clembed"
- },
- {
- "fft_scale.cl",
-#include "./cl_kernels/fft_scale.clembed"
- },
- {
- "fill_border.cl",
-#include "./cl_kernels/fill_border.clembed"
- },
- {
- "floor.cl",
-#include "./cl_kernels/floor.clembed"
- },
- {
- "gather.cl",
-#include "./cl_kernels/gather.clembed"
- },
- {
- "gemm.cl",
-#include "./cl_kernels/gemm.clembed"
- },
- {
- "gemm_v1.cl",
-#include "./cl_kernels/gemm_v1.clembed"
- },
- {
- "gemmlowp.cl",
-#include "./cl_kernels/gemmlowp.clembed"
- },
- {
- "gemv.cl",
-#include "./cl_kernels/gemv.clembed"
- },
- {
- "generate_proposals.cl",
-#include "./cl_kernels/generate_proposals.clembed"
- },
- {
- "generate_proposals_quantized.cl",
-#include "./cl_kernels/generate_proposals_quantized.clembed"
- },
- {
- "helpers.h",
-#include "./cl_kernels/helpers.hembed"
- },
- {
- "helpers_asymm.h",
-#include "./cl_kernels/helpers_asymm.hembed"
- },
- {
- "im2col.cl",
-#include "./cl_kernels/im2col.clembed"
- },
- {
- "instance_normalization.cl",
-#include "./cl_kernels/instance_normalization.clembed"
- },
- {
- "l2_normalize.cl",
-#include "./cl_kernels/l2_normalize.clembed"
- },
- {
- "mean_stddev_normalization.cl",
-#include "./cl_kernels/mean_stddev_normalization.clembed"
- },
- {
- "memset.cl",
-#include "./cl_kernels/memset.clembed"
- },
- {
- "minmax_layer.cl",
-#include "./cl_kernels/minmax_layer.clembed"
- },
- {
- "nonmax.cl",
-#include "./cl_kernels/nonmax.clembed"
- },
- {
- "normalization_layer.cl",
-#include "./cl_kernels/normalization_layer.clembed"
- },
- {
- "normalize_planar_yuv_layer.cl",
-#include "./cl_kernels/normalize_planar_yuv_layer.clembed"
- },
- {
- "normalize_planar_yuv_layer_quantized.cl",
-#include "./cl_kernels/normalize_planar_yuv_layer_quantized.clembed"
- },
- {
- "batchnormalization_layer.cl",
-#include "./cl_kernels/batchnormalization_layer.clembed"
- },
- {
- "pad_layer.cl",
-#include "./cl_kernels/pad_layer.clembed"
- },
- {
- "permute.cl",
-#include "./cl_kernels/permute.clembed"
- },
- {
- "pixelwise_mul_float.cl",
-#include "./cl_kernels/pixelwise_mul_float.clembed"
- },
- {
- "pixelwise_mul_int.cl",
-#include "./cl_kernels/pixelwise_mul_int.clembed"
- },
- {
- "pooling_layer.cl",
-#include "./cl_kernels/pooling_layer.clembed"
- },
- {
- "pooling_layer_quantized.cl",
-#include "./cl_kernels/pooling_layer_quantized.clembed"
- },
- {
- "prior_box_layer.cl",
-#include "./cl_kernels/prior_box_layer.clembed"
- },
- {
- "qlstm_layer_normalization.cl",
-#include "./cl_kernels/qlstm_layer_normalization.clembed"
- },
- {
- "quantization_layer.cl",
-#include "./cl_kernels/quantization_layer.clembed"
- },
- {
- "range.cl",
-#include "./cl_kernels/range.clembed"
- },
- {
- "reduction_operation.cl",
-#include "./cl_kernels/reduction_operation.clembed"
- },
- {
- "remap.cl",
-#include "./cl_kernels/remap.clembed"
- },
- {
- "reorg_layer.cl",
-#include "./cl_kernels/reorg_layer.clembed"
- },
- {
- "reshape_layer.cl",
-#include "./cl_kernels/reshape_layer.clembed"
- },
- {
- "reverse.cl",
-#include "./cl_kernels/reverse.clembed"
- },
- {
- "roi_align_layer.cl",
-#include "./cl_kernels/roi_align_layer.clembed"
- },
- {
- "roi_align_layer_quantized.cl",
-#include "./cl_kernels/roi_align_layer_quantized.clembed"
- },
- {
- "roi_pooling_layer.cl",
-#include "./cl_kernels/roi_pooling_layer.clembed"
- },
- {
- "scale.cl",
-#include "./cl_kernels/scale.clembed"
- },
- {
- "scale_quantized.cl",
-#include "./cl_kernels/scale_quantized.clembed"
- },
- {
- "select.cl",
-#include "./cl_kernels/select.clembed"
- },
- {
- "softmax_layer.cl",
-#include "./cl_kernels/softmax_layer.clembed"
- },
- {
- "softmax_layer_quantized.cl",
-#include "./cl_kernels/softmax_layer_quantized.clembed"
- },
- {
- "slice_ops.cl",
-#include "./cl_kernels/slice_ops.clembed"
- },
- {
- "space_to_batch.cl",
-#include "./cl_kernels/space_to_batch.clembed"
- },
- {
- "space_to_depth.cl",
-#include "./cl_kernels/space_to_depth.clembed"
- },
- {
- "stack_layer.cl",
-#include "./cl_kernels/stack_layer.clembed"
- },
- {
- "tile.cl",
-#include "./cl_kernels/tile.clembed"
- },
- {
- "transpose.cl",
-#include "./cl_kernels/transpose.clembed"
- },
- {
- "types.h",
-#include "./cl_kernels/types.hembed"
- },
- {
- "unpooling_layer.cl",
-#include "./cl_kernels/unpooling_layer.clembed"
- },
- {
- "winograd_filter_transform.cl",
-#include "./cl_kernels/winograd_filter_transform.clembed"
- },
- {
- "winograd_input_transform.cl",
-#include "./cl_kernels/winograd_input_transform.clembed"
- },
- {
- "winograd_output_transform.cl",
-#include "./cl_kernels/winograd_output_transform.clembed"
- },
- {
- "yolo_layer.cl",
-#include "./cl_kernels/yolo_layer.clembed"
- },
-#endif /* EMBEDDED_KERNELS */
-};
-
-CLKernelLibrary::CLKernelLibrary()
- : _compile_context(), _kernel_path(), _decompressed_source_map()
+CLKernelLibrary::CLKernelLibrary() : _compile_context()
{
opencl_is_available(); // Make sure the OpenCL symbols are initialised *before* the CLKernelLibrary is built
}
-
CLKernelLibrary &CLKernelLibrary::get()
{
static CLKernelLibrary _kernel_library;
return _kernel_library;
}
-
-Kernel CLKernelLibrary::create_kernel(const std::string &kernel_name, const std::set<std::string> &build_options_set) const
+Kernel CLKernelLibrary::create_kernel(const std::string &kernel_name,
+ const std::set<std::string> &build_options_set) const
{
- const std::string program_name = get_program_name(kernel_name);
- auto program = get_program(program_name);
-
- return _compile_context.create_kernel(kernel_name, program_name, program.first, _kernel_path, build_options_set, program.second);
+ const opencl::ClKernelLibrary &klib = opencl::ClKernelLibrary::get();
+ const std::string program_name = klib.program_name(kernel_name);
+ auto program = klib.program(program_name);
+ const std::string &kernel_path = CLKernelLibrary::get().get_kernel_path();
+ return _compile_context.create_kernel(kernel_name, program_name, program.program, kernel_path, build_options_set,
+ program.is_binary);
}
-
std::string CLKernelLibrary::get_program_name(const std::string &kernel_name) const
{
- // Find which program contains the kernel
- auto kernel_program_it = _kernel_program_map.find(kernel_name);
-
- if(_kernel_program_map.end() == kernel_program_it)
- {
- ARM_COMPUTE_ERROR_VAR("Kernel %s not found in the CLKernelLibrary", kernel_name.c_str());
- }
-
- const std::string program_name = kernel_program_it->second;
-
- return program_name;
+ return opencl::ClKernelLibrary::get().program_name(kernel_name);
}
-
void CLKernelLibrary::init(std::string kernel_path, cl::Context context, cl::Device device)
{
_compile_context = CLCompileContext(context, device);
- _kernel_path = kernel_path;
+ opencl::ClKernelLibrary::get().set_kernel_path(kernel_path);
}
-
void CLKernelLibrary::set_kernel_path(const std::string &kernel_path)
{
- _kernel_path = std::move(kernel_path);
+ opencl::ClKernelLibrary::get().set_kernel_path(kernel_path);
}
-
cl::Context &CLKernelLibrary::context()
{
return _compile_context.context();
}
-
const cl::Device &CLKernelLibrary::get_device()
{
return _compile_context.get_device();
}
-
void CLKernelLibrary::set_device(cl::Device device)
{
_compile_context.set_device(device);
}
-
void CLKernelLibrary::set_context(cl::Context context)
{
_compile_context.set_context(context);
}
-
std::string CLKernelLibrary::get_kernel_path()
{
- return _kernel_path;
+ return opencl::ClKernelLibrary::get().kernel_path();
}
-
void CLKernelLibrary::clear_programs_cache()
{
_compile_context.clear_programs_cache();
}
-
const std::map<std::string, cl::Program> &CLKernelLibrary::get_built_programs() const
{
return _compile_context.get_built_programs();
}
-
void CLKernelLibrary::add_built_program(const std::string &built_program_name, const cl::Program &program)
{
_compile_context.add_built_program(built_program_name, program);
}
-
bool CLKernelLibrary::fp16_supported() const
{
return _compile_context.fp16_supported();
}
-
bool CLKernelLibrary::int64_base_atomics_supported() const
{
return _compile_context.int64_base_atomics_supported();
}
-
bool CLKernelLibrary::is_wbsm_supported()
{
return _compile_context.is_wbsm_supported();
}
-
std::pair<std::string, bool> CLKernelLibrary::get_program(const std::string &program_name) const
{
-#ifdef EMBEDDED_KERNELS
-#ifdef ARM_COMPUTE_COMPRESSED_KERNELS
- const auto inflatted_program_source_it = _decompressed_source_map.find(program_name);
- if(inflatted_program_source_it != _decompressed_source_map.end())
- {
- return std::make_pair(inflatted_program_source_it->second, false);
- }
-#endif /* ARM_COMPUTE_COMPRESSED_KERNELS */
-
- const auto program_source_it = _program_source_map.find(program_name);
- if(program_source_it == _program_source_map.end())
- {
- ARM_COMPUTE_ERROR_VAR("Embedded program for %s does not exist.", program_name.c_str());
- }
- std::string program_source = program_source_it->second;
-
-#ifdef ARM_COMPUTE_COMPRESSED_KERNELS
- std::string decompressed_program_source = decompress_zlib(decode_base64(program_source_it->second));
- ARM_COMPUTE_ERROR_ON_MSG(decompressed_program_source.empty(), "Cannot de-compress requested program");
- _decompressed_source_map.insert(std::make_pair(program_name, decompressed_program_source));
- program_source = std::move(decompressed_program_source);
-#endif /* ARM_COMPUTE_COMPRESSED_KERNELS */
-
- return std::make_pair(program_source, false);
-#else /* EMBEDDED_KERNELS */
- // Check for binary
- std::string source_name = _kernel_path + program_name;
- std::string binary_name = source_name + "bin";
- std::string program_source{};
- bool is_binary = false;
-
- if(std::ifstream(binary_name).is_open())
- {
- program_source = read_file(binary_name, true);
- is_binary = true;
- }
- else if(std::ifstream(source_name).is_open())
- {
- program_source = read_file(source_name, false);
- }
- else
- {
- ARM_COMPUTE_ERROR_VAR("Kernel file %s does not exist.", source_name.c_str());
- }
-
- return std::make_pair(program_source, is_binary);
-#endif /* EMBEDDED_KERNELS */
+ auto program_info = opencl::ClKernelLibrary::get().program(program_name);
+ return std::make_pair(std::move(program_info.program), program_info.is_binary);
}
-
size_t CLKernelLibrary::max_local_workgroup_size(const cl::Kernel &kernel) const
{
return _compile_context.max_local_workgroup_size(kernel);
}
-
cl::NDRange CLKernelLibrary::default_ndrange() const
{
return _compile_context.default_ndrange();
}
-
std::string CLKernelLibrary::get_device_version()
{
return _compile_context.get_device_version();
}
-
cl_uint CLKernelLibrary::get_num_compute_units()
{
return _compile_context.get_num_compute_units();
}
-
CLCompileContext &CLKernelLibrary::get_compile_context()
{
return _compile_context;
}
+} // namespace arm_compute
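The public behaviour of the singleton is unchanged; a minimal sketch of the usual flow, with illustrative path, kernel name and options (context/device acquisition is assumed):
    // Initialise once, then compile kernels; program lookup is forwarded to opencl::ClKernelLibrary.
    CLKernelLibrary &lib = CLKernelLibrary::get();
    lib.init("./cl_kernels/", cl::Context::getDefault(), cl::Device::getDefault());
    Kernel kernel = lib.create_kernel("activation_layer", {"-DACT=RELU"}); // illustrative options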
diff --git a/src/core/CL/CLKernels.h b/src/core/CL/CLKernels.h
index 22c9cd9c0c..9a681a4f45 100644
--- a/src/core/CL/CLKernels.h
+++ b/src/core/CL/CLKernels.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2021 Arm Limited.
+ * Copyright (c) 2016-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CLKERNELS_H
-#define ARM_COMPUTE_CLKERNELS_H
+#ifndef ACL_SRC_CORE_CL_CLKERNELS_H
+#define ACL_SRC_CORE_CL_CLKERNELS_H
/* Header regrouping all the CL kernels */
#include "src/core/CL/kernels/CLArgMinMaxLayerKernel.h"
@@ -31,75 +31,38 @@
#include "src/core/CL/kernels/CLBitwiseKernel.h"
#include "src/core/CL/kernels/CLBoundingBoxTransformKernel.h"
#include "src/core/CL/kernels/CLChannelShuffleLayerKernel.h"
-#include "src/core/CL/kernels/CLCol2ImKernel.h"
#include "src/core/CL/kernels/CLComparisonKernel.h"
-#include "src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h"
#include "src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h"
#include "src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "src/core/CL/kernels/CLDepthToSpaceLayerKernel.h"
-#include "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h"
-#include "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h"
#include "src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h"
-#include "src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h"
-#include "src/core/CL/kernels/CLDequantizationLayerKernel.h"
-#include "src/core/CL/kernels/CLDirectConvolutionLayerKernel.h"
#include "src/core/CL/kernels/CLFFTDigitReverseKernel.h"
#include "src/core/CL/kernels/CLFFTRadixStageKernel.h"
#include "src/core/CL/kernels/CLFFTScaleKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLFuseBatchNormalizationKernel.h"
-#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
-#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.h"
-#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
-#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h"
-#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h"
-#include "src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.h"
-#include "src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.h"
-#include "src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.h"
-#include "src/core/CL/kernels/CLGEMMLowpReductionKernel.h"
-#include "src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
-#include "src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.h"
-#include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h"
-#include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h"
-#include "src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h"
-#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
#include "src/core/CL/kernels/CLGatherKernel.h"
#include "src/core/CL/kernels/CLGenerateProposalsLayerKernel.h"
-#include "src/core/CL/kernels/CLIm2ColKernel.h"
#include "src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h"
#include "src/core/CL/kernels/CLL2NormalizeLayerKernel.h"
-#include "src/core/CL/kernels/CLLKTrackerKernel.h"
#include "src/core/CL/kernels/CLMaxUnpoolingLayerKernel.h"
#include "src/core/CL/kernels/CLMeanStdDevNormalizationKernel.h"
-#include "src/core/CL/kernels/CLMinMaxLayerKernel.h"
#include "src/core/CL/kernels/CLNormalizationLayerKernel.h"
#include "src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.h"
#include "src/core/CL/kernels/CLPadLayerKernel.h"
-#include "src/core/CL/kernels/CLPixelWiseMultiplicationKernel.h"
#include "src/core/CL/kernels/CLPriorBoxLayerKernel.h"
#include "src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.h"
-#include "src/core/CL/kernels/CLQuantizationLayerKernel.h"
-#include "src/core/CL/kernels/CLROIAlignLayerKernel.h"
-#include "src/core/CL/kernels/CLROIPoolingLayerKernel.h"
#include "src/core/CL/kernels/CLRangeKernel.h"
#include "src/core/CL/kernels/CLReductionOperationKernel.h"
-#include "src/core/CL/kernels/CLRemapKernel.h"
#include "src/core/CL/kernels/CLReorgLayerKernel.h"
#include "src/core/CL/kernels/CLReverseKernel.h"
-#include "src/core/CL/kernels/CLScaleKernel.h"
+#include "src/core/CL/kernels/CLROIAlignLayerKernel.h"
+#include "src/core/CL/kernels/CLROIPoolingLayerKernel.h"
#include "src/core/CL/kernels/CLSelectKernel.h"
-#include "src/core/CL/kernels/CLSoftmaxLayerKernel.h"
#include "src/core/CL/kernels/CLSpaceToBatchLayerKernel.h"
#include "src/core/CL/kernels/CLSpaceToDepthLayerKernel.h"
#include "src/core/CL/kernels/CLStackLayerKernel.h"
#include "src/core/CL/kernels/CLStridedSliceKernel.h"
#include "src/core/CL/kernels/CLTileKernel.h"
-#include "src/core/CL/kernels/CLTransposeKernel.h"
-#include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
-#include "src/core/CL/kernels/CLWinogradFilterTransformKernel.h"
-#include "src/core/CL/kernels/CLWinogradInputTransformKernel.h"
-#include "src/core/CL/kernels/CLWinogradOutputTransformKernel.h"
-#include "src/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h"
-#endif /* ARM_COMPUTE_CLKERNELS_H */
+#endif // ACL_SRC_CORE_CL_CLKERNELS_H
diff --git a/src/core/CL/CLMutableCommandBuffer.cpp b/src/core/CL/CLMutableCommandBuffer.cpp
new file mode 100644
index 0000000000..0e078d8416
--- /dev/null
+++ b/src/core/CL/CLMutableCommandBuffer.cpp
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "src/core/CL/CLMutableCommandBuffer.h"
+
+#include "arm_compute/core/Error.h"
+
+#include "src/common/utils/Log.h"
+#include "src/core/CL/CLUtils.h"
+
+namespace arm_compute
+{
+
+CLMutableCommandBuffer::CLMutableCommandBuffer(cl_command_queue queue) : CLCommandBuffer()
+{
+ cl_int status = CL_SUCCESS;
+
+ cl_command_buffer_properties_khr properties[] = {
+ CL_COMMAND_BUFFER_FLAGS_KHR,
+ CL_COMMAND_BUFFER_MUTABLE_KHR,
+ 0,
+ };
+
+ _cb = clCreateCommandBufferKHR(1, &queue, properties, &status);
+ handle_cl_error("clCreateCommandBufferKHR", status);
+}
+
+CLMutableCommandBuffer::~CLMutableCommandBuffer()
+{
+ const auto status = clReleaseCommandBufferKHR(_cb);
+ if (status != CL_SUCCESS)
+ {
+ const std::string error_message = "clReleaseCommandBufferKHR - Error code: " + std::to_string(status);
+ ARM_COMPUTE_LOG_ERROR_ACL(error_message);
+ }
+}
+
+void CLMutableCommandBuffer::add_kernel(cl_kernel kernel,
+ const cl::NDRange &offset,
+ const cl::NDRange &global,
+ const cl::NDRange &local)
+{
+ ARM_COMPUTE_ERROR_ON(state() != State::Created);
+
+ cl_mutable_command_khr mutable_handle = nullptr;
+
+ cl_ndrange_kernel_command_properties_khr properties[] = {
+ CL_MUTABLE_DISPATCH_UPDATABLE_FIELDS_KHR,
+ CL_MUTABLE_DISPATCH_ARGUMENTS_KHR,
+ 0,
+ };
+
+ const auto error = clCommandNDRangeKernelKHR(
+ _cb, nullptr, properties, kernel, global.dimensions(), offset.dimensions() != 0 ? offset.get() : nullptr,
+ global.get(), local.dimensions() != 0 ? local.get() : nullptr, 0, nullptr, nullptr, &mutable_handle);
+
+ handle_cl_error("clCommandNDRangeKernelKHR", error);
+
+ cl_mutable_dispatch_config_khr mut_dispatch_cfg{};
+ mut_dispatch_cfg.type = CL_STRUCTURE_TYPE_MUTABLE_DISPATCH_CONFIG_KHR;
+ mut_dispatch_cfg.command = mutable_handle;
+
+ _mut_dispatch_cfgs.emplace_back(mut_dispatch_cfg);
+}
+
+void CLMutableCommandBuffer::add_mutable_argument_generic(cl_uint arg_idx, const void *value, size_t size)
+{
+ ARM_COMPUTE_ERROR_ON(state() != State::Created);
+
+ cl_mutable_dispatch_arg_khr cfg{};
+ cfg.arg_index = arg_idx;
+ cfg.arg_size = size;
+ cfg.arg_value = value;
+
+ _mut_arg_cfgs.emplace_back(cfg);
+ ++_mut_dispatch_cfgs.back().num_args;
+}
+
+void CLMutableCommandBuffer::finalize()
+{
+ ARM_COMPUTE_ERROR_ON(state() != State::Created);
+
+ const auto error = clFinalizeCommandBufferKHR(_cb);
+ handle_cl_error("clFinalizeCommandBufferKHR", error);
+
+ state(State::Finalized);
+
+ _mut_dispatch_cfgs.shrink_to_fit();
+ _mut_arg_cfgs.shrink_to_fit();
+
+ size_t arg_no = 0;
+
+ for (auto &mut_dispatch_cfg : _mut_dispatch_cfgs)
+ {
+ ARM_COMPUTE_ERROR_ON(arg_no >= _mut_arg_cfgs.size());
+ mut_dispatch_cfg.arg_list = &_mut_arg_cfgs[arg_no];
+
+ arg_no += mut_dispatch_cfg.num_args;
+ }
+
+ _mut_cfg.type = CL_STRUCTURE_TYPE_MUTABLE_BASE_CONFIG_KHR;
+ _mut_cfg.next = nullptr;
+ _mut_cfg.num_mutable_dispatch = _mut_dispatch_cfgs.size();
+ _mut_cfg.mutable_dispatch_list = &_mut_dispatch_cfgs[0];
+}
+
+void CLMutableCommandBuffer::update()
+{
+ ARM_COMPUTE_ERROR_ON(state() != State::Finalized);
+
+ const auto error = clUpdateMutableCommandsKHR(_cb, &_mut_cfg);
+
+ handle_cl_error("clUpdateMutableCommandsKHR", error);
+}
+
+void CLMutableCommandBuffer::enqueue()
+{
+ ARM_COMPUTE_ERROR_ON(state() != State::Finalized);
+
+ const auto error = clEnqueueCommandBufferKHR(0, nullptr, _cb, 0, nullptr, nullptr);
+
+ handle_cl_error("clEnqueueCommandBufferKHR", error);
+}
+
+bool CLMutableCommandBuffer::is_finalized() const
+{
+ return state() == State::Finalized;
+}
+
+} // namespace arm_compute
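A hedged lifecycle sketch for the new class, assuming an existing cl::CommandQueue queue and cl::Kernel kernel; the mutable-argument setter is expected to be exposed by the CLCommandBuffer base class and is only referenced in a comment here:
    CLMutableCommandBuffer cmd_buf(queue.get());
    cmd_buf.add_kernel(kernel.get(), cl::NullRange, cl::NDRange(256), cl::NullRange);
    cmd_buf.finalize();   // Created -> Finalized; builds the mutable dispatch config list
    cmd_buf.enqueue();    // clEnqueueCommandBufferKHR on the queue given at construction
    // ...update one or more kernel arguments through the base-class API, then:
    cmd_buf.update();     // clUpdateMutableCommandsKHR with the cached base config
    cmd_buf.enqueue();    // replay the command buffer with the new arguments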
diff --git a/src/core/CL/CLMutableCommandBuffer.h b/src/core/CL/CLMutableCommandBuffer.h
new file mode 100644
index 0000000000..8997d7d1fd
--- /dev/null
+++ b/src/core/CL/CLMutableCommandBuffer.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ACL_SRC_CORE_CL_CLMUTABLECOMMANDBUFFER_H
+#define ACL_SRC_CORE_CL_CLMUTABLECOMMANDBUFFER_H
+
+#include "src/core/CL/CLCommandBuffer.h"
+
+#include <vector>
+
+namespace arm_compute
+{
+
+/** Command buffer implementation based on the CL mutable dispatch command buffer extension. */
+class CLMutableCommandBuffer : public CLCommandBuffer
+{
+public:
+ /** Create a new mutable dispatch command buffer targeting the specified command queue.
+ *
+ * @param[in] queue The command queue to execute the command buffer.
+ */
+ CLMutableCommandBuffer(cl_command_queue queue);
+
+ /** Destructor. */
+ virtual ~CLMutableCommandBuffer();
+
+ /** Disallow copy constructor. */
+ CLMutableCommandBuffer(const CLMutableCommandBuffer &) = delete;
+
+ /** Disallow copy assignment. */
+ CLMutableCommandBuffer &operator=(const CLMutableCommandBuffer &) = delete;
+
+ /** Disallow move constructor. */
+ CLMutableCommandBuffer(CLMutableCommandBuffer &&) = delete;
+
+ /** Disallow move assignment. */
+ CLMutableCommandBuffer &operator=(CLMutableCommandBuffer &&) = delete;
+
+ void add_kernel(cl_kernel kernel,
+ const cl::NDRange &offset,
+ const cl::NDRange &global,
+ const cl::NDRange &local) override;
+
+ void finalize() override;
+
+ void update() override;
+
+ void enqueue() override;
+
+ bool is_finalized() const override;
+
+protected:
+ void add_mutable_argument_generic(cl_uint arg_idx, const void *value, size_t size) override;
+
+private:
+ cl_command_buffer_khr _cb{};
+ cl_mutable_base_config_khr _mut_cfg{};
+ std::vector<cl_mutable_dispatch_config_khr> _mut_dispatch_cfgs{};
+ std::vector<cl_mutable_dispatch_arg_khr> _mut_arg_cfgs{};
+};
+
+} // namespace arm_compute
+
+#endif // ACL_SRC_CORE_CL_CLMUTABLECOMMANDBUFFER_H
diff --git a/src/core/CL/CLTracePoint.cpp b/src/core/CL/CLTracePoint.cpp
deleted file mode 100644
index c3d4899740..0000000000
--- a/src/core/CL/CLTracePoint.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2020-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/core/TracePoint.h"
-
-#include "arm_compute/core/CL/CLTypes.h"
-#include "arm_compute/core/CL/ICLArray.h"
-#include "arm_compute/core/CL/ICLDistribution1D.h"
-#include "arm_compute/core/CL/ICLHOG.h"
-#include "arm_compute/core/CL/ICLLut.h"
-#include "arm_compute/core/CL/ICLMultiHOG.h"
-#include "arm_compute/core/CL/ICLMultiImage.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "utils/TypePrinter.h"
-
-#include <vector>
-
-namespace arm_compute
-{
-std::string to_string(const ICLTensor &arg)
-{
- std::stringstream str;
- str << "TensorInfo(" << *arg.info() << ")";
- return str.str();
-}
-
-template <>
-TracePoint::Args &&operator<<(TracePoint::Args &&tp, const ICLTensor *arg)
-{
- tp.args.push_back("ICLTensor(" + to_string_if_not_null(arg) + ")");
- return std::move(tp);
-}
-
-ARM_COMPUTE_TRACE_TO_STRING(std::vector<ICLTensor *>)
-ARM_COMPUTE_TRACE_TO_STRING(ICLMultiImage)
-ARM_COMPUTE_TRACE_TO_STRING(ICLDetectionWindowArray)
-ARM_COMPUTE_TRACE_TO_STRING(ICLKeyPointArray)
-ARM_COMPUTE_TRACE_TO_STRING(ICLLKInternalKeypointArray)
-ARM_COMPUTE_TRACE_TO_STRING(ICLCoefficientTableArray)
-ARM_COMPUTE_TRACE_TO_STRING(ICLCoordinates2DArray)
-ARM_COMPUTE_TRACE_TO_STRING(ICLOldValArray)
-ARM_COMPUTE_TRACE_TO_STRING(cl::Buffer)
-ARM_COMPUTE_TRACE_TO_STRING(ICLDistribution1D)
-ARM_COMPUTE_TRACE_TO_STRING(ICLMultiHOG)
-ARM_COMPUTE_TRACE_TO_STRING(ICLHOG)
-ARM_COMPUTE_TRACE_TO_STRING(ICLLut)
-ARM_COMPUTE_TRACE_TO_STRING(ICLSize2DArray)
-ARM_COMPUTE_TRACE_TO_STRING(std::vector<const ICLTensor *>)
-
-ARM_COMPUTE_CONST_PTR_CLASS(std::vector<ICLTensor *>)
-ARM_COMPUTE_CONST_PTR_CLASS(ICLMultiImage)
-ARM_COMPUTE_CONST_PTR_CLASS(ICLDetectionWindowArray)
-ARM_COMPUTE_CONST_PTR_CLASS(ICLKeyPointArray)
-ARM_COMPUTE_CONST_PTR_CLASS(ICLLKInternalKeypointArray)
-ARM_COMPUTE_CONST_PTR_CLASS(ICLCoefficientTableArray)
-ARM_COMPUTE_CONST_PTR_CLASS(ICLCoordinates2DArray)
-ARM_COMPUTE_CONST_PTR_CLASS(ICLOldValArray)
-ARM_COMPUTE_CONST_PTR_CLASS(cl::Buffer)
-ARM_COMPUTE_CONST_PTR_CLASS(ICLDistribution1D)
-ARM_COMPUTE_CONST_PTR_CLASS(ICLMultiHOG)
-ARM_COMPUTE_CONST_PTR_CLASS(ICLHOG)
-ARM_COMPUTE_CONST_PTR_CLASS(ICLLut)
-ARM_COMPUTE_CONST_PTR_CLASS(ICLSize2DArray)
-ARM_COMPUTE_CONST_PTR_CLASS(std::vector<const ICLTensor *>)
-} // namespace arm_compute
diff --git a/src/core/CL/CLUtils.cpp b/src/core/CL/CLUtils.cpp
index 67af240044..290ed32648 100644
--- a/src/core/CL/CLUtils.cpp
+++ b/src/core/CL/CLUtils.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,16 +21,60 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Types.h"
-
#include "src/core/CL/CLUtils.h"
-cl::Image2D arm_compute::create_image2d_from_buffer(const cl::Context &ctx, const cl::Buffer &buffer, const TensorShape &shape2d, DataType data_type, size_t image_row_pitch)
+#include "arm_compute/core/CL/CLCompileContext.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/utils/ActivationFunctionUtils.h"
+#include "arm_compute/core/utils/StringUtils.h"
+#include "arm_compute/core/Validate.h"
+
+#include "support/StringSupport.h"
+
+namespace arm_compute
+{
+cl::Image2D create_image2d_from_tensor(const ICLTensor *tensor, CLImage2DType image_type)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(tensor);
+
+ const cl::Context &ctx = CLKernelLibrary::get().context();
+ const cl::Buffer &buffer = tensor->cl_buffer();
+ const ITensorInfo *info = tensor->info();
+ ARM_COMPUTE_ERROR_ON_MSG(info->lock_paddings(), "Tensor paddings must not be locked to allow extending paddings to "
+ "satisfy cl_image pitch alignment requirement");
+
+ const size_t image_w{info->dimension(0) / 4};
+ const size_t image_h{info->tensor_shape().total_size() / info->dimension(0)};
+ const size_t max_image_w{CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_IMAGE2D_MAX_WIDTH>()};
+ const size_t max_image_h{CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_IMAGE2D_MAX_HEIGHT>()};
+
+ ARM_COMPUTE_UNUSED(max_image_w, max_image_h);
+ ARM_COMPUTE_ERROR_ON_MSG(image_w > max_image_w, "Image width exceeds maximum width for exporting to cl_image");
+ ARM_COMPUTE_ERROR_ON_MSG(image_h > max_image_h, "Image height exceeds maximum height for exporting to cl_image");
+
+ const TensorShape shape2d(image_w, image_h);
+ const size_t image_row_pitch = info->strides_in_bytes()[1];
+
+ return create_image2d_from_buffer(ctx, buffer, shape2d, info->data_type(), image_row_pitch, image_type);
+}
+
+cl::Image2D create_image2d_from_buffer(const cl::Context &ctx,
+ const cl::Buffer &buffer,
+ const TensorShape &shape2d,
+ DataType data_type,
+ size_t image_row_pitch,
+ CLImage2DType image_type)
{
+ ARM_COMPUTE_ERROR_ON_MSG(!image2d_from_buffer_supported(CLKernelLibrary::get().get_device()),
+ "The extension cl_khr_image2d_from_buffer is not supported on the target platform");
+ ARM_COMPUTE_ERROR_ON_MSG(get_cl_image_pitch_alignment(CLKernelLibrary::get().get_device()) == 0,
+ "Impossible to retrieve the cl_image pitch alignment");
+ ARM_COMPUTE_ERROR_ON_MSG(buffer.get() == nullptr, "Cannot create cl_image from empty cl_buffer");
+
cl_channel_type cl_data_type;
- switch(data_type)
+ switch (data_type)
{
case DataType::F32:
cl_data_type = CL_FLOAT;
@@ -45,7 +89,7 @@ cl::Image2D arm_compute::create_image2d_from_buffer(const cl::Context &ctx, cons
cl_mem cl_image;
cl_int err = CL_SUCCESS;
- const cl_image_format format = { CL_RGBA, cl_data_type };
+ const cl_image_format format = {CL_RGBA, cl_data_type};
cl_image_desc desc;
memset(&desc, 0, sizeof(desc));
@@ -55,10 +99,31 @@ cl::Image2D arm_compute::create_image2d_from_buffer(const cl::Context &ctx, cons
desc.image_width = shape2d[0];
desc.image_height = shape2d[1];
- cl_image = clCreateImage(ctx(), CL_MEM_READ_ONLY, &format, &desc, nullptr, &err);
+ switch (image_type)
+ {
+ case CLImage2DType::ReadOnly:
+ cl_image = clCreateImage(ctx(), CL_MEM_READ_ONLY, &format, &desc, nullptr, &err);
+ break;
+ case CLImage2DType::WriteOnly:
+ cl_image = clCreateImage(ctx(), CL_MEM_WRITE_ONLY, &format, &desc, nullptr, &err);
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Unsupported CLImage2DType");
+ }
ARM_COMPUTE_UNUSED(err);
ARM_COMPUTE_ERROR_ON_MSG(err != CL_SUCCESS, "Error during the creation of CL image from buffer");
return cl::Image2D(cl_image);
}
+
+void handle_cl_error(const std::string &function_name, cl_int error_code)
+{
+ if (error_code != CL_SUCCESS)
+ {
+ std::string error_message = function_name + " - Error code: " + std::to_string(error_code);
+ ARM_COMPUTE_ERROR(error_message.c_str());
+ }
+}
+
+} // namespace arm_compute
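
The shape arithmetic in create_image2d_from_tensor() above packs four tensor elements into each RGBA texel; a small worked sketch of those expressions (numbers are illustrative, not taken from a real tensor):

    // F32 tensor of shape (8, 4, 2) with a Y stride of 8 * sizeof(float) bytes.
    const size_t dim0        = 8;
    const size_t total_elems = 8 * 4 * 2;          // tensor_shape().total_size()
    const size_t image_w     = dim0 / 4;           // 2 RGBA texels per image row
    const size_t image_h     = total_elems / dim0; // 8 image rows
    const size_t row_pitch   = 8 * sizeof(float);  // strides_in_bytes()[1]
    // -> create_image2d_from_buffer(ctx, buffer, TensorShape(2, 8), DataType::F32,
    //                               row_pitch, CLImage2DType::ReadOnly);
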
diff --git a/src/core/CL/CLUtils.h b/src/core/CL/CLUtils.h
index b65d547756..f9dcfeac3a 100644
--- a/src/core/CL/CLUtils.h
+++ b/src/core/CL/CLUtils.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,14 +22,36 @@
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CL_CLUTILS_H
-#define ARM_COMPUTE_CL_CLUTILS_H
+#ifndef ACL_SRC_CORE_CL_CLUTILS_H
+#define ACL_SRC_CORE_CL_CLUTILS_H
#include "arm_compute/core/CL/OpenCL.h"
+#include <map>
+
namespace arm_compute
{
class TensorShape;
+class CLBuildOptions;
+class ITensorInfo;
+class ICLTensor;
+enum class DataType;
+
+/** OpenCL Image2D types */
+enum class CLImage2DType
+{
+ ReadOnly,
+ WriteOnly
+};
+
+/** Create a cl::Image2D object from a tensor
+ *
+ * @param[in] tensor Tensor from which to construct Image 2D object
+ * @param[in] image_type Image 2D type (@ref CLImage2DType)
+ *
+ * @return cl::Image2D object
+ */
+cl::Image2D create_image2d_from_tensor(const ICLTensor *tensor, CLImage2DType image_type);
/** Create a cl::Image2D object from an OpenCL buffer
*
@@ -46,11 +68,24 @@ class TensorShape;
* @param[in] shape2d 2D tensor shape
* @param[in] data_type DataType to use. Only supported: F32,F16
* @param[in] image_row_pitch Image row pitch (a.k.a. stride Y) to be used in the image2d object
+ * @param[in] image_type Image 2D type (@ref CLImage2DType)
*
* @return cl::Image2D object
*/
-cl::Image2D create_image2d_from_buffer(const cl::Context &ctx, const cl::Buffer &buffer, const TensorShape &shape2d, DataType data_type, size_t image_row_pitch);
+cl::Image2D create_image2d_from_buffer(const cl::Context &ctx,
+ const cl::Buffer &buffer,
+ const TensorShape &shape2d,
+ DataType data_type,
+ size_t image_row_pitch,
+ CLImage2DType image_type);
+
+/** Check the CL error code and throw an exception accordingly.
+ *
+ * @param[in] function_name The name of the CL function being called.
+ * @param[in] error_code The error returned by the CL function.
+ */
+void handle_cl_error(const std::string &function_name, cl_int error_code);
-} // arm_compute
+} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_CLUTILS_H */
+#endif // ACL_SRC_CORE_CL_CLUTILS_H
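
handle_cl_error() gives call sites a one-line check for raw OpenCL return codes; a hedged example of the intended pattern (the command-buffer call is only an illustration):

    cl_int status = clFinalizeCommandBufferKHR(cb);
    arm_compute::handle_cl_error("clFinalizeCommandBufferKHR", status);
    // Anything other than CL_SUCCESS raises ARM_COMPUTE_ERROR with the
    // function name and the numeric error code in the message.
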
diff --git a/src/core/CL/CLValidate.h b/src/core/CL/CLValidate.h
index 7b5294e452..50d224f1c0 100644
--- a/src/core/CL/CLValidate.h
+++ b/src/core/CL/CLValidate.h
@@ -29,11 +29,13 @@
namespace arm_compute
{
-#define ARM_COMPUTE_ERROR_ON_F16_UNSUPPORTED(tensor) \
- ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_unsupported_fp16(__func__, __FILE__, __LINE__, tensor, CLKernelLibrary::get().fp16_supported()))
+#define ARM_COMPUTE_ERROR_ON_F16_UNSUPPORTED(tensor) \
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_unsupported_fp16(__func__, __FILE__, __LINE__, tensor, \
+ CLKernelLibrary::get().fp16_supported()))
-#define ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(tensor) \
- ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_unsupported_fp16(__func__, __FILE__, __LINE__, tensor, CLKernelLibrary::get().fp16_supported()))
+#define ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(tensor) \
+ ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_unsupported_fp16(__func__, __FILE__, __LINE__, tensor, \
+ CLKernelLibrary::get().fp16_supported()))
/** Return an error if int64_base_atomics extension is not supported by the device.
*
@@ -43,11 +45,13 @@ namespace arm_compute
*
* @return Status
*/
-inline arm_compute::Status error_on_unsupported_int64_base_atomics(const char *function, const char *file, const int line)
+inline arm_compute::Status
+error_on_unsupported_int64_base_atomics(const char *function, const char *file, const int line)
{
- if(!CLKernelLibrary::get().int64_base_atomics_supported())
+ if (!CLKernelLibrary::get().int64_base_atomics_supported())
{
- return ARM_COMPUTE_CREATE_ERROR_LOC(arm_compute::ErrorCode::UNSUPPORTED_EXTENSION_USE, function, file, line, "Atomic functions are not supported");
+ return ARM_COMPUTE_CREATE_ERROR_LOC(arm_compute::ErrorCode::UNSUPPORTED_EXTENSION_USE, function, file, line,
+ "Atomic functions are not supported");
}
return arm_compute::Status{};
}
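
A hypothetical validate() fragment showing how the reflowed helpers above are meant to be used (the tensor parameter and surrounding kernel are assumptions, not part of this patch):

    Status validate(const ITensorInfo *input)
    {
        // Fails on devices without FP16 support when the tensor is F16
        ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
        // Fails on devices without the int64_base_atomics extension
        ARM_COMPUTE_RETURN_ON_ERROR(error_on_unsupported_int64_base_atomics(__func__, __FILE__, __LINE__));
        return Status{};
    }
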
diff --git a/src/core/CL/DefaultLWSHeuristics.cpp b/src/core/CL/DefaultLWSHeuristics.cpp
new file mode 100644
index 0000000000..f96b24d2a9
--- /dev/null
+++ b/src/core/CL/DefaultLWSHeuristics.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2021-2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/core/CL/DefaultLWSHeuristics.h"
+
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+
+namespace
+{
+cl::NDRange get_gemm_lws(size_t gws_x, size_t gws_y, size_t gws_z)
+{
+ ARM_COMPUTE_UNUSED(gws_y);
+
+ if (gws_z != 1)
+ {
+ return cl::NDRange(4, 4, 2);
+ }
+ else
+ {
+ if (gws_x > 256)
+ {
+ return cl::NDRange(2, 16, 1);
+ }
+ else
+ {
+ return cl::NDRange(32, 4, 1);
+ }
+ }
+}
+
+cl::NDRange get_winograd_lws(size_t gws_x, size_t gws_y, size_t gws_z)
+{
+ ARM_COMPUTE_UNUSED(gws_x, gws_y, gws_z);
+
+ return cl::NDRange(4, 2, 1);
+}
+
+cl::NDRange get_direct_lws(size_t gws_x, size_t gws_y, size_t gws_z)
+{
+ ARM_COMPUTE_UNUSED(gws_z);
+
+ if (gws_x < gws_y)
+ {
+ if (gws_x < 4)
+ {
+ return cl::NDRange(std::min(gws_x, static_cast<size_t>(2u)), 32, 1);
+ }
+ else
+ {
+ return cl::NDRange(std::min(gws_x, static_cast<size_t>(4u)), 8, 1);
+ }
+ }
+ else
+ {
+ return cl::NDRange(8, 4, 1);
+ }
+}
+
+cl::NDRange get_dwc_lws(size_t gws_x, size_t gws_y, size_t gws_z)
+{
+ ARM_COMPUTE_UNUSED(gws_y);
+ ARM_COMPUTE_UNUSED(gws_z);
+
+ if (gws_x < 32)
+ {
+ return cl::NDRange(gws_x, 4, 4);
+ }
+ else
+ {
+ return cl::NDRange(8, 4, 2);
+ }
+}
+} // namespace
+
+namespace arm_compute
+{
+cl::NDRange get_default_lws_for_type(CLKernelType kernel_type, cl::NDRange gws)
+{
+ const size_t gws_x = gws[0];
+ const size_t gws_y = gws[1];
+ const size_t gws_z = gws[2];
+
+ switch (kernel_type)
+ {
+ case CLKernelType::GEMM:
+ {
+ return get_gemm_lws(gws_x, gws_y, gws_z);
+ }
+ case CLKernelType::DIRECT:
+ {
+ return get_direct_lws(gws_x, gws_y, gws_z);
+ }
+ case CLKernelType::WINOGRAD:
+ {
+ return get_winograd_lws(gws_x, gws_y, gws_z);
+ }
+ case CLKernelType::DEPTHWISE:
+ {
+ return get_dwc_lws(gws_x, gws_y, gws_z);
+ }
+ default:
+ {
+ return CLKernelLibrary::get().default_ndrange();
+ }
+ }
+}
+} // namespace arm_compute
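
A short usage sketch of the heuristic above, with an illustrative global work size for a direct-convolution kernel:

    cl::NDRange gws(16, 128, 1);
    cl::NDRange lws = arm_compute::get_default_lws_for_type(CLKernelType::DIRECT, gws);
    // gws_x (16) < gws_y (128) and gws_x >= 4, so get_direct_lws() returns (4, 8, 1).
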
diff --git a/src/core/CL/ICLMultiImage.cpp b/src/core/CL/DefaultLWSHeuristics.h
index 01b05fc036..e646b9acb8 100644
--- a/src/core/CL/ICLMultiImage.cpp
+++ b/src/core/CL/DefaultLWSHeuristics.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,19 +21,14 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/core/CL/ICLMultiImage.h"
+#ifndef ARM_COMPUTE_DEFAULT_LWS_HEURISTICS_H
+#define ARM_COMPUTE_DEFAULT_LWS_HEURISTICS_H
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/CL/CLTypes.h"
+#include "arm_compute/core/CL/OpenCL.h"
-using namespace arm_compute;
-
-IImage *ICLMultiImage::plane(unsigned int index)
-{
- return cl_plane(index);
-}
-
-const IImage *ICLMultiImage::plane(unsigned int index) const
+namespace arm_compute
{
- return cl_plane(index);
-}
+cl::NDRange get_default_lws_for_type(CLKernelType kernel_type, cl::NDRange gws);
+} // namespace arm_compute
+#endif // ARM_COMPUTE_DEFAULT_LWS_HEURISTICS_H
\ No newline at end of file
diff --git a/src/core/CL/ICLDistribution1D.cpp b/src/core/CL/ICLDistribution1D.cpp
deleted file mode 100644
index d185f1365f..0000000000
--- a/src/core/CL/ICLDistribution1D.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2016, 2017 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/core/CL/ICLDistribution1D.h"
-
-#include "arm_compute/core/Error.h"
-
-using namespace arm_compute;
-
-ICLDistribution1D::ICLDistribution1D(size_t num_bins, int32_t offset, uint32_t range)
- : IDistribution1D(num_bins, offset, range), _mapping(nullptr)
-{
-}
-
-void ICLDistribution1D::map(cl::CommandQueue &q, bool blocking)
-{
- ARM_COMPUTE_ERROR_ON(_mapping != nullptr);
- _mapping = do_map(q, blocking);
-}
-
-void ICLDistribution1D::unmap(cl::CommandQueue &q)
-{
- ARM_COMPUTE_ERROR_ON(_mapping == nullptr);
- do_unmap(q);
- _mapping = nullptr;
-}
-
-uint32_t *ICLDistribution1D::buffer() const
-{
- return _mapping;
-}
diff --git a/src/core/CL/ICLGEMMKernelConfiguration.h b/src/core/CL/ICLGEMMKernelConfiguration.h
deleted file mode 100644
index ac0e7ab7ff..0000000000
--- a/src/core/CL/ICLGEMMKernelConfiguration.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_ICLGEMMKERNELCONFIGURATION_H
-#define ARM_COMPUTE_ICLGEMMKERNELCONFIGURATION_H
-
-#include "arm_compute/core/GPUTarget.h"
-#include "arm_compute/core/Types.h"
-
-namespace arm_compute
-{
-/** Basic interface for the GEMM kernel configuration */
-class ICLGEMMKernelConfiguration
-{
-public:
- /** Constructor
- *
- * @param[in] arch GPU target
- */
- ICLGEMMKernelConfiguration(GPUTarget arch)
- : _target(arch)
- {
- }
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- ICLGEMMKernelConfiguration(const ICLGEMMKernelConfiguration &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- ICLGEMMKernelConfiguration &operator=(const ICLGEMMKernelConfiguration &) = delete;
- /** Default Move Constructor. */
- ICLGEMMKernelConfiguration(ICLGEMMKernelConfiguration &&) = default;
- /** Default move assignment operator */
- ICLGEMMKernelConfiguration &operator=(ICLGEMMKernelConfiguration &&) = default;
- /** Virtual destructor */
- virtual ~ICLGEMMKernelConfiguration() = default;
- /** Given M, N, K and B, this method returns the @ref GEMMLHSMatrixInfo and @ref GEMMRHSMatrixInfo to be used
- *
- * @param[in] m Number of rows LHS matrix
- * @param[in] n Number of columns RHS matrix
- * @param[in] k Number of columns LHS matrix or number of rows RHS matrix
- * @param[in] b Batch size
- * @param[in] data_type Data type
- */
- virtual std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type) = 0;
-
-protected:
- GPUTarget _target;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_ICLGEMMKERNELCONFIGURATION_H */
diff --git a/src/core/CL/ICLHOG.cpp b/src/core/CL/ICLHOG.cpp
deleted file mode 100644
index aaabe869b6..0000000000
--- a/src/core/CL/ICLHOG.cpp
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2017 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/core/CL/ICLHOG.h"
-
-using namespace arm_compute;
-
-ICLHOG::ICLHOG()
- : _mapping(nullptr)
-{
-}
-
-void ICLHOG::map(cl::CommandQueue &q, bool blocking)
-{
- _mapping = do_map(q, blocking);
-}
-
-void ICLHOG::unmap(cl::CommandQueue &q)
-{
- do_unmap(q);
- _mapping = nullptr;
-}
-
-float *ICLHOG::descriptor() const
-{
- return reinterpret_cast<float *>(_mapping);
-}
\ No newline at end of file
diff --git a/src/core/CL/ICLKernel.cpp b/src/core/CL/ICLKernel.cpp
index 1c6963f3f1..ac53e7f1d2 100644
--- a/src/core/CL/ICLKernel.cpp
+++ b/src/core/CL/ICLKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2021 Arm Limited.
+ * Copyright (c) 2016-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,41 +25,41 @@
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
+
#include "src/core/helpers/Utils.h"
#include <cstddef>
-void arm_compute::enqueue(cl::CommandQueue &queue, ICLKernel &kernel, const Window &window, const cl::NDRange &lws_hint, bool use_dummy_work_items)
+void arm_compute::enqueue(cl::CommandQueue &queue,
+ ICLKernel &kernel,
+ const Window &window,
+ const cl::NDRange &lws_hint,
+ bool use_dummy_work_items)
{
- if(kernel.kernel()() == nullptr)
+ if (kernel.kernel()() == nullptr)
{
return;
}
- for(unsigned int i = 0; i < Coordinates::num_max_dimensions; ++i)
+ for (unsigned int i = 0; i < Coordinates::num_max_dimensions; ++i)
{
ARM_COMPUTE_ERROR_ON(window[i].step() == 0);
// Make sure that dimensions > Z are 1
ARM_COMPUTE_ERROR_ON((i >= 3) && ((window[i].end() - window[i].start()) != 1));
}
- cl::NDRange gws = ICLKernel::gws_from_window(window);
+ cl::NDRange gws = ICLKernel::gws_from_window(window, use_dummy_work_items);
// Check for empty NDRange
- if(gws.dimensions() == 0)
+ if (gws.dimensions() == 0)
{
return;
}
- // Use dummy work-items
- if(use_dummy_work_items)
- {
- gws.get()[0] = get_next_power_two(gws[0]);
- gws.get()[1] = get_next_power_two(gws[1]);
- }
+ kernel.cache_gws(gws);
cl::NDRange valid_lws;
- if(lws_hint[0] * lws_hint[1] * lws_hint[2] > kernel.get_max_workgroup_size())
+ if (lws_hint[0] * lws_hint[1] * lws_hint[2] > kernel.get_max_workgroup_size())
{
valid_lws = cl::NullRange;
}
@@ -70,12 +70,12 @@ void arm_compute::enqueue(cl::CommandQueue &queue, ICLKernel &kernel, const Wind
cl::NDRange lws = cl::NullRange;
- if((valid_lws[0] <= gws[0]) && (valid_lws[1] <= gws[1]) && (valid_lws[2] <= gws[2]))
+ if ((valid_lws[0] <= gws[0]) && (valid_lws[1] <= gws[1]) && (valid_lws[2] <= gws[2]))
{
lws = valid_lws;
}
- if(CLKernelLibrary::get().is_wbsm_supported())
+ if (CLKernelLibrary::get().is_wbsm_supported())
{
set_wbsm(kernel.kernel(), kernel.wbsm_hint());
}
@@ -95,7 +95,7 @@ void ICLKernel::add_tensor_argument(unsigned &idx, const ICLTensor *tensor, cons
// Calculate offset to the start of the window
unsigned int offset_first_element = info->offset_first_element_in_bytes();
- for(unsigned int n = 0; n < info->num_dimensions(); ++n)
+ for (unsigned int n = 0; n < info->num_dimensions(); ++n)
{
offset_first_element += (window.is_broadcasted(n) ? 0 : window[n].start()) * strides[n];
}
@@ -103,24 +103,78 @@ void ICLKernel::add_tensor_argument(unsigned &idx, const ICLTensor *tensor, cons
unsigned int idx_start = idx;
_kernel.setArg(idx++, tensor->cl_buffer());
- for(unsigned int d = 0; d < dimension_size; ++d)
+ for (unsigned int d = 0; d < dimension_size; ++d)
{
- _kernel.setArg<cl_uint>(idx++, strides[d]);
- _kernel.setArg<cl_uint>(idx++, strides[d] * window[d].step());
+ _kernel.setArg<cl_uint>(idx++, window.is_broadcasted(d) ? 0 : strides[d]);
+ _kernel.setArg<cl_uint>(idx++, window.is_broadcasted(d) ? 0 : (strides[d] * window[d].step()));
}
_kernel.setArg<cl_uint>(idx++, offset_first_element);
ARM_COMPUTE_ERROR_ON_MSG_VAR(idx_start + num_arguments_per_tensor<dimension_size>() != idx,
- "add_%dD_tensor_argument() is supposed to add exactly %d arguments to the kernel", dimension_size, num_arguments_per_tensor<dimension_size>());
+ "add_%dD_tensor_argument() is supposed to add exactly %d arguments to the kernel",
+ dimension_size, num_arguments_per_tensor<dimension_size>());
ARM_COMPUTE_UNUSED(idx_start);
}
+void ICLKernel::add_3d_tensor_nhw_argument(unsigned int &idx, const ICLTensor *tensor)
+{
+ ARM_COMPUTE_ERROR_ON(tensor == nullptr);
+
+ const ITensorInfo *info = tensor->info();
+ ARM_COMPUTE_ERROR_ON(info == nullptr);
+ const Strides &strides = info->strides_in_bytes();
+
+    // Tensor pointer
+ _kernel.setArg(idx++, tensor->cl_buffer());
+
+ // Add stride_y, stride_z
+ _kernel.setArg<cl_uint>(idx++, strides[1]);
+ _kernel.setArg<cl_uint>(idx++, strides[2]);
+
+ // Tensor dimensions
+ _kernel.setArg<cl_uint>(idx++, info->dimension(0));
+ _kernel.setArg<cl_uint>(idx++, info->dimension(1));
+ _kernel.setArg<cl_uint>(idx++, info->dimension(2));
+
+ // Offset of first element
+ unsigned int offset_first_element = info->offset_first_element_in_bytes();
+ _kernel.setArg<cl_uint>(idx++, offset_first_element);
+}
+
+void ICLKernel::add_4d_tensor_nhwc_argument(unsigned int &idx, const ICLTensor *tensor)
+{
+ ARM_COMPUTE_ERROR_ON(tensor == nullptr);
+
+ const ITensorInfo *info = tensor->info();
+ ARM_COMPUTE_ERROR_ON(info == nullptr);
+ const Strides &strides = info->strides_in_bytes();
+
+    // Tensor pointer
+ _kernel.setArg(idx++, tensor->cl_buffer());
+
+ // Add stride_y, stride_z and stride_w
+ _kernel.setArg<cl_uint>(idx++, strides[1]);
+ _kernel.setArg<cl_uint>(idx++, strides[2]);
+ _kernel.setArg<cl_uint>(idx++, strides[3]);
+
+ // Tensor dimensions
+ _kernel.setArg<cl_uint>(idx++, info->dimension(0));
+ _kernel.setArg<cl_uint>(idx++, info->dimension(1));
+ _kernel.setArg<cl_uint>(idx++, info->dimension(2));
+ _kernel.setArg<cl_uint>(idx++, info->dimension(3));
+
+ // Offset of first element
+ unsigned int offset_first_element = info->offset_first_element_in_bytes();
+ _kernel.setArg<cl_uint>(idx++, offset_first_element);
+}
+
#ifndef DOXYGEN_SKIP_THIS
template void ICLKernel::add_tensor_argument<1>(unsigned &idx, const ICLTensor *tensor, const Window &window);
template void ICLKernel::add_tensor_argument<2>(unsigned &idx, const ICLTensor *tensor, const Window &window);
template void ICLKernel::add_tensor_argument<3>(unsigned &idx, const ICLTensor *tensor, const Window &window);
template void ICLKernel::add_tensor_argument<4>(unsigned &idx, const ICLTensor *tensor, const Window &window);
+template void ICLKernel::add_tensor_argument<5>(unsigned &idx, const ICLTensor *tensor, const Window &window);
#endif /* DOXYGEN_SKIP_THIS */
void ICLKernel::set_target(cl::Device &device)
@@ -130,16 +184,16 @@ void ICLKernel::set_target(cl::Device &device)
size_t ICLKernel::get_max_workgroup_size()
{
- if(_max_workgroup_size == 0)
+ if (_max_workgroup_size == 0)
{
_max_workgroup_size = CLKernelLibrary::get().max_local_workgroup_size(_kernel);
}
return _max_workgroup_size;
}
-cl::NDRange ICLKernel::gws_from_window(const Window &window)
+cl::NDRange ICLKernel::gws_from_window(const Window &window, bool use_dummy_work_items)
{
- if((window.x().end() - window.x().start()) == 0 || (window.y().end() - window.y().start()) == 0)
+ if ((window.x().end() - window.x().start()) == 0 || (window.y().end() - window.y().start()) == 0)
{
return cl::NullRange;
}
@@ -148,6 +202,22 @@ cl::NDRange ICLKernel::gws_from_window(const Window &window)
(window.y().end() - window.y().start()) / window.y().step(),
(window.z().end() - window.z().start()) / window.z().step());
+ if (use_dummy_work_items)
+ {
+ gws.get()[0] = get_next_power_two(gws[0]);
+ gws.get()[1] = get_next_power_two(gws[1]);
+ }
+
return gws;
}
-} // namespace arm_compute
\ No newline at end of file
+
+cl::NDRange ICLKernel::get_cached_gws() const
+{
+ return _cached_gws;
+}
+
+void ICLKernel::cache_gws(const cl::NDRange &gws)
+{
+ _cached_gws = gws;
+}
+} // namespace arm_compute
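
The dummy-work-item rounding now lives in gws_from_window() itself; a sketch of its effect, assuming a window that collapses to 24 x 17 x 3 work items:

    cl::NDRange gws_plain = ICLKernel::gws_from_window(window, /* use_dummy_work_items */ false);
    // -> (24, 17, 3)
    cl::NDRange gws_dummy = ICLKernel::gws_from_window(window, /* use_dummy_work_items */ true);
    // -> (32, 32, 3): X and Y are padded to the next power of two; the padded-out
    //    work items are expected to early-exit inside the kernel.
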
diff --git a/src/core/CL/ICLKernel.h b/src/core/CL/ICLKernel.h
index 6737109f34..6aebef15a5 100644
--- a/src/core/CL/ICLKernel.h
+++ b/src/core/CL/ICLKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2021 Arm Limited.
+ * Copyright (c) 2016-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -27,21 +27,42 @@
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/CLTypes.h"
#include "arm_compute/core/CL/OpenCL.h"
+#include "arm_compute/core/experimental/Types.h"
#include "arm_compute/core/GPUTarget.h"
#include "arm_compute/core/IKernel.h"
#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/experimental/Types.h"
#include "arm_compute/runtime/CL/CLTuningParams.h"
+#include "src/core/CL/DefaultLWSHeuristics.h"
+
#include <string>
namespace arm_compute
{
+namespace
+{
+bool is_same_lws(cl::NDRange lws0, cl::NDRange lws1)
+{
+ if (lws0.dimensions() != lws1.dimensions())
+ {
+ return false;
+ }
+
+ for (size_t i = 0; i < lws0.dimensions(); ++i)
+ {
+ if (lws0.get()[i] != lws1.get()[i])
+ {
+ return false;
+ }
+ }
+
+ return true;
+}
+} // namespace
template <typename T>
class ICLArray;
class ICLTensor;
class Window;
-
/** Common interface for all the OpenCL kernels */
class ICLKernel : public IKernel
{
@@ -50,7 +71,7 @@ private:
*
* @return The number of arguments enqueued per array object.
*/
- template <unsigned int dimension_size>
+ template <unsigned int dimension_size>
constexpr static unsigned int num_arguments_per_array()
{
return num_arguments_per_tensor<dimension_size>();
@@ -59,11 +80,24 @@ private:
*
* @return The number of arguments enqueued per tensor object.
*/
- template <unsigned int dimension_size>
+ template <unsigned int dimension_size>
constexpr static unsigned int num_arguments_per_tensor()
{
return 2 + 2 * dimension_size;
}
+
+ /** Get default lws for the kernel
+ *
+ * @param[in] window Execution window used by the kernel
+     * @param[in] use_dummy_work_items If the kernel uses dummy work items
+ *
+ * @return cl::NDRange
+ */
+ cl::NDRange default_lws_tune(const Window &window, bool use_dummy_work_items)
+ {
+ return get_default_lws_for_type(_type, gws_from_window(window, use_dummy_work_items));
+ }
+
using IKernel::configure; //Prevent children from calling IKernel::configure() directly
protected:
/** Configure the kernel's window and local workgroup size hint.
@@ -82,16 +116,32 @@ protected:
* @param[in] window The maximum window which will be returned by window()
* @param[in] tuning_params_hint (Optional) Tuning parameters to use.
*/
- void configure_internal(const Window &window, CLTuningParams tuning_params_hint = CLTuningParams(CLKernelLibrary::get().default_ndrange(), 0))
+ void configure_internal(const Window &window,
+ CLTuningParams tuning_params_hint = CLTuningParams(CLKernelLibrary::get().default_ndrange(),
+ 0))
{
_tuning_params_hint = tuning_params_hint;
+
+ if (is_same_lws(_tuning_params_hint.get_lws(), CLKernelLibrary::get().default_ndrange()))
+ {
+            // Disable use_dummy_work_items at configure time, because dummy work items only affect the gws
+            // size, which will be recalculated with the use_dummy_work_items flag at run time anyway.
+ _tuning_params_hint.set_lws(default_lws_tune(window, false /* use_dummy_work_items */));
+ }
+
IKernel::configure(window);
}
public:
/** Constructor */
ICLKernel()
- : _kernel(nullptr), _target(GPUTarget::MIDGARD), _config_id(arm_compute::default_config_id), _max_workgroup_size(0), _tuning_params_hint()
+ : _kernel(nullptr),
+ _target(GPUTarget::MIDGARD),
+ _config_id(arm_compute::default_config_id),
+ _max_workgroup_size(0),
+ _type(CLKernelType::UNKNOWN),
+ _tuning_params_hint(),
+ _cached_gws(cl::NullRange)
{
}
/** Returns a reference to the OpenCL kernel of this object.
@@ -102,6 +152,14 @@ public:
{
return _kernel;
}
+ /** Returns the CL kernel type
+ *
+ * @return The CL kernel type
+ */
+ CLKernelType type() const
+ {
+ return _type;
+ }
/** Add the passed 1D array's parameters to the object's kernel's arguments starting from the index idx.
*
* @param[in,out] idx Index at which to start adding the array's arguments. Will be incremented by the number of kernel arguments set.
@@ -111,7 +169,11 @@ public:
* @param[in] window Window the kernel will be executed on.
*/
template <typename T>
- void add_1D_array_argument(unsigned int &idx, const ICLArray<T> *array, const Strides &strides, unsigned int num_dimensions, const Window &window)
+ void add_1D_array_argument(unsigned int &idx,
+ const ICLArray<T> *array,
+ const Strides &strides,
+ unsigned int num_dimensions,
+ const Window &window)
{
add_array_argument<T, 1>(idx, array, strides, num_dimensions, window);
}
@@ -134,7 +196,7 @@ public:
*/
void add_1D_tensor_argument_if(bool cond, unsigned int &idx, const ICLTensor *tensor, const Window &window)
{
- if(cond)
+ if (cond)
{
add_1D_tensor_argument(idx, tensor, window);
}
@@ -158,7 +220,7 @@ public:
*/
void add_2D_tensor_argument_if(bool cond, unsigned int &idx, const ICLTensor *tensor, const Window &window)
{
- if(cond)
+ if (cond)
{
add_2D_tensor_argument(idx, tensor, window);
}
@@ -183,6 +245,51 @@ public:
{
add_tensor_argument<4>(idx, tensor, window);
}
+ /** Add the passed 5D tensor's parameters to the object's kernel's arguments starting from the index idx.
+ *
+ * @param[in,out] idx Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
+ * @param[in] tensor Tensor to set as an argument of the object's kernel.
+ * @param[in] window Window the kernel will be executed on.
+ */
+ void add_5D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
+ {
+ add_tensor_argument<5>(idx, tensor, window);
+ }
+
+ /** Add the passed NHW 3D tensor's parameters to the object's kernel's arguments by passing strides, dimensions and the offset to the first valid element in bytes.
+ *
+ * @param[in,out] idx Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
+ * @param[in] tensor Tensor to set as an argument of the object's kernel.
+ */
+ void add_3d_tensor_nhw_argument(unsigned int &idx, const ICLTensor *tensor);
+
+ /** Returns the number of arguments enqueued per NHW 3D Tensor object.
+ *
+ * @return The number of arguments enqueued per NHW 3D Tensor object.
+ */
+ constexpr static unsigned int num_arguments_per_3d_tensor_nhw()
+ {
+ constexpr unsigned int no_args_per_3d_tensor_nhw = 7u;
+ return no_args_per_3d_tensor_nhw;
+ }
+
+ /** Add the passed NHWC 4D tensor's parameters to the object's kernel's arguments by passing strides, dimensions and the offset to the first valid element in bytes.
+ *
+ * @param[in,out] idx Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
+ * @param[in] tensor Tensor to set as an argument of the object's kernel.
+ */
+ void add_4d_tensor_nhwc_argument(unsigned int &idx, const ICLTensor *tensor);
+
+ /** Returns the number of arguments enqueued per NHWC 4D Tensor object.
+ *
+ * @return The number of arguments enqueued per NHWC 4D Tensor object.
+ */
+ constexpr static unsigned int num_arguments_per_4d_tensor_nhwc()
+ {
+ constexpr unsigned int no_args_per_4d_tensor_nhwc = 9u;
+ return no_args_per_4d_tensor_nhwc;
+ }
+
/** Returns the number of arguments enqueued per 1D array object.
*
     * @return The number of arguments enqueued per 1D array object.
@@ -345,11 +452,24 @@ public:
size_t get_max_workgroup_size();
/** Get the global work size given an execution window
*
- * @param[in] window Execution window
+ * @param[in] window Execution window
+ * @param[in] use_dummy_work_items If the kernel uses dummy work items
*
* @return Global work size of the given execution window
*/
- static cl::NDRange gws_from_window(const Window &window);
+ static cl::NDRange gws_from_window(const Window &window, bool use_dummy_work_items);
+
+ /** Get the cached gws used to enqueue this kernel
+ *
+ * @return Latest global work size of the kernel
+ */
+ cl::NDRange get_cached_gws() const;
+
+ /** Cache the latest gws used to enqueue this kernel
+ *
+ * @param[in] gws Latest global work size of the kernel
+ */
+ void cache_gws(const cl::NDRange &gws);
private:
/** Add the passed array's parameters to the object's kernel's arguments starting from the index idx.
@@ -361,7 +481,11 @@ private:
* @param[in] window Window the kernel will be executed on.
*/
template <typename T, unsigned int dimension_size>
- void add_array_argument(unsigned int &idx, const ICLArray<T> *array, const Strides &strides, unsigned int num_dimensions, const Window &window);
+ void add_array_argument(unsigned int &idx,
+ const ICLArray<T> *array,
+ const Strides &strides,
+ unsigned int num_dimensions,
+ const Window &window);
/** Add the passed tensor's parameters to the object's kernel's arguments starting from the index idx.
*
* @param[in,out] idx Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
@@ -372,12 +496,14 @@ private:
void add_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window);
protected:
- cl::Kernel _kernel; /**< OpenCL kernel to run */
- GPUTarget _target; /**< The targeted GPU */
- std::string _config_id; /**< Configuration ID */
- size_t _max_workgroup_size; /**< The maximum workgroup size for this kernel */
+ cl::Kernel _kernel; /**< OpenCL kernel to run */
+ GPUTarget _target; /**< The targeted GPU */
+ std::string _config_id; /**< Configuration ID */
+ size_t _max_workgroup_size; /**< The maximum workgroup size for this kernel */
+ CLKernelType _type; /**< The CL kernel type */
private:
CLTuningParams _tuning_params_hint; /**< Tuning parameters hint for the OpenCL kernel */
+ cl::NDRange _cached_gws; /**< Latest GWS used to enqueue this kernel */
};
/** Add the kernel to the command queue with the given window.
@@ -395,7 +521,11 @@ private:
*
* @note If any dimension of the lws is greater than the global workgroup size then no lws will be passed.
*/
-void enqueue(cl::CommandQueue &queue, ICLKernel &kernel, const Window &window, const cl::NDRange &lws_hint = CLKernelLibrary::get().default_ndrange(), bool use_dummy_work_items = false);
+void enqueue(cl::CommandQueue &queue,
+ ICLKernel &kernel,
+ const Window &window,
+ const cl::NDRange &lws_hint = CLKernelLibrary::get().default_ndrange(),
+ bool use_dummy_work_items = false);
/** Add the passed array's parameters to the object's kernel's arguments starting from the index idx.
*
@@ -406,14 +536,15 @@ void enqueue(cl::CommandQueue &queue, ICLKernel &kernel, const Window &window, c
* @param[in] window Window the kernel will be executed on.
*/
template <typename T, unsigned int dimension_size>
-void ICLKernel::add_array_argument(unsigned &idx, const ICLArray<T> *array, const Strides &strides, unsigned int num_dimensions, const Window &window)
+void ICLKernel::add_array_argument(
+ unsigned &idx, const ICLArray<T> *array, const Strides &strides, unsigned int num_dimensions, const Window &window)
{
ARM_COMPUTE_ERROR_ON(array == nullptr);
// Calculate offset to the start of the window
unsigned int offset_first_element = 0;
- for(unsigned int n = 0; n < num_dimensions; ++n)
+ for (unsigned int n = 0; n < num_dimensions; ++n)
{
offset_first_element += window[n].start() * strides[n];
}
@@ -421,7 +552,7 @@ void ICLKernel::add_array_argument(unsigned &idx, const ICLArray<T> *array, cons
unsigned int idx_start = idx;
_kernel.setArg(idx++, array->cl_buffer());
- for(unsigned int dimension = 0; dimension < dimension_size; dimension++)
+ for (unsigned int dimension = 0; dimension < dimension_size; dimension++)
{
_kernel.setArg<cl_uint>(idx++, strides[dimension]);
_kernel.setArg<cl_uint>(idx++, strides[dimension] * window[dimension].step());
@@ -430,8 +561,9 @@ void ICLKernel::add_array_argument(unsigned &idx, const ICLArray<T> *array, cons
_kernel.setArg<cl_uint>(idx++, offset_first_element);
ARM_COMPUTE_ERROR_ON_MSG_VAR(idx_start + num_arguments_per_array<dimension_size>() != idx,
- "add_%dD_array_argument() is supposed to add exactly %d arguments to the kernel", dimension_size, num_arguments_per_array<dimension_size>());
+ "add_%dD_array_argument() is supposed to add exactly %d arguments to the kernel",
+ dimension_size, num_arguments_per_array<dimension_size>());
ARM_COMPUTE_UNUSED(idx_start);
}
-}
+} // namespace arm_compute
#endif /*ARM_COMPUTE_ICLKERNEL_H */
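
Argument-count bookkeeping implied by the declarations above, for reference (the counts follow directly from the setArg sequences in ICLKernel.cpp):

    // generic N-D tensor : 2 + 2 * N   (buffer + offset, plus stride and step per dimension)
    // 3D NHW tensor      : 7           (buffer, strides y/z, dims 0-2, offset)
    // 4D NHWC tensor     : 9           (buffer, strides y/z/w, dims 0-3, offset)
    static_assert(ICLKernel::num_arguments_per_3d_tensor_nhw() == 7, "see add_3d_tensor_nhw_argument");
    static_assert(ICLKernel::num_arguments_per_4d_tensor_nhwc() == 9, "see add_4d_tensor_nhwc_argument");
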
diff --git a/src/core/CL/ICLLut.cpp b/src/core/CL/ICLLut.cpp
deleted file mode 100644
index 007a5240f2..0000000000
--- a/src/core/CL/ICLLut.cpp
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2016, 2017 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/core/CL/ICLLut.h"
-
-using namespace arm_compute;
-
-ICLLut::ICLLut()
- : _mapping(nullptr)
-{
-}
-
-void ICLLut::map(cl::CommandQueue &q, bool blocking)
-{
- _mapping = do_map(q, blocking);
-}
-
-void ICLLut::unmap(cl::CommandQueue &q)
-{
- do_unmap(q);
- _mapping = nullptr;
-}
-
-uint8_t *ICLLut::buffer() const
-{
- return _mapping;
-}
diff --git a/src/core/CL/ICLMultiHOG.cpp b/src/core/CL/ICLMultiHOG.cpp
deleted file mode 100644
index 73bee39883..0000000000
--- a/src/core/CL/ICLMultiHOG.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2017 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/core/CL/ICLMultiHOG.h"
-
-#include "arm_compute/core/IHOG.h"
-
-using namespace arm_compute;
-
-IHOG *ICLMultiHOG::model(size_t index)
-{
- return cl_model(index);
-}
-
-const IHOG *ICLMultiHOG::model(size_t index) const
-{
- return cl_model(index);
-}
\ No newline at end of file
diff --git a/src/core/CL/ICLSimple2DKernel.cpp b/src/core/CL/ICLSimple2DKernel.cpp
index 5d8295bdfe..3f7edbb88d 100644
--- a/src/core/CL/ICLSimple2DKernel.cpp
+++ b/src/core/CL/ICLSimple2DKernel.cpp
@@ -40,6 +40,5 @@ void ICLSimple2DKernel::run(const Window &window, cl::CommandQueue &queue)
add_2D_tensor_argument(idx, _input, slice);
add_2D_tensor_argument(idx, _output, slice);
enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_2D(slice));
+ } while (window.slide_window_slice_2D(slice));
}
diff --git a/src/core/CL/ICLSimple2DKernel.h b/src/core/CL/ICLSimple2DKernel.h
index 5246492401..97bc1e58c2 100644
--- a/src/core/CL/ICLSimple2DKernel.h
+++ b/src/core/CL/ICLSimple2DKernel.h
@@ -37,5 +37,5 @@ public:
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
};
-}
+} // namespace arm_compute
#endif /*ARM_COMPUTE_ICLSIMPLE2DKERNEL_H */
diff --git a/src/core/CL/ICLSimple3DKernel.cpp b/src/core/CL/ICLSimple3DKernel.cpp
index fef1a86125..71d7d1f07b 100644
--- a/src/core/CL/ICLSimple3DKernel.cpp
+++ b/src/core/CL/ICLSimple3DKernel.cpp
@@ -42,6 +42,5 @@ void ICLSimple3DKernel::run(const Window &window, cl::CommandQueue &queue)
add_3D_tensor_argument(idx, _input, slice);
add_3D_tensor_argument(idx, _output, slice);
enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_3D(slice));
+ } while (window.slide_window_slice_3D(slice));
}
diff --git a/src/core/CL/ICLSimple3DKernel.h b/src/core/CL/ICLSimple3DKernel.h
index ff0b274663..5071b6b339 100644
--- a/src/core/CL/ICLSimple3DKernel.h
+++ b/src/core/CL/ICLSimple3DKernel.h
@@ -39,5 +39,5 @@ public:
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
};
-}
+} // namespace arm_compute
#endif /*ARM_COMPUTE_ICLSIMPLE3DKERNEL_H */
diff --git a/src/core/CL/ICLSimpleKernel.cpp b/src/core/CL/ICLSimpleKernel.cpp
index d67fefdf71..c31db8355f 100644
--- a/src/core/CL/ICLSimpleKernel.cpp
+++ b/src/core/CL/ICLSimpleKernel.cpp
@@ -22,30 +22,35 @@
* SOFTWARE.
*/
#include "src/core/CL/ICLSimpleKernel.h"
+
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/IAccessWindow.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
+
#include "src/core/helpers/WindowHelpers.h"
using namespace arm_compute;
-ICLSimpleKernel::ICLSimpleKernel()
- : _input(nullptr), _output(nullptr)
+ICLSimpleKernel::ICLSimpleKernel() : _input(nullptr), _output(nullptr)
{
}
-void ICLSimpleKernel::configure(const ICLTensor *input, ICLTensor *output, unsigned int num_elems_processed_per_iteration, bool border_undefined, const BorderSize &border_size)
+void ICLSimpleKernel::configure(const ICLTensor *input,
+ ICLTensor *output,
+ unsigned int num_elems_processed_per_iteration,
+ bool border_undefined,
+ const BorderSize &border_size)
{
_input = input;
_output = output;
// Configure kernel window
- Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration), border_undefined, border_size);
+ Window win =
+ calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration), border_undefined, border_size);
AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
- update_window_and_padding(win,
- AccessWindowHorizontal(input->info(), 0, num_elems_processed_per_iteration),
+ update_window_and_padding(win, AccessWindowHorizontal(input->info(), 0, num_elems_processed_per_iteration),
output_access);
output_access.set_valid_region(win, input->info()->valid_region(), border_undefined, border_size);
diff --git a/src/core/CL/ICLSimpleKernel.h b/src/core/CL/ICLSimpleKernel.h
index b35547a217..6afd7309aa 100644
--- a/src/core/CL/ICLSimpleKernel.h
+++ b/src/core/CL/ICLSimpleKernel.h
@@ -26,6 +26,7 @@
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
+
#include "src/core/CL/ICLKernel.h"
namespace arm_compute
@@ -55,12 +56,16 @@ public:
* @param[in] border_undefined (Optional) True if the border mode is undefined. False if it's replicate or constant.
* @param[in] border_size (Optional) Size of the border.
*/
- void configure(const ICLTensor *input, ICLTensor *output, unsigned int num_elems_processed_per_iteration, bool border_undefined = false, const BorderSize &border_size = BorderSize());
+ void configure(const ICLTensor *input,
+ ICLTensor *output,
+ unsigned int num_elems_processed_per_iteration,
+ bool border_undefined = false,
+ const BorderSize &border_size = BorderSize());
protected:
const ICLTensor *_input;
ICLTensor *_output;
};
-}
+} // namespace arm_compute
#endif /*ARM_COMPUTE_ICLSIMPLEKERNEL_H */
diff --git a/src/core/CL/ICLTensor.cpp b/src/core/CL/ICLTensor.cpp
index b541bff04a..0771db7f50 100644
--- a/src/core/CL/ICLTensor.cpp
+++ b/src/core/CL/ICLTensor.cpp
@@ -27,8 +27,7 @@
using namespace arm_compute;
-ICLTensor::ICLTensor()
- : _mapping(nullptr)
+ICLTensor::ICLTensor() : _mapping(nullptr)
{
}
diff --git a/src/core/CL/OpenCL.cpp b/src/core/CL/OpenCL.cpp
index 152759b4df..2ebc3274aa 100644
--- a/src/core/CL/OpenCL.cpp
+++ b/src/core/CL/OpenCL.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,28 +29,17 @@
#include "arm_compute/core/Error.h"
+#include <algorithm>
#include <dlfcn.h>
#include <iostream>
+#include <sstream>
namespace arm_compute
{
-CLSymbols::CLSymbols() noexcept(false)
- : _loaded(
-{
- false, false
-}),
-_handle(nullptr)
+CLSymbols::CLSymbols() noexcept(false) : _loaded({false, false})
{
}
-CLSymbols::~CLSymbols()
-{
- if(_handle != nullptr)
- {
- dlclose(_handle);
- }
-}
-
CLSymbols &CLSymbols::get()
{
static CLSymbols symbols;
@@ -59,9 +48,9 @@ CLSymbols &CLSymbols::get()
bool CLSymbols::load_default()
{
- static const std::vector<std::string> libraries{ "libOpenCL.so", "libGLES_mali.so", "libmali.so" };
+ static const std::vector<std::string> libraries_filenames{"libOpenCL.so", "libGLES_mali.so", "libmali.so"};
- if(_loaded.first)
+ if (_loaded.first)
{
return _loaded.second;
}
@@ -69,88 +58,174 @@ bool CLSymbols::load_default()
// Indicate that default loading has been tried
_loaded.first = true;
- for(const auto &lib : libraries)
+ if (load(libraries_filenames, /* use_loader */ false))
{
- if(load(lib))
- {
- ARM_COMPUTE_ERROR_ON_MSG(this->clBuildProgram_ptr == nullptr, "Failed to load OpenCL symbols from shared library");
- return true;
- }
+ ARM_COMPUTE_ERROR_ON_MSG(this->clBuildProgram_ptr == nullptr,
+ "Failed to load OpenCL symbols from shared library");
+ return true;
+ }
+
+#ifdef __ANDROID__
+ // When running in NDK environment, the above libraries are not accessible.
+ static const std::vector<std::string> android_libraries_filenames{"libOpenCL-pixel.so", "libOpenCL-car.so"};
+
+ if (load(android_libraries_filenames, /* use_loader */ true))
+ {
+ ARM_COMPUTE_ERROR_ON_MSG(this->clBuildProgram_ptr == nullptr,
+ "Failed to load OpenCL symbols from android shared library");
+ return true;
}
+#endif // __ANDROID__
- std::cerr << "Couldn't find any OpenCL library.\n";
+    // None of the libraries could be loaded at this point; report which ones were tried
+ std::stringstream ss;
+ std::for_each(libraries_filenames.begin(), libraries_filenames.end(),
+ [&ss](const std::string &s) { ss << s << " "; });
+#ifdef __ANDROID__
+ std::for_each(android_libraries_filenames.begin(), android_libraries_filenames.end(),
+ [&ss](const std::string &s) { ss << s << " "; });
+#endif // __ANDROID__
+ std::cerr << "Couldn't find any of the following OpenCL library: " << ss.str() << std::endl;
return false;
}
-bool CLSymbols::load(const std::string &library)
+bool CLSymbols::load(const std::vector<std::string> &libraries_filenames, bool use_loader)
{
- _handle = dlopen(library.c_str(), RTLD_LAZY | RTLD_LOCAL);
-
- if(_handle == nullptr)
+ void *handle = nullptr;
+ unsigned int index = 0;
+ for (index = 0; index < libraries_filenames.size(); ++index)
+ {
+ handle = dlopen(libraries_filenames[index].c_str(), RTLD_LAZY | RTLD_LOCAL);
+ if (handle != nullptr)
+ {
+ break;
+ }
+ }
+ if (index == libraries_filenames.size())
{
- std::cerr << "Can't load " << library << ": " << dlerror() << "\n";
// Set status of loading to failed
_loaded.second = false;
return false;
}
+#ifdef __ANDROID__
+ typedef void *(*loadOpenCLPointer_t)(const char *name);
+ loadOpenCLPointer_t loadOpenCLPointer;
+ if (use_loader)
+ {
+ typedef void (*enableOpenCL_t)();
+ enableOpenCL_t enableOpenCL = reinterpret_cast<enableOpenCL_t>(dlsym(handle, "enableOpenCL"));
+ enableOpenCL();
+
+ loadOpenCLPointer = reinterpret_cast<loadOpenCLPointer_t>(dlsym(handle, "loadOpenCLPointer"));
+ }
+ else
+ {
+ loadOpenCLPointer = nullptr;
+ }
+#define LOAD_FUNCTION_PTR(func_name, _handle) \
+ func_name##_ptr = reinterpret_cast<decltype(func_name) *>(use_loader ? loadOpenCLPointer(#func_name) \
+ : dlsym(handle, #func_name));
+#else /* __ANDROID__ */
+ (void)use_loader; // Avoid unused warning
#define LOAD_FUNCTION_PTR(func_name, handle) \
func_name##_ptr = reinterpret_cast<decltype(func_name) *>(dlsym(handle, #func_name));
+#endif /* __ANDROID__ */
+
+#define LOAD_EXTENSION_FUNCTION_PTR(func_name, platform_id) \
+ func_name##_ptr = \
+ reinterpret_cast<decltype(func_name) *>(clGetExtensionFunctionAddressForPlatform(platform_id, #func_name));
+
+ LOAD_FUNCTION_PTR(clCreateContext, handle);
+ LOAD_FUNCTION_PTR(clCreateContextFromType, handle);
+ LOAD_FUNCTION_PTR(clCreateCommandQueue, handle);
+ LOAD_FUNCTION_PTR(clCreateCommandQueueWithProperties, handle);
+ LOAD_FUNCTION_PTR(clGetContextInfo, handle);
+ LOAD_FUNCTION_PTR(clBuildProgram, handle);
+ LOAD_FUNCTION_PTR(clEnqueueNDRangeKernel, handle);
+ LOAD_FUNCTION_PTR(clSetKernelArg, handle);
+ LOAD_FUNCTION_PTR(clReleaseKernel, handle);
+ LOAD_FUNCTION_PTR(clCreateProgramWithSource, handle);
+ LOAD_FUNCTION_PTR(clCreateBuffer, handle);
+ LOAD_FUNCTION_PTR(clRetainKernel, handle);
+ LOAD_FUNCTION_PTR(clCreateKernel, handle);
+ LOAD_FUNCTION_PTR(clGetProgramInfo, handle);
+ LOAD_FUNCTION_PTR(clFlush, handle);
+ LOAD_FUNCTION_PTR(clFinish, handle);
+ LOAD_FUNCTION_PTR(clReleaseProgram, handle);
+ LOAD_FUNCTION_PTR(clRetainContext, handle);
+ LOAD_FUNCTION_PTR(clCreateProgramWithBinary, handle);
+ LOAD_FUNCTION_PTR(clReleaseCommandQueue, handle);
+ LOAD_FUNCTION_PTR(clEnqueueMapBuffer, handle);
+ LOAD_FUNCTION_PTR(clRetainProgram, handle);
+ LOAD_FUNCTION_PTR(clGetProgramBuildInfo, handle);
+ LOAD_FUNCTION_PTR(clEnqueueReadBuffer, handle);
+ LOAD_FUNCTION_PTR(clEnqueueWriteBuffer, handle);
+ LOAD_FUNCTION_PTR(clReleaseEvent, handle);
+ LOAD_FUNCTION_PTR(clReleaseContext, handle);
+ LOAD_FUNCTION_PTR(clRetainCommandQueue, handle);
+ LOAD_FUNCTION_PTR(clEnqueueUnmapMemObject, handle);
+ LOAD_FUNCTION_PTR(clRetainMemObject, handle);
+ LOAD_FUNCTION_PTR(clReleaseMemObject, handle);
+ LOAD_FUNCTION_PTR(clGetDeviceInfo, handle);
+ LOAD_FUNCTION_PTR(clGetDeviceIDs, handle);
+ LOAD_FUNCTION_PTR(clGetMemObjectInfo, handle);
+ LOAD_FUNCTION_PTR(clRetainEvent, handle);
+ LOAD_FUNCTION_PTR(clGetPlatformInfo, handle);
+ LOAD_FUNCTION_PTR(clGetPlatformIDs, handle);
+ LOAD_FUNCTION_PTR(clGetKernelWorkGroupInfo, handle);
+ LOAD_FUNCTION_PTR(clGetCommandQueueInfo, handle);
+ LOAD_FUNCTION_PTR(clGetKernelInfo, handle);
+ LOAD_FUNCTION_PTR(clGetEventProfilingInfo, handle);
+ LOAD_FUNCTION_PTR(clSVMAlloc, handle);
+ LOAD_FUNCTION_PTR(clSVMFree, handle);
+ LOAD_FUNCTION_PTR(clEnqueueSVMMap, handle);
+ LOAD_FUNCTION_PTR(clEnqueueSVMUnmap, handle);
+ LOAD_FUNCTION_PTR(clEnqueueMarker, handle);
+ LOAD_FUNCTION_PTR(clWaitForEvents, handle);
+ LOAD_FUNCTION_PTR(clCreateImage, handle);
+ LOAD_FUNCTION_PTR(clSetKernelExecInfo, handle);
+ LOAD_FUNCTION_PTR(clGetExtensionFunctionAddressForPlatform, handle);
+
+ // Load Extensions
+
+ // Number of platforms is assumed to be 1. For this to be greater than 1,
+ // the system must have more than one OpenCL implementation provided by
+ // different vendors. This is not our use case. Besides, the library
+ // already assumes one implementation as it uses one handle to load core
+ // functions.
+ constexpr unsigned int num_platforms = 1U;
+ std::vector<cl_platform_id> platform_ids(num_platforms);
+ cl_int err = clGetPlatformIDs(num_platforms, platform_ids.data(), nullptr);
+ if (err != CL_SUCCESS)
+ {
+ return false;
+ }
+
+ // Command buffer and mutable dispatch command buffer extensions
+    /// TODO: (COMPMID-6742) Load Command Buffer extensions in a portable way
+ /// using clGetExtensionFunctionAddressForPlatform().
+ /// The details can be found here:
+ /// https://registry.khronos.org/OpenCL/specs/3.0-unified/html/OpenCL_Ext.html#getting-opencl-api-extension-function-pointers
+ ///
+    /// @note: Problems have been reported when loading these extensions in the recommended way.
+ /// For details, please see COMPUTE-16545
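+    /// Illustrative sketch only (not what this change does): the portable form referenced above would
+    /// resolve each entry point through the platform, e.g.
+    ///   clCreateCommandBufferKHR_ptr = reinterpret_cast<decltype(clCreateCommandBufferKHR) *>(
+    ///       clGetExtensionFunctionAddressForPlatform(platform_ids[0], "clCreateCommandBufferKHR"));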
+ LOAD_FUNCTION_PTR(clCreateCommandBufferKHR, handle);
+ LOAD_FUNCTION_PTR(clRetainCommandBufferKHR, handle);
+ LOAD_FUNCTION_PTR(clReleaseCommandBufferKHR, handle);
+ LOAD_FUNCTION_PTR(clFinalizeCommandBufferKHR, handle);
+ LOAD_FUNCTION_PTR(clEnqueueCommandBufferKHR, handle);
+ LOAD_FUNCTION_PTR(clCommandNDRangeKernelKHR, handle);
- LOAD_FUNCTION_PTR(clCreateContext, _handle);
- LOAD_FUNCTION_PTR(clCreateContextFromType, _handle);
- LOAD_FUNCTION_PTR(clCreateCommandQueue, _handle);
- LOAD_FUNCTION_PTR(clGetContextInfo, _handle);
- LOAD_FUNCTION_PTR(clBuildProgram, _handle);
- LOAD_FUNCTION_PTR(clEnqueueNDRangeKernel, _handle);
- LOAD_FUNCTION_PTR(clSetKernelArg, _handle);
- LOAD_FUNCTION_PTR(clReleaseKernel, _handle);
- LOAD_FUNCTION_PTR(clCreateProgramWithSource, _handle);
- LOAD_FUNCTION_PTR(clCreateBuffer, _handle);
- LOAD_FUNCTION_PTR(clRetainKernel, _handle);
- LOAD_FUNCTION_PTR(clCreateKernel, _handle);
- LOAD_FUNCTION_PTR(clGetProgramInfo, _handle);
- LOAD_FUNCTION_PTR(clFlush, _handle);
- LOAD_FUNCTION_PTR(clFinish, _handle);
- LOAD_FUNCTION_PTR(clReleaseProgram, _handle);
- LOAD_FUNCTION_PTR(clRetainContext, _handle);
- LOAD_FUNCTION_PTR(clCreateProgramWithBinary, _handle);
- LOAD_FUNCTION_PTR(clReleaseCommandQueue, _handle);
- LOAD_FUNCTION_PTR(clEnqueueMapBuffer, _handle);
- LOAD_FUNCTION_PTR(clRetainProgram, _handle);
- LOAD_FUNCTION_PTR(clGetProgramBuildInfo, _handle);
- LOAD_FUNCTION_PTR(clEnqueueReadBuffer, _handle);
- LOAD_FUNCTION_PTR(clEnqueueWriteBuffer, _handle);
- LOAD_FUNCTION_PTR(clReleaseEvent, _handle);
- LOAD_FUNCTION_PTR(clReleaseContext, _handle);
- LOAD_FUNCTION_PTR(clRetainCommandQueue, _handle);
- LOAD_FUNCTION_PTR(clEnqueueUnmapMemObject, _handle);
- LOAD_FUNCTION_PTR(clRetainMemObject, _handle);
- LOAD_FUNCTION_PTR(clReleaseMemObject, _handle);
- LOAD_FUNCTION_PTR(clGetDeviceInfo, _handle);
- LOAD_FUNCTION_PTR(clGetDeviceIDs, _handle);
- LOAD_FUNCTION_PTR(clGetMemObjectInfo, _handle);
- LOAD_FUNCTION_PTR(clRetainEvent, _handle);
- LOAD_FUNCTION_PTR(clGetPlatformIDs, _handle);
- LOAD_FUNCTION_PTR(clGetKernelWorkGroupInfo, _handle);
- LOAD_FUNCTION_PTR(clGetCommandQueueInfo, _handle);
- LOAD_FUNCTION_PTR(clGetKernelInfo, _handle);
- LOAD_FUNCTION_PTR(clGetEventProfilingInfo, _handle);
- LOAD_FUNCTION_PTR(clSVMAlloc, _handle);
- LOAD_FUNCTION_PTR(clSVMFree, _handle);
- LOAD_FUNCTION_PTR(clEnqueueSVMMap, _handle);
- LOAD_FUNCTION_PTR(clEnqueueSVMUnmap, _handle);
- LOAD_FUNCTION_PTR(clEnqueueMarker, _handle);
- LOAD_FUNCTION_PTR(clWaitForEvents, _handle);
- LOAD_FUNCTION_PTR(clCreateImage, _handle);
- LOAD_FUNCTION_PTR(clSetKernelExecInfo, _handle);
+ LOAD_FUNCTION_PTR(clUpdateMutableCommandsKHR, handle);
// Third-party extensions
- LOAD_FUNCTION_PTR(clImportMemoryARM, _handle);
+ LOAD_EXTENSION_FUNCTION_PTR(clImportMemoryARM, platform_ids[0]);
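+    // (clImportMemoryARM is an Arm vendor extension, hence resolved above through the platform with
+    // clGetExtensionFunctionAddressForPlatform rather than with dlsym on the library handle.)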
#undef LOAD_FUNCTION_PTR
+#undef LOAD_EXTENSION_FUNCTION_PTR
- //Don't call dlclose(_handle) or all the symbols will be unloaded !
+    // Don't call dlclose(handle) or all the symbols will be unloaded!
// Disable default loading and set status to successful
_loaded = std::make_pair(true, true);
@@ -161,16 +236,32 @@ bool CLSymbols::load(const std::string &library)
bool opencl_is_available()
{
CLSymbols::get().load_default();
+
+ // Using static objects that rely on OpenCL in their constructor or
+ // destructor is implementation defined according to the OpenCL API
+ // Specification. These objects include CLScheduler.
+ //
+ // For compatibility with OpenCL runtimes that also use static objects to
+ // hold their state, we call a harmless OpenCL function (clGetPlatformIDs
+ // with invalid parameters must result in CL_INVALID_VALUE) to ensure the
+ // runtimes have a chance to initialize their static objects first. Thanks
+ // to C++11 rules about normal program completion (cf [basic.start]), this
+ // ensures their static objects are destroyed last, i.e. after the
+ // singleton CLScheduler is destroyed.
+ //
+ // When OpenCL is not available, this call results in CL_OUT_OF_RESOURCES,
+ // which is equally harmless.
+ (void)clGetPlatformIDs(0, nullptr, nullptr);
+
return CLSymbols::get().clBuildProgram_ptr != nullptr;
}
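+// Example usage (illustrative only, not part of the library):
+//   if (arm_compute::opencl_is_available())
+//   {
+//       // safe to create CL contexts, build programs, etc.
+//   }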
} // namespace arm_compute
-cl_int clEnqueueMarker(cl_command_queue command_queue,
- cl_event *event)
+cl_int clEnqueueMarker(cl_command_queue command_queue, cl_event *event)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clEnqueueMarker_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(command_queue, event);
}
@@ -180,12 +271,11 @@ cl_int clEnqueueMarker(cl_command_queue command_queue,
}
}
-cl_int clWaitForEvents(cl_uint num_events,
- const cl_event *event_list)
+cl_int clWaitForEvents(cl_uint num_events, const cl_event *event_list)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clWaitForEvents_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(num_events, event_list);
}
@@ -195,12 +285,18 @@ cl_int clWaitForEvents(cl_uint num_events,
}
}
-cl_int clEnqueueSVMMap(cl_command_queue command_queue, cl_bool blocking_map, cl_map_flags flags, void *svm_ptr,
- size_t size, cl_uint num_events_in_wait_list, const cl_event *event_wait_list, cl_event *event)
+cl_int clEnqueueSVMMap(cl_command_queue command_queue,
+ cl_bool blocking_map,
+ cl_map_flags flags,
+ void *svm_ptr,
+ size_t size,
+ cl_uint num_events_in_wait_list,
+ const cl_event *event_wait_list,
+ cl_event *event)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clEnqueueSVMMap_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(command_queue, blocking_map, flags, svm_ptr, size, num_events_in_wait_list, event_wait_list, event);
}
@@ -210,12 +306,15 @@ cl_int clEnqueueSVMMap(cl_command_queue command_queue, cl_bool blocking_map, cl_
}
}
-cl_int clEnqueueSVMUnmap(cl_command_queue command_queue, void *svm_ptr, cl_uint num_events_in_wait_list,
- const cl_event *event_wait_list, cl_event *event)
+cl_int clEnqueueSVMUnmap(cl_command_queue command_queue,
+ void *svm_ptr,
+ cl_uint num_events_in_wait_list,
+ const cl_event *event_wait_list,
+ cl_event *event)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clEnqueueSVMUnmap_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(command_queue, svm_ptr, num_events_in_wait_list, event_wait_list, event);
}
@@ -229,7 +328,7 @@ void *clSVMAlloc(cl_context context, cl_svm_mem_flags_arm flags, size_t size, cl
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clSVMAlloc_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(context, flags, size, alignment);
}
@@ -243,7 +342,7 @@ void clSVMFree(cl_context context, void *svm_pointer)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clSVMFree_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
func(context, svm_pointer);
}
@@ -257,7 +356,7 @@ cl_int clGetContextInfo(cl_context context,
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clGetContextInfo_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(context, param_name, param_value_size, param_value, param_value_size_ret);
}
@@ -274,7 +373,24 @@ cl_command_queue clCreateCommandQueue(cl_context context,
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clCreateCommandQueue_ptr;
- if(func != nullptr)
+ if (func != nullptr)
+ {
+ return func(context, device, properties, errcode_ret);
+ }
+ else
+ {
+ return nullptr;
+ }
+}
+
+cl_command_queue clCreateCommandQueueWithProperties(cl_context context,
+ cl_device_id device,
+ const cl_queue_properties *properties,
+ cl_int *errcode_ret)
+{
+ arm_compute::CLSymbols::get().load_default();
+ auto func = arm_compute::CLSymbols::get().clCreateCommandQueueWithProperties_ptr;
+ if (func != nullptr)
{
return func(context, device, properties, errcode_ret);
}
@@ -284,17 +400,16 @@ cl_command_queue clCreateCommandQueue(cl_context context,
}
}
-cl_context clCreateContext(
- const cl_context_properties *properties,
- cl_uint num_devices,
- const cl_device_id *devices,
- void (*pfn_notify)(const char *, const void *, size_t, void *),
- void *user_data,
- cl_int *errcode_ret)
+cl_context clCreateContext(const cl_context_properties *properties,
+ cl_uint num_devices,
+ const cl_device_id *devices,
+ void (*pfn_notify)(const char *, const void *, size_t, void *),
+ void *user_data,
+ cl_int *errcode_ret)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clCreateContext_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(properties, num_devices, devices, pfn_notify, user_data, errcode_ret);
}
@@ -312,7 +427,7 @@ cl_context clCreateContextFromType(const cl_context_properties *properties,
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clCreateContextFromType_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(properties, device_type, pfn_notify, user_data, errcode_ret);
}
@@ -322,17 +437,16 @@ cl_context clCreateContextFromType(const cl_context_properties *properties,
}
}
-cl_int clBuildProgram(
- cl_program program,
- cl_uint num_devices,
- const cl_device_id *device_list,
- const char *options,
- void(CL_CALLBACK *pfn_notify)(cl_program program, void *user_data),
- void *user_data)
+cl_int clBuildProgram(cl_program program,
+ cl_uint num_devices,
+ const cl_device_id *device_list,
+ const char *options,
+ void(CL_CALLBACK *pfn_notify)(cl_program program, void *user_data),
+ void *user_data)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clBuildProgram_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(program, num_devices, device_list, options, pfn_notify, user_data);
}
@@ -342,22 +456,22 @@ cl_int clBuildProgram(
}
}
-cl_int clEnqueueNDRangeKernel(
- cl_command_queue command_queue,
- cl_kernel kernel,
- cl_uint work_dim,
- const size_t *global_work_offset,
- const size_t *global_work_size,
- const size_t *local_work_size,
- cl_uint num_events_in_wait_list,
- const cl_event *event_wait_list,
- cl_event *event)
+cl_int clEnqueueNDRangeKernel(cl_command_queue command_queue,
+ cl_kernel kernel,
+ cl_uint work_dim,
+ const size_t *global_work_offset,
+ const size_t *global_work_size,
+ const size_t *local_work_size,
+ cl_uint num_events_in_wait_list,
+ const cl_event *event_wait_list,
+ cl_event *event)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clEnqueueNDRangeKernel_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
- return func(command_queue, kernel, work_dim, global_work_offset, global_work_size, local_work_size, num_events_in_wait_list, event_wait_list, event);
+ return func(command_queue, kernel, work_dim, global_work_offset, global_work_size, local_work_size,
+ num_events_in_wait_list, event_wait_list, event);
}
else
{
@@ -365,15 +479,11 @@ cl_int clEnqueueNDRangeKernel(
}
}
-cl_int clSetKernelArg(
- cl_kernel kernel,
- cl_uint arg_index,
- size_t arg_size,
- const void *arg_value)
+cl_int clSetKernelArg(cl_kernel kernel, cl_uint arg_index, size_t arg_size, const void *arg_value)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clSetKernelArg_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(kernel, arg_index, arg_size, arg_value);
}
@@ -387,7 +497,7 @@ cl_int clRetainMemObject(cl_mem memobj)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clRetainMemObject_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(memobj);
}
@@ -401,7 +511,7 @@ cl_int clReleaseMemObject(cl_mem memobj)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clReleaseMemObject_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(memobj);
}
@@ -411,17 +521,16 @@ cl_int clReleaseMemObject(cl_mem memobj)
}
}
-cl_int clEnqueueUnmapMemObject(
- cl_command_queue command_queue,
- cl_mem memobj,
- void *mapped_ptr,
- cl_uint num_events_in_wait_list,
- const cl_event *event_wait_list,
- cl_event *event)
+cl_int clEnqueueUnmapMemObject(cl_command_queue command_queue,
+ cl_mem memobj,
+ void *mapped_ptr,
+ cl_uint num_events_in_wait_list,
+ const cl_event *event_wait_list,
+ cl_event *event)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clEnqueueUnmapMemObject_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(command_queue, memobj, mapped_ptr, num_events_in_wait_list, event_wait_list, event);
}
@@ -435,7 +544,7 @@ cl_int clRetainCommandQueue(cl_command_queue command_queue)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clRetainCommandQueue_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(command_queue);
}
@@ -449,7 +558,7 @@ cl_int clReleaseContext(cl_context context)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clReleaseContext_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(context);
}
@@ -462,7 +571,7 @@ cl_int clReleaseEvent(cl_event event)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clReleaseEvent_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(event);
}
@@ -472,22 +581,22 @@ cl_int clReleaseEvent(cl_event event)
}
}
-cl_int clEnqueueWriteBuffer(
- cl_command_queue command_queue,
- cl_mem buffer,
- cl_bool blocking_write,
- size_t offset,
- size_t size,
- const void *ptr,
- cl_uint num_events_in_wait_list,
- const cl_event *event_wait_list,
- cl_event *event)
+cl_int clEnqueueWriteBuffer(cl_command_queue command_queue,
+ cl_mem buffer,
+ cl_bool blocking_write,
+ size_t offset,
+ size_t size,
+ const void *ptr,
+ cl_uint num_events_in_wait_list,
+ const cl_event *event_wait_list,
+ cl_event *event)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clEnqueueWriteBuffer_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
- return func(command_queue, buffer, blocking_write, offset, size, ptr, num_events_in_wait_list, event_wait_list, event);
+ return func(command_queue, buffer, blocking_write, offset, size, ptr, num_events_in_wait_list, event_wait_list,
+ event);
}
else
{
@@ -495,22 +604,22 @@ cl_int clEnqueueWriteBuffer(
}
}
-cl_int clEnqueueReadBuffer(
- cl_command_queue command_queue,
- cl_mem buffer,
- cl_bool blocking_read,
- size_t offset,
- size_t size,
- void *ptr,
- cl_uint num_events_in_wait_list,
- const cl_event *event_wait_list,
- cl_event *event)
+cl_int clEnqueueReadBuffer(cl_command_queue command_queue,
+ cl_mem buffer,
+ cl_bool blocking_read,
+ size_t offset,
+ size_t size,
+ void *ptr,
+ cl_uint num_events_in_wait_list,
+ const cl_event *event_wait_list,
+ cl_event *event)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clEnqueueReadBuffer_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
- return func(command_queue, buffer, blocking_read, offset, size, ptr, num_events_in_wait_list, event_wait_list, event);
+ return func(command_queue, buffer, blocking_read, offset, size, ptr, num_events_in_wait_list, event_wait_list,
+ event);
}
else
{
@@ -518,17 +627,16 @@ cl_int clEnqueueReadBuffer(
}
}
-cl_int clGetProgramBuildInfo(
- cl_program program,
- cl_device_id device,
- cl_program_build_info param_name,
- size_t param_value_size,
- void *param_value,
- size_t *param_value_size_ret)
+cl_int clGetProgramBuildInfo(cl_program program,
+ cl_device_id device,
+ cl_program_build_info param_name,
+ size_t param_value_size,
+ void *param_value,
+ size_t *param_value_size_ret)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clGetProgramBuildInfo_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(program, device, param_name, param_value_size, param_value, param_value_size_ret);
}
@@ -542,7 +650,7 @@ cl_int clRetainProgram(cl_program program)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clRetainProgram_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(program);
}
@@ -552,27 +660,27 @@ cl_int clRetainProgram(cl_program program)
}
}
-void *clEnqueueMapBuffer(
- cl_command_queue command_queue,
- cl_mem buffer,
- cl_bool blocking_map,
- cl_map_flags map_flags,
- size_t offset,
- size_t size,
- cl_uint num_events_in_wait_list,
- const cl_event *event_wait_list,
- cl_event *event,
- cl_int *errcode_ret)
+void *clEnqueueMapBuffer(cl_command_queue command_queue,
+ cl_mem buffer,
+ cl_bool blocking_map,
+ cl_map_flags map_flags,
+ size_t offset,
+ size_t size,
+ cl_uint num_events_in_wait_list,
+ const cl_event *event_wait_list,
+ cl_event *event,
+ cl_int *errcode_ret)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clEnqueueMapBuffer_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
- return func(command_queue, buffer, blocking_map, map_flags, offset, size, num_events_in_wait_list, event_wait_list, event, errcode_ret);
+ return func(command_queue, buffer, blocking_map, map_flags, offset, size, num_events_in_wait_list,
+ event_wait_list, event, errcode_ret);
}
else
{
- if(errcode_ret != nullptr)
+ if (errcode_ret != nullptr)
{
*errcode_ret = CL_OUT_OF_RESOURCES;
}
@@ -584,7 +692,7 @@ cl_int clReleaseCommandQueue(cl_command_queue command_queue)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clReleaseCommandQueue_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(command_queue);
}
@@ -594,24 +702,23 @@ cl_int clReleaseCommandQueue(cl_command_queue command_queue)
}
}
-cl_program clCreateProgramWithBinary(
- cl_context context,
- cl_uint num_devices,
- const cl_device_id *device_list,
- const size_t *lengths,
- const unsigned char **binaries,
- cl_int *binary_status,
- cl_int *errcode_ret)
+cl_program clCreateProgramWithBinary(cl_context context,
+ cl_uint num_devices,
+ const cl_device_id *device_list,
+ const size_t *lengths,
+ const unsigned char **binaries,
+ cl_int *binary_status,
+ cl_int *errcode_ret)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clCreateProgramWithBinary_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(context, num_devices, device_list, lengths, binaries, binary_status, errcode_ret);
}
else
{
- if(errcode_ret != nullptr)
+ if (errcode_ret != nullptr)
{
*errcode_ret = CL_OUT_OF_RESOURCES;
}
@@ -623,7 +730,7 @@ cl_int clRetainContext(cl_context context)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clRetainContext_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(context);
}
@@ -637,7 +744,7 @@ cl_int clReleaseProgram(cl_program program)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clReleaseProgram_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(program);
}
@@ -651,7 +758,7 @@ cl_int clFlush(cl_command_queue command_queue)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clFlush_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(command_queue);
}
@@ -665,7 +772,7 @@ cl_int clFinish(cl_command_queue command_queue)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clFinish_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(command_queue);
}
@@ -675,16 +782,15 @@ cl_int clFinish(cl_command_queue command_queue)
}
}
-cl_int clGetProgramInfo(
- cl_program program,
- cl_program_info param_name,
- size_t param_value_size,
- void *param_value,
- size_t *param_value_size_ret)
+cl_int clGetProgramInfo(cl_program program,
+ cl_program_info param_name,
+ size_t param_value_size,
+ void *param_value,
+ size_t *param_value_size_ret)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clGetProgramInfo_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(program, param_name, param_value_size, param_value, param_value_size_ret);
}
@@ -694,20 +800,17 @@ cl_int clGetProgramInfo(
}
}
-cl_kernel clCreateKernel(
- cl_program program,
- const char *kernel_name,
- cl_int *errcode_ret)
+cl_kernel clCreateKernel(cl_program program, const char *kernel_name, cl_int *errcode_ret)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clCreateKernel_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(program, kernel_name, errcode_ret);
}
else
{
- if(errcode_ret != nullptr)
+ if (errcode_ret != nullptr)
{
*errcode_ret = CL_OUT_OF_RESOURCES;
}
@@ -719,7 +822,7 @@ cl_int clRetainKernel(cl_kernel kernel)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clRetainKernel_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(kernel);
}
@@ -729,22 +832,17 @@ cl_int clRetainKernel(cl_kernel kernel)
}
}
-cl_mem clCreateBuffer(
- cl_context context,
- cl_mem_flags flags,
- size_t size,
- void *host_ptr,
- cl_int *errcode_ret)
+cl_mem clCreateBuffer(cl_context context, cl_mem_flags flags, size_t size, void *host_ptr, cl_int *errcode_ret)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clCreateBuffer_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(context, flags, size, host_ptr, errcode_ret);
}
else
{
- if(errcode_ret != nullptr)
+ if (errcode_ret != nullptr)
{
*errcode_ret = CL_OUT_OF_RESOURCES;
}
@@ -753,21 +851,17 @@ cl_mem clCreateBuffer(
}
cl_program clCreateProgramWithSource(
- cl_context context,
- cl_uint count,
- const char **strings,
- const size_t *lengths,
- cl_int *errcode_ret)
+ cl_context context, cl_uint count, const char **strings, const size_t *lengths, cl_int *errcode_ret)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clCreateProgramWithSource_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(context, count, strings, lengths, errcode_ret);
}
else
{
- if(errcode_ret != nullptr)
+ if (errcode_ret != nullptr)
{
*errcode_ret = CL_OUT_OF_RESOURCES;
}
@@ -779,7 +873,7 @@ cl_int clReleaseKernel(cl_kernel kernel)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clReleaseKernel_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(kernel);
}
@@ -792,12 +886,12 @@ cl_int clReleaseKernel(cl_kernel kernel)
cl_int clGetDeviceIDs(cl_platform_id platform,
cl_device_type device_type,
cl_uint num_entries,
- cl_device_id *devices,
+ cl_device_id *devices,
cl_uint *num_devices)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clGetDeviceIDs_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(platform, device_type, num_entries, devices, num_devices);
}
@@ -815,7 +909,7 @@ cl_int clGetDeviceInfo(cl_device_id device,
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clGetDeviceInfo_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(device, param_name, param_value_size, param_value, param_value_size_ret);
}
@@ -825,15 +919,12 @@ cl_int clGetDeviceInfo(cl_device_id device,
}
}
-cl_int clGetMemObjectInfo(cl_mem memobj,
- cl_mem_info param_name,
- size_t param_value_size,
- void *param_value,
- size_t *param_value_size_ret)
+cl_int clGetMemObjectInfo(
+ cl_mem memobj, cl_mem_info param_name, size_t param_value_size, void *param_value, size_t *param_value_size_ret)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clGetMemObjectInfo_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(memobj, param_name, param_value_size, param_value, param_value_size_ret);
}
@@ -847,7 +938,7 @@ cl_int clRetainEvent(cl_event event)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clRetainEvent_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(event);
}
@@ -857,11 +948,29 @@ cl_int clRetainEvent(cl_event event)
}
}
+cl_int clGetPlatformInfo(cl_platform_id platform,
+ cl_platform_info param_name,
+ size_t param_value_size,
+ void *param_value,
+ size_t *param_value_size_ret)
+{
+ arm_compute::CLSymbols::get().load_default();
+ auto func = arm_compute::CLSymbols::get().clGetPlatformInfo_ptr;
+ if (func != nullptr)
+ {
+ return func(platform, param_name, param_value_size, param_value, param_value_size_ret);
+ }
+ else
+ {
+ return CL_OUT_OF_RESOURCES;
+ }
+}
+
cl_int clGetPlatformIDs(cl_uint num_entries, cl_platform_id *platforms, cl_uint *num_platforms)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clGetPlatformIDs_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(num_entries, platforms, num_platforms);
}
@@ -871,17 +980,16 @@ cl_int clGetPlatformIDs(cl_uint num_entries, cl_platform_id *platforms, cl_uint
}
}
-cl_int
-clGetKernelWorkGroupInfo(cl_kernel kernel,
- cl_device_id device,
- cl_kernel_work_group_info param_name,
- size_t param_value_size,
- void *param_value,
- size_t *param_value_size_ret)
+cl_int clGetKernelWorkGroupInfo(cl_kernel kernel,
+ cl_device_id device,
+ cl_kernel_work_group_info param_name,
+ size_t param_value_size,
+ void *param_value,
+ size_t *param_value_size_ret)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clGetKernelWorkGroupInfo_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(kernel, device, param_name, param_value_size, param_value, param_value_size_ret);
}
@@ -891,16 +999,15 @@ clGetKernelWorkGroupInfo(cl_kernel kernel,
}
}
-cl_int
-clGetCommandQueueInfo(cl_command_queue command_queue,
- cl_command_queue_info param_name,
- size_t param_value_size,
- void *param_value,
- size_t *param_value_size_ret)
+cl_int clGetCommandQueueInfo(cl_command_queue command_queue,
+ cl_command_queue_info param_name,
+ size_t param_value_size,
+ void *param_value,
+ size_t *param_value_size_ret)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clGetCommandQueueInfo_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(command_queue, param_name, param_value_size, param_value, param_value_size_ret);
}
@@ -910,16 +1017,15 @@ clGetCommandQueueInfo(cl_command_queue command_queue,
}
}
-cl_int
-clGetKernelInfo(cl_kernel kernel,
- cl_kernel_info param_name,
- size_t param_value_size,
- void *param_value,
- size_t *param_value_size_ret)
+cl_int clGetKernelInfo(cl_kernel kernel,
+ cl_kernel_info param_name,
+ size_t param_value_size,
+ void *param_value,
+ size_t *param_value_size_ret)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clGetKernelInfo_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(kernel, param_name, param_value_size, param_value, param_value_size_ret);
}
@@ -929,16 +1035,15 @@ clGetKernelInfo(cl_kernel kernel,
}
}
-cl_int
-clGetEventProfilingInfo(cl_event event,
- cl_profiling_info param_name,
- size_t param_value_size,
- void *param_value,
- size_t *param_value_size_ret)
+cl_int clGetEventProfilingInfo(cl_event event,
+ cl_profiling_info param_name,
+ size_t param_value_size,
+ void *param_value,
+ size_t *param_value_size_ret)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clGetEventProfilingInfo_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(event, param_name, param_value_size, param_value, param_value_size_ret);
}
@@ -948,23 +1053,22 @@ clGetEventProfilingInfo(cl_event event,
}
}
-cl_mem
-clCreateImage(cl_context context,
- cl_mem_flags flags,
- const cl_image_format *image_format,
- const cl_image_desc *image_desc,
- void *host_ptr,
- cl_int *errcode_ret)
+cl_mem clCreateImage(cl_context context,
+ cl_mem_flags flags,
+ const cl_image_format *image_format,
+ const cl_image_desc *image_desc,
+ void *host_ptr,
+ cl_int *errcode_ret)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clCreateImage_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(context, flags, image_format, image_desc, host_ptr, errcode_ret);
}
else
{
- if(errcode_ret != nullptr)
+ if (errcode_ret != nullptr)
{
*errcode_ret = CL_OUT_OF_RESOURCES;
}
@@ -972,14 +1076,12 @@ clCreateImage(cl_context context,
}
}
-cl_int clSetKernelExecInfo(cl_kernel kernel,
- cl_kernel_exec_info param_name,
- size_t param_value_size,
- const void *param_value)
+cl_int
+clSetKernelExecInfo(cl_kernel kernel, cl_kernel_exec_info param_name, size_t param_value_size, const void *param_value)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clSetKernelExecInfo_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(kernel, param_name, param_value_size, param_value);
}
@@ -989,23 +1091,166 @@ cl_int clSetKernelExecInfo(cl_kernel kernel,
}
}
-cl_mem
-clImportMemoryARM(cl_context context,
- cl_mem_flags flags,
- const cl_import_properties_arm *properties,
- void *memory,
- size_t size,
- cl_int *errcode_ret)
+void *clGetExtensionFunctionAddressForPlatform(cl_platform_id platform, const char *funcname)
+{
+ arm_compute::CLSymbols::get().load_default();
+ const auto func = arm_compute::CLSymbols::get().clGetExtensionFunctionAddressForPlatform_ptr;
+
+ if (func != nullptr)
+ {
+ return func(platform, funcname);
+ }
+
+ return nullptr;
+}
+
+cl_command_buffer_khr clCreateCommandBufferKHR(cl_uint num_queues,
+ const cl_command_queue *queues,
+ const cl_command_buffer_properties_khr *properties,
+ cl_int *errcode_ret)
+{
+ arm_compute::CLSymbols::get().load_default();
+ const auto func = arm_compute::CLSymbols::get().clCreateCommandBufferKHR_ptr;
+
+ if (func != nullptr)
+ {
+ return func(num_queues, queues, properties, errcode_ret);
+ }
+ else
+ {
+ if (errcode_ret != nullptr)
+ {
+ *errcode_ret = CL_INVALID_OPERATION;
+ }
+
+ return {};
+ }
+}
+
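+// The remaining cl_khr_command_buffer wrappers below follow the same pattern: resolve the symbol
+// lazily through CLSymbols::load_default() and report CL_INVALID_OPERATION when the extension is absent.
+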
+cl_int clFinalizeCommandBufferKHR(cl_command_buffer_khr command_buffer)
+{
+ arm_compute::CLSymbols::get().load_default();
+ const auto func = arm_compute::CLSymbols::get().clFinalizeCommandBufferKHR_ptr;
+
+ if (func != nullptr)
+ {
+ return func(command_buffer);
+ }
+ else
+ {
+ return CL_INVALID_OPERATION;
+ }
+}
+
+cl_int clRetainCommandBufferKHR(cl_command_buffer_khr command_buffer)
+{
+ arm_compute::CLSymbols::get().load_default();
+ const auto func = arm_compute::CLSymbols::get().clRetainCommandBufferKHR_ptr;
+
+ if (func != nullptr)
+ {
+ return func(command_buffer);
+ }
+ else
+ {
+ return CL_INVALID_OPERATION;
+ }
+}
+
+cl_int clReleaseCommandBufferKHR(cl_command_buffer_khr command_buffer)
+{
+ arm_compute::CLSymbols::get().load_default();
+ const auto func = arm_compute::CLSymbols::get().clReleaseCommandBufferKHR_ptr;
+
+ if (func != nullptr)
+ {
+ return func(command_buffer);
+ }
+ else
+ {
+ return CL_INVALID_OPERATION;
+ }
+}
+
+cl_int clEnqueueCommandBufferKHR(cl_uint num_queues,
+ cl_command_queue *queues,
+ cl_command_buffer_khr command_buffer,
+ cl_uint num_events_in_wait_list,
+ const cl_event *event_wait_list,
+ cl_event *event)
+{
+ arm_compute::CLSymbols::get().load_default();
+ const auto func = arm_compute::CLSymbols::get().clEnqueueCommandBufferKHR_ptr;
+
+ if (func != nullptr)
+ {
+ return func(num_queues, queues, command_buffer, num_events_in_wait_list, event_wait_list, event);
+ }
+ else
+ {
+ return CL_INVALID_OPERATION;
+ }
+}
+
+cl_int clCommandNDRangeKernelKHR(cl_command_buffer_khr command_buffer,
+ cl_command_queue command_queue,
+ const cl_ndrange_kernel_command_properties_khr *properties,
+ cl_kernel kernel,
+ cl_uint work_dim,
+ const size_t *global_work_offset,
+ const size_t *global_work_size,
+ const size_t *local_work_size,
+ cl_uint num_sync_points_in_wait_list,
+ const cl_sync_point_khr *sync_point_wait_list,
+ cl_sync_point_khr *sync_point,
+ cl_mutable_command_khr *mutable_handle)
+{
+ arm_compute::CLSymbols::get().load_default();
+ const auto func = arm_compute::CLSymbols::get().clCommandNDRangeKernelKHR_ptr;
+
+ if (func != nullptr)
+ {
+ return func(command_buffer, command_queue, properties, kernel, work_dim, global_work_offset, global_work_size,
+ local_work_size, num_sync_points_in_wait_list, sync_point_wait_list, sync_point, mutable_handle);
+ }
+ else
+ {
+ return CL_INVALID_OPERATION;
+ }
+}
+
+cl_int clUpdateMutableCommandsKHR(cl_command_buffer_khr command_buffer,
+ const cl_mutable_base_config_khr *mutable_config)
+{
+ arm_compute::CLSymbols::get().load_default();
+ const auto func = arm_compute::CLSymbols::get().clUpdateMutableCommandsKHR_ptr;
+
+ if (func != nullptr)
+ {
+ return func(command_buffer, mutable_config);
+ }
+ else
+ {
+ return CL_INVALID_OPERATION;
+ }
+}
+
+cl_mem clImportMemoryARM(cl_context context,
+ cl_mem_flags flags,
+ const cl_import_properties_arm *properties,
+ void *memory,
+ size_t size,
+ cl_int *errcode_ret)
{
arm_compute::CLSymbols::get().load_default();
auto func = arm_compute::CLSymbols::get().clImportMemoryARM_ptr;
- if(func != nullptr)
+ if (func != nullptr)
{
return func(context, flags, properties, memory, size, errcode_ret);
}
else
{
- if(errcode_ret != nullptr)
+ if (errcode_ret != nullptr)
{
*errcode_ret = CL_OUT_OF_RESOURCES;
}
diff --git a/src/core/CL/cl_kernels/activation_float_helpers.h b/src/core/CL/cl_kernels/activation_float_helpers.h
index 91d7197889..02faae2369 100644
--- a/src/core/CL/cl_kernels/activation_float_helpers.h
+++ b/src/core/CL/cl_kernels/activation_float_helpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2020, 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -31,7 +31,8 @@
#endif // GPU_ARCH == GPU_ARCH_BIFROST
// Hard-Swish
-#define hard_swish_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (x * ((min(max((x + (DATA_TYPE)3.0), (DATA_TYPE)0.0), (DATA_TYPE)6.0)) * (DATA_TYPE)0.166666667))
+#define hard_swish_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) \
+ (x * ((min(max((x + (DATA_TYPE)3.0), (DATA_TYPE)0.0), (DATA_TYPE)6.0)) * (DATA_TYPE)0.166666667))
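+// (hard_swish(x) = x * min(max(x + 3, 0), 6) / 6; the constant 0.166666667 approximates 1/6)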
// Logistic Activation
#define logistic_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ((DATA_TYPE)1.0 / ((DATA_TYPE)1.0 + exp(-x)))
@@ -49,13 +50,16 @@
#define lu_brelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (min(max(x, (DATA_TYPE)B_VAL), (DATA_TYPE)A_VAL))
// Leaky RELU Activation
-#define lrelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ((min(x, (DATA_TYPE)0.0) * (DATA_TYPE)A_VAL) + max(x, (DATA_TYPE)0.0))
+#define lrelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) \
+ ((min(x, (DATA_TYPE)0.0) * (DATA_TYPE)A_VAL) + max(x, (DATA_TYPE)0.0))
// Soft RELU Activation
#define srelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (log((DATA_TYPE)1.0 + exp(x)))
// ELU Activation
-#define elu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (select(((DATA_TYPE)A_VAL * (exp(x) - (DATA_TYPE)1.0)), x, (SELECT_VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))isgreaterequal(x, (DATA_TYPE)0.0)))
+#define elu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) \
+ (select(((DATA_TYPE)A_VAL * (exp(x) - (DATA_TYPE)1.0)), x, \
+ (SELECT_VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))isgreaterequal(x, (DATA_TYPE)0.0)))
// Absolute Activation
#define abs_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (fabs(x))
@@ -69,6 +73,10 @@
// Linear Activation
#define linear_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (MLA((DATA_TYPE)B_VAL, (DATA_TYPE)A_VAL, x))
+// GELU Activation
+#define gelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) \
+ (x * (DATA_TYPE)0.5 * ((DATA_TYPE)1.0 + erf(x / (DATA_TYPE)1.41421356237)))
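+// (GELU(x) = 0.5 * x * (1 + erf(x / sqrt(2))); the constant 1.41421356237 approximates sqrt(2))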
+
// Identity Activation
#define identity_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (x)
diff --git a/src/core/CL/cl_kernels/activation_quant_helpers.h b/src/core/CL/cl_kernels/activation_quant_helpers.h
index a32e4e94a3..c758ff1278 100644
--- a/src/core/CL/cl_kernels/activation_quant_helpers.h
+++ b/src/core/CL/cl_kernels/activation_quant_helpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -51,21 +51,26 @@ inline TYPE lu_brelu_op(TYPE x)
// Hard Swish Activation
inline TYPE hard_swish_op(TYPE x)
{
- return (x * ((min(max((TYPE)(x + (TYPE)3.f), (TYPE)0.f), (TYPE)6.f)) * (TYPE)0.166666667f));
+ return (x * ((min(max((TYPE)(x + (TYPE)3.f), (TYPE)0.f), (TYPE)6.f)) * (TYPE)0.166666667f));
+}
+
+inline TYPE identiy_op(TYPE x)
+{
+ return x;
}
#define ACTIVATION_OP2(op, x) op##_op(x)
-#define ACTIVATION_OP(op, x) ACTIVATION_OP2(op, x)
+#define ACTIVATION_OP(op, x) ACTIVATION_OP2(op, x)
#if defined(S1_VAL) && defined(S2_VAL)
#if defined(O1_VAL) && defined(O2_VAL)
#define PERFORM_ACTIVATION_QUANT(act, data) \
({ \
data = ACTIVATION_OP(act, data); \
- \
+ \
VEC_DATA_TYPE(float, VEC_SIZE) \
fdata = CONVERT(data, VEC_DATA_TYPE(float, VEC_SIZE)); \
- \
+ \
fdata = round((fdata - (float)O1_VAL) * ((float)S1_VAL / (float)S2_VAL) + (float)O2_VAL); \
data = CONVERT_SAT(fdata, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)); \
})
@@ -73,17 +78,14 @@ inline TYPE hard_swish_op(TYPE x)
#define PERFORM_ACTIVATION_QUANT(act, data) \
({ \
data = ACTIVATION_OP(act, data); \
- \
+ \
VEC_DATA_TYPE(float, VEC_SIZE) \
fdata = CONVERT(data, VEC_DATA_TYPE(float, VEC_SIZE)); \
- \
+ \
fdata = round((fdata) * ((float)S1_VAL / (float)S2_VAL)); \
data = CONVERT_SAT(fdata, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)); \
})
#endif /* defined(O1_VAL) && defined(O2_VAL) */
#else /* defined(S1_VAL) && defined(S2_VAL) */
-#define PERFORM_ACTIVATION_QUANT(act, data) \
- ({ \
- data = ACTIVATION_OP(act, data); \
- })
+#define PERFORM_ACTIVATION_QUANT(act, data) ({ data = ACTIVATION_OP(act, data); })
#endif /* defined(S1_VAL) && defined(S2_VAL) */
diff --git a/src/core/CL/cl_kernels/batch_to_space.cl b/src/core/CL/cl_kernels/batch_to_space.cl
deleted file mode 100644
index 8a71985b02..0000000000
--- a/src/core/CL/cl_kernels/batch_to_space.cl
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers.h"
-
-#if defined(DATA_TYPE) && defined(BATCH_SIZE)
-/** Batch to space transformation. (NCHW)
- *
- * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
- * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
- * @note The input tensor batch size must be passed at compile time using -DBATCH_SIZE. e.g. -DBATCH_SIZE=2
- *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: All
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
- * @param[in] batch_id The input tensor batch id
- * @param[in] block_shape_ptr Pointer to the source tensor. Supported data types: S32
- * @param[in] block_shape_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] block_shape_step_x block_shape_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] block_shape_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] block_shape_step_y block_shape_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void batch_to_space_nchw(
- TENSOR3D_DECLARATION(input),
- const int batch_id,
- VECTOR_DECLARATION(block_shape),
- TENSOR4D_DECLARATION(output))
-{
- Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output, 0);
- Vector block = CONVERT_TO_VECTOR_STRUCT_NO_STEP(block_shape);
-
- const int block_x = *((__global int *)vector_offset(&block, 0));
- const int block_y = *((__global int *)vector_offset(&block, 1));
-
- const int r = (BATCH_SIZE / (block_x * block_y));
- const int x = get_global_id(0);
- const int y = get_global_id(1);
- const int z = get_global_id(2);
- const int w = batch_id % r;
-
- const int out_x = x * block_x + (batch_id / r) % block_x;
- const int out_y = y * block_y + (batch_id / r) / block_x;
-
- *((__global DATA_TYPE *)tensor4D_offset(&out, out_x, out_y, z, w)) = *((__global DATA_TYPE *)in.ptr);
-}
-/** Batch to space transformation. (NHWC)
- *
- * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
- * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
- * @note The input tensor batch size must be passed at compile time using -DBATCH_SIZE. e.g. -DBATCH_SIZE=2
- *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: All
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
- * @param[in] batch_id The input tensor batch id
- * @param[in] block_shape_ptr Pointer to the source tensor. Supported data types: S32
- * @param[in] block_shape_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] block_shape_step_x block_shape_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] block_shape_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] block_shape_step_y block_shape_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void batch_to_space_nhwc(
- TENSOR3D_DECLARATION(input),
- const int batch_id,
- VECTOR_DECLARATION(block_shape),
- TENSOR4D_DECLARATION(output))
-{
- Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output, 0);
- Vector block = CONVERT_TO_VECTOR_STRUCT_NO_STEP(block_shape);
-
- const int block_x = *((__global int *)vector_offset(&block, 0));
- const int block_y = *((__global int *)vector_offset(&block, 1));
-
- const int r = (BATCH_SIZE / (block_x * block_y));
- const int x = get_global_id(1);
- const int y = get_global_id(2);
- const int z = get_global_id(0);
- const int w = batch_id % r;
-
- const int out_x = x * block_x + (batch_id / r) % block_x;
- const int out_y = y * block_y + (batch_id / r) / block_x;
-
- *((__global DATA_TYPE *)tensor4D_offset(&out, z, out_x, out_y, w)) = *((__global DATA_TYPE *)in.ptr);
-}
-#endif // defined(DATA_TYPE) && defined(BATCH_SIZE)
-
-#if defined(DATA_TYPE) && defined(BATCH_SIZE) && defined(BLOCK_SHAPE_X) && defined(BLOCK_SHAPE_Y)
-/** Batch to space transformation. (NCHW)
- *
- * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
- * @note The input tensor batch size must be passed at compile time using -DBATCH_SIZE. e.g. -DBATCH_SIZE=2
- * @note The block shape x must be passed at compile time using -DBLOCK_SHAPE_X. e.g. -DBLOCK_SHAPE_X=2
- * @note The block shape y must be passed at compile time using -DBLOCK_SHAPE_Y. e.g. -DBLOCK_SHAPE_Y=2
- *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: All
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
- * @param[in] batch_id The input tensor batch id
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void batch_to_space_static_nchw(
- TENSOR3D_DECLARATION(input),
- const int batch_id,
- TENSOR4D_DECLARATION(output))
-{
- Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output, 0);
-
- const int block_x = BLOCK_SHAPE_X;
- const int block_y = BLOCK_SHAPE_Y;
-
- const int r = (BATCH_SIZE / (block_x * block_y));
- const int x = get_global_id(0);
- const int y = get_global_id(1);
- const int z = get_global_id(2);
- const int w = batch_id % r;
-
- const int out_x = x * block_x + (batch_id / r) % block_x;
- const int out_y = y * block_y + (batch_id / r) / block_x;
-
- *((__global DATA_TYPE *)tensor4D_offset(&out, out_x, out_y, z, w)) = *((__global DATA_TYPE *)in.ptr);
-}
-/** Batch to space transformation. (NHWC)
- *
- * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
- * @note The input tensor batch size must be passed at compile time using -DBATCH_SIZE. e.g. -DBATCH_SIZE=2
- * @note The block shape x must be passed at compile time using -DBLOCK_SHAPE_X. e.g. -DBLOCK_SHAPE_X=2
- * @note The block shape y must be passed at compile time using -DBLOCK_SHAPE_Y. e.g. -DBLOCK_SHAPE_Y=2
- *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: All
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
- * @param[in] batch_id The input tensor batch id
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void batch_to_space_static_nhwc(
- TENSOR3D_DECLARATION(input),
- const int batch_id,
- TENSOR4D_DECLARATION(output))
-{
- Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output, 0);
-
- const int block_x = BLOCK_SHAPE_X;
- const int block_y = BLOCK_SHAPE_Y;
-
- const int r = (BATCH_SIZE / (block_x * block_y));
- const int x = get_global_id(1);
- const int y = get_global_id(2);
- const int z = get_global_id(0);
- const int w = batch_id % r;
-
- const int out_x = x * block_x + (batch_id / r) % block_x;
- const int out_y = y * block_y + (batch_id / r) / block_x;
-
- *((__global DATA_TYPE *)tensor4D_offset(&out, z, out_x, out_y, w)) = *((__global DATA_TYPE *)in.ptr);
-}
-#endif // defined(DATA_TYPE) && defined(BATCH_SIZE) && defined(BLOCK_SHAPE_X) && defined(BLOCK_SHAPE_Y)
\ No newline at end of file
diff --git a/src/core/CL/cl_kernels/batchnormalization_layer.cl b/src/core/CL/cl_kernels/batchnormalization_layer.cl
deleted file mode 100644
index 89cbe4440e..0000000000
--- a/src/core/CL/cl_kernels/batchnormalization_layer.cl
+++ /dev/null
@@ -1,418 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers.h"
-
-#define ADD_OP(a, b) ((a) + (b))
-#define SUB_OP(a, b) ((a) - (b))
-#define MUL_OP(a, b) ((a) * (b))
-#define INVSQRT_OP(a) rsqrt((a))
-#define SQCVT_SAT(a) (a)
-
-#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(ACTIVATION_TYPE)
-#include "activation_float_helpers.h"
-
-/** Apply batch normalization.
- *
- * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
- *
- * @param[in] input_ptr Pointer to the first source tensor. Supported data types: F16/F32
- * @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the first source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the first source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] mean_ptr Pointer to the mean source tensor. Supported data types: same as @p input_ptr
- * @param[in] mean_stride_x Stride of the mean source tensor in X dimension (in bytes)
- * @param[in] mean_step_x mean_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] mean_offset_first_element_in_bytes The offset of the first element in the mean source tensor
- * @param[in] var_ptr Pointer to the var tensor. Supported data types: same as @p input_ptr
- * @param[in] var_stride_x Stride of the var tensor in X dimension (in bytes)
- * @param[in] var_step_x var_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] var_offset_first_element_in_bytes The offset of the first element in the var source tensor
- * @param[in] beta_ptr Pointer to the beta source tensor. Supported data types: same as @p input_ptr
- * @param[in] beta_stride_x Stride of the beta source tensor in X dimension (in bytes)
- * @param[in] beta_step_x beta_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] beta_offset_first_element_in_bytes The offset of the first element in the beta source tensor
- * @param[in] gamma_ptr Pointer to the gamma source tensor. Supported data types: same as @p input_ptr
- * @param[in] gamma_stride_x Stride of the gamma source tensor in X dimension (in bytes)
- * @param[in] gamma_step_x gamma_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] gamma_offset_first_element_in_bytes The offset of the first element in the gamma source tensor
- * @param[in] epsilon Epsilon parameter in the batch normalization equation
- */
-__kernel void batchnormalization_layer_nchw(TENSOR3D_DECLARATION(input),
-#ifndef IN_PLACE
- TENSOR3D_DECLARATION(output),
-#endif /* not IN_PLACE */
- VECTOR_DECLARATION(mean),
- VECTOR_DECLARATION(var),
-#ifndef USE_DEFAULT_BETA
- VECTOR_DECLARATION(beta),
-#endif /* USE_DEFAULT_BETA */
-#ifndef USE_DEFAULT_GAMMA
- VECTOR_DECLARATION(gamma),
-#endif /* USE_DEFAULT_GAMMA */
- float epsilon)
-{
- Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
-#ifdef IN_PLACE
- Tensor3D out = in;
-#else /* IN_PLACE */
- Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
-#endif /* IN_PLACE */
- Vector mean = CONVERT_TO_VECTOR_STRUCT(mean);
- Vector var = CONVERT_TO_VECTOR_STRUCT(var);
-#ifndef USE_DEFAULT_BETA
- Vector beta = CONVERT_TO_VECTOR_STRUCT(beta);
-#endif /* USE_DEFAULT_BETA */
-#ifndef USE_DEFAULT_GAMMA
- Vector gamma = CONVERT_TO_VECTOR_STRUCT(gamma);
-#endif /* USE_DEFAULT_GAMMA */
-
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- data = 0;
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- denominator = 0;
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- numerator = 0;
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- x_bar = 0;
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- res = 0;
-
- const int current_slice = get_global_id(2);
-
- data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)in.ptr);
- denominator = *((__global DATA_TYPE *)(var.ptr + current_slice * var.stride_x));
- denominator = INVSQRT_OP(ADD_OP(denominator, ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))SQCVT_SAT(epsilon))));
-
- // Calculate x bar and store results
- numerator = *((__global DATA_TYPE *)(mean.ptr + current_slice * mean.stride_x));
- numerator = SUB_OP(data, numerator);
- x_bar = MUL_OP(numerator, denominator);
-
-#ifndef USE_DEFAULT_GAMMA
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- gamma_vec = *((__global DATA_TYPE *)(gamma.ptr + current_slice * gamma.stride_x));
-
- res = MUL_OP(gamma_vec, x_bar);
-#else /* USE_DEFAULT_GAMMA */
- // gamma is equal to 1, no need to perform multiplications
- res = x_bar;
-#endif /* USE_DEFAULT_GAMMA */
-
-#ifndef USE_DEFAULT_BETA
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- beta_vec = *((__global DATA_TYPE *)(beta.ptr + current_slice * beta.stride_x));
- // beta is not zero, hence we need to perform the addition
- res = ADD_OP(res, beta_vec);
-#endif /* USE_DEFAULT_BETA */
-
- res = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, res, A_VAL, B_VAL);
-
- VSTORE(VEC_SIZE)
- (res, 0, (__global DATA_TYPE *)out.ptr);
-}
-
-/** Apply batch normalization on tensors with NHWC format.
- *
- * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
- *
- * @param[in] input_ptr Pointer to the first source tensor. Supported data types: F16/F32
- * @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the first source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the first source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] mean_ptr Pointer to the mean source tensor. Supported data types: same as @p input_ptr
- * @param[in] mean_stride_x Stride of the mean source tensor in X dimension (in bytes)
- * @param[in] mean_step_x mean_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] mean_offset_first_element_in_bytes The offset of the first element in the mean source tensor
- * @param[in] var_ptr Pointer to the var tensor. Supported data types: same as @p input_ptr
- * @param[in] var_stride_x Stride of the var tensor in X dimension (in bytes)
- * @param[in] var_step_x var_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] var_offset_first_element_in_bytes The offset of the first element in the var source tensor
- * @param[in] beta_ptr Pointer to the beta source tensor. Supported data types: same as @p input_ptr
- * @param[in] beta_stride_x Stride of the beta source tensor in X dimension (in bytes)
- * @param[in] beta_step_x beta_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] beta_offset_first_element_in_bytes The offset of the first element in the beta source tensor
- * @param[in] gamma_ptr Pointer to the gamma source tensor. Supported data types: same as @p input_ptr
- * @param[in] gamma_stride_x Stride of the gamma source tensor in X dimension (in bytes)
- * @param[in] gamma_step_x gamma_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] gamma_offset_first_element_in_bytes The offset of the first element in the gamma source tensor
- * @param[in] epsilon Epsilon parameter in the batch normalization equation
- */
-__kernel void batchnormalization_layer_nhwc(TENSOR3D_DECLARATION(input),
-#ifndef IN_PLACE
- TENSOR3D_DECLARATION(output),
-#endif /* not IN_PLACE */
- VECTOR_DECLARATION(mean),
- VECTOR_DECLARATION(var),
-#ifndef USE_DEFAULT_BETA
- VECTOR_DECLARATION(beta),
-#endif /* USE_DEFAULT_BETA */
-#ifndef USE_DEFAULT_GAMMA
- VECTOR_DECLARATION(gamma),
-#endif /* USE_DEFAULT_GAMMA */
- float epsilon)
-{
- uint x_offs = max((int)(get_global_id(0) * VEC_SIZE * sizeof(DATA_TYPE) - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE * sizeof(DATA_TYPE)), 0);
-
- __global uchar *input_addr = input_ptr + input_offset_first_element_in_bytes + x_offs + get_global_id(1) * input_stride_y + get_global_id(2) * input_stride_z;
-#ifdef IN_PLACE
- __global uchar *output_addr = input_ptr;
-#else /* IN_PLACE */
- __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x_offs + get_global_id(1) * output_stride_y + get_global_id(2) * output_stride_z;
-#endif /* IN_PLACE */
- __global uchar *mean_addr = mean_ptr + mean_offset_first_element_in_bytes + x_offs;
- __global uchar *var_addr = var_ptr + var_offset_first_element_in_bytes + x_offs;
-#ifndef USE_DEFAULT_BETA
- __global uchar *beta_addr = beta_ptr + beta_offset_first_element_in_bytes + x_offs;
-#endif /* USE_DEFAULT_BETA */
-#ifndef USE_DEFAULT_GAMMA
- __global uchar *gamma_addr = gamma_ptr + gamma_offset_first_element_in_bytes + x_offs;
-#endif /* USE_DEFAULT_GAMMA */
-
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- data = 0;
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- denominator = 0;
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- numerator = 0;
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- x_bar = 0;
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- res0 = 0;
-
- data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input_addr);
- denominator = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)var_addr);
- denominator = INVSQRT_OP(ADD_OP(denominator, ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))SQCVT_SAT(epsilon))));
-
- // Calculate x bar and store results
- numerator = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)mean_addr);
- numerator = SUB_OP(data, numerator);
- x_bar = MUL_OP(numerator, denominator);
-
-#ifndef USE_DEFAULT_GAMMA
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- gamma_vec = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)gamma_addr);
-
- res0 = MUL_OP(gamma_vec, x_bar);
-#else /* USE_DEFAULT_GAMMA */
- // gamma is equal to 1, no need to perform multiplications
- res0 = x_bar;
-#endif /* USE_DEFAULT_GAMMA */
-
-#ifndef USE_DEFAULT_BETA
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- beta_vec = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)beta_addr);
- // beta is not zero, hence we need to perform the addition
- res0 = ADD_OP(res0, beta_vec);
-#endif /* USE_DEFAULT_BETA */
-
- res0 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, res0, A_VAL, B_VAL);
-
- STORE_VECTOR_SELECT(res, DATA_TYPE, output_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
-}
-#endif /* defined(VEC_SIZE) && defined(DATA_TYPE) && defined(ACTIVATION_TYPE) */
-
-#if defined(DATA_TYPE) && defined(EPSILON)
-/** OpenCL kernel to fuse the weights of convolution or depthwise convolution layer with batch normalization when the data layout is either NCHW or NHWC
- *
- * @note The input weights tensor is assumed 4D with the OFMs in the fourth dimension
- * @note Data type should be passed at compile time using the -DDATA_TYPE, e.g. -DDATA_TYPE=float
- * @note The third dimension of the input tensor should be passed at compile time when weights belong to a convolution layer using -DDIM2=size. e.g. -DDIM2=16.
- * For depthwise convolution weight do not pass DIM2
- * @note Data layout NHWC should be passed at compile time with -DNHWC. For data layout NCHW it is not required to pass any parameter
- * @note Batch normalization epsilon parameter should be passed at compile time using -DEPSILON=value. e.g. -DEPSILON=0.001f
- *
- * @param[in] w_ptr Pointer to the weights tensor. Supported data types: F16/F32
- * @param[in] w_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] w_step_x w_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] w_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] w_step_y w_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] w_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] w_step_z w_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] w_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] b_ptr (Optional) Pointer to the bias tensor. Supported data types: same as @p w_ptr
- * @param[in] b_stride_x (Optional) Stride of the bias tensor in X dimension (in bytes)
- * @param[in] b_step_x (Optional) b_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] b_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes)
- * @param[in] b_step_y (Optional) b_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] b_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes)
- * @param[in] b_step_z (Optional) b_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] b_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
- * @param[in] mean_ptr Pointer to the mean source tensor. Supported data types: same as @p w_ptr
- * @param[in] mean_stride_x Stride of the mean source tensor in X dimension (in bytes)
- * @param[in] mean_step_x mean_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] mean_offset_first_element_in_bytes The offset of the first element in the mean source tensor
- * @param[in] var_ptr Pointer to the var tensor. Supported data types: same as @p w_ptr
- * @param[in] var_stride_x Stride of the var tensor in X dimension (in bytes)
- * @param[in] var_step_x var_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] var_offset_first_element_in_bytes The offset of the first element in the var source tensor
- * @param[out] w_fused_ptr (Optional) Pointer to the destination weights tensors. Supported data types: same as @p w_ptr
- * @param[in] w_fused_stride_x (Optional) Stride of the destination weights tensor in X dimension (in bytes)
- * @param[in] w_fused_step_x (Optional) w_fused_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] w_fused_stride_y (Optional) Stride of the destination weights tensor in Y dimension (in bytes)
- * @param[in] w_fused_step_y (Optional) w_fused_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] w_fused_stride_z (Optional) Stride of the destination weights tensor in Z dimension (in bytes)
- * @param[in] w_fused_step_z (Optional) w_fused_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] w_fused_offset_first_element_in_bytes (Optional) The offset of the first element in the destination weights tensor
- * @param[in] b_fused_ptr (Optional) Pointer to the destination bias tensor. Supported data types: same as @p w_ptr
- * @param[in] b_fused_stride_x (Optional) Stride of the destination bias tensor in X dimension (in bytes)
- * @param[in] b_fused_step_x (Optional) b_fused_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] b_fused_offset_first_element_in_bytes (Optional) The offset of the first element in the destination bias tensor
- * @param[in] beta_ptr (Optional) Pointer to the beta source tensor. Supported data types: same as @p w_ptr
- * @param[in] beta_stride_x (Optional) Stride of the beta source tensor in X dimension (in bytes)
- * @param[in] beta_step_x (Optional) beta_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] beta_offset_first_element_in_bytes (Optional) The offset of the first element in the beta source tensor
- * @param[in] gamma_ptr (Optional) Pointer to the gamma source tensor. Supported data types: same as @p w_ptr
- * @param[in] gamma_stride_x (Optional) Stride of the gamma source tensor in X dimension (in bytes)
- * @param[in] gamma_step_x (Optional) gamma_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] gamma_offset_first_element_in_bytes (Optional) The offset of the first element in the gamma source tensor
- */
-__kernel void fuse_batchnormalization_layer(TENSOR3D_DECLARATION(w),
-#if defined(BIAS)
- VECTOR_DECLARATION(b),
-#endif // defined(BIAS)
- VECTOR_DECLARATION(mean),
- VECTOR_DECLARATION(var)
-#ifndef IN_PLACE_W
- ,
- TENSOR3D_DECLARATION(w_fused)
-#endif // ifndef IN_PLACE_W
-#ifndef IN_PLACE_B
- ,
- VECTOR_DECLARATION(b_fused)
-#endif // ifndef IN_PLACE_B
-#if defined(BETA)
- ,
- VECTOR_DECLARATION(beta)
-#endif // defined(BETA)
-#if defined(GAMMA)
- ,
- VECTOR_DECLARATION(gamma)
-#endif // defined(GAMMA)
- )
-{
- int x = get_global_id(0);
- int y = get_global_id(1);
- int z = get_global_id(2);
-
-#if defined(DIM2)
- int c0 = z % DIM2;
- int c1 = z / DIM2;
-#else // ! defined(DIM2)
- int c0 = 0;
-#if defined(NHWC)
- int c1 = x;
-#else // defined(NHWC)
- int c1 = z;
-#endif // defined(NHWC)
-#endif // defined(DIM2)
-
- int w_offset = x * sizeof(DATA_TYPE) + y * w_stride_y + z * w_stride_z;
- int v_offset = c1 * sizeof(DATA_TYPE);
-
- DATA_TYPE w_old = 0.0f;
- DATA_TYPE b_old = 0.0f;
- DATA_TYPE w_new = 0.0f;
- DATA_TYPE b_new = 0.0f;
- DATA_TYPE gamma = 1.0f;
- DATA_TYPE mean = 0.0f;
- DATA_TYPE var = 1.0f;
- DATA_TYPE beta = 0.0f;
-
- w_old = *((__global DATA_TYPE *)(w_ptr + w_offset + w_offset_first_element_in_bytes));
- var = *((__global DATA_TYPE *)(var_ptr + v_offset + var_offset_first_element_in_bytes));
- mean = *((__global DATA_TYPE *)(mean_ptr + v_offset + mean_offset_first_element_in_bytes));
-
-#if defined(GAMMA)
- gamma = *((__global DATA_TYPE *)(gamma_ptr + v_offset + gamma_offset_first_element_in_bytes));
-#endif // defined(GAMMA)
-
- // Compute new weight
- w_new = (gamma * w_old) / (sqrt(var + EPSILON));
-
-#if defined(IN_PLACE_W)
- *((__global DATA_TYPE *)(w_ptr + w_offset + w_offset_first_element_in_bytes)) = w_new;
-#else // defined(IN_PLACE_W)
- *((__global DATA_TYPE *)(w_fused_ptr + w_offset + w_fused_offset_first_element_in_bytes)) = w_new;
-#endif // defined(IN_PLACE_W)
-
- // Compute bias
-#if !defined(DIM2) && defined(NHWC)
- if(z == 0 && y == 0)
-#else // !defined(DIM2) && defined(NHWC)
- if(x == 0 && y == 0 && c0 == 0)
-#endif // !defined(DIM2) && defined(NHWC)
- {
-#if defined(BIAS)
- b_old = *((__global DATA_TYPE *)(b_ptr + v_offset + b_offset_first_element_in_bytes));
-#endif // defined(BIAS)
-#if defined(BETA)
- beta = *((__global DATA_TYPE *)(beta_ptr + v_offset + beta_offset_first_element_in_bytes));
-#endif // defined(BETA)
-
- b_new = ((gamma * (b_old - mean)) / (sqrt(var + EPSILON))) + beta;
-
-#if defined(BIAS)
-
-#if defined(IN_PLACE_B)
- *((__global DATA_TYPE *)(b_ptr + v_offset + b_offset_first_element_in_bytes)) = b_new;
-#else // defined(IN_PLACE_B)
- *((__global DATA_TYPE *)(b_fused_ptr + v_offset + b_fused_offset_first_element_in_bytes)) = b_new;
-#endif // defined(IN_PLACE_B)
-
-#else // defined(BIAS)
-
-#ifndef IN_PLACE_B
- *((__global DATA_TYPE *)(b_fused_ptr + v_offset + b_fused_offset_first_element_in_bytes)) = b_new;
-#endif // ifndef IN_PLACE_B
-
-#endif // defined(BIAS)
- }
-}
-#endif // defined(DATA_TYPE) && defined(EPSILON) \ No newline at end of file
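Note: both kernels removed above evaluate the standard batch normalization expression per element before the optional fused activation. A minimal scalar sketch of that computation (OpenCL C; the helper name is illustrative and not part of the library, and gamma/beta fall back to 1 and 0 when USE_DEFAULT_GAMMA / USE_DEFAULT_BETA are defined):

    // Per-element computation performed by the deleted batchnormalization_layer_nchw/_nhwc kernels:
    // x_bar = (x - mean) / sqrt(var + epsilon); result = gamma * x_bar + beta
    inline float batchnorm_element(float x, float mean, float var,
                                   float gamma, float beta, float epsilon)
    {
        const float x_bar = (x - mean) * rsqrt(var + epsilon); // INVSQRT_OP(ADD_OP(var, epsilon))
        return gamma * x_bar + beta;                           // activation is then applied via ACTIVATION(...)
    }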
diff --git a/src/core/CL/cl_kernels/channel_shuffle.cl b/src/core/CL/cl_kernels/channel_shuffle.cl
deleted file mode 100644
index 9a87eb4af3..0000000000
--- a/src/core/CL/cl_kernels/channel_shuffle.cl
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
-* Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers.h"
-
-#if defined(DATA_TYPE) && defined(VEC_SIZE) && defined(NUM_GROUPS) && defined(K) && defined(SRC_DIM_Z)
-
-// Check valid VEC_SIZES
-#if VEC_SIZE != 4 && VEC_SIZE != 8 && VEC_SIZE != 16
-#error "Only vector sizes 4, 8 and 16 are supported"
-#endif // VEC_SIZE != 4 && VEC_SIZE != 8 && VEC_SIZE != 16
-
-#define TYPE VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
-
-#define DIV_MOD_UINT(x, y, div_res, mod_res) \
- ({ \
- div_res = (uint)((x) * (float)(1.0f / (float)(y))); \
- uint r = div_res * (y); \
- mod_res = (x)-r; \
- })
-
-/** Performs channel shuffle when the data layout is NCHW. See https://arxiv.org/pdf/1707.01083.pdf for details.
- *
- * @note The vector size must be given as a preprocessor argument using -DVEC_SIZE=num. e.g. -DVEC_SIZE=4
- * @note The depth of the tensor must be given as a preprocessor argument using -DSRC_DIM_Z=num. e.g. -DSRC_DIM_Z=64
- * @note The number of groups must be given as a preprocessor argument using -DNUM_GROUPS=num_groups. e.g. -DNUM_GROUPS=2
- * @note The number of channels in each group must be given as a preprocessor argument using -DK=num. e.g. -DK=1
- * K is equal to num_channels / num_groups.
- *
- * @param[in] src_ptr Pointer to the source matrix. Supported data types: All
- * @param[in] src_stride_x Stride of the first source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the first source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the first source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the first source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the first source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w output_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void channel_shuffle_nchw(TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst))
-{
- uint curr_channel = 0; // channel id of input
- uint batch_id = 0; // batch id
- uint group_id = 0; // group id
- uint channel_id = 0; // channel id within the group
-
- // Compute curr_channel and batch_id
- DIV_MOD_UINT(get_global_id(2), SRC_DIM_Z, batch_id, curr_channel);
-
- // Compute group_id and channel_id
- DIV_MOD_UINT(curr_channel, K, group_id, channel_id);
-
- const uint x = get_global_id(0) * VEC_SIZE;
- const uint y = get_global_id(1) * 2;
- const uint z = channel_id * NUM_GROUPS + group_id;
-
- // Load the Nx2 block
- const __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + y * src_stride_y + curr_channel * src_stride_z + batch_id * src_stride_w;
- TYPE u0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_ptr + 0 * src_stride_y));
- TYPE u1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_ptr + 1 * src_stride_y));
-
- // Store blocks
- __global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + y * dst_stride_y + z * dst_stride_z + batch_id * dst_stride_w;
- VSTORE(VEC_SIZE)
- (u0, 0, (__global DATA_TYPE *)(output_ptr + 0 * dst_stride_y));
- VSTORE(VEC_SIZE)
- (u1, 0, (__global DATA_TYPE *)(output_ptr + 1 * dst_stride_y));
-}
-
-#if VEC_SIZE == 4 && defined(LAST_ACCESSED)
-/** Performs channel shuffle when the data layout is NHWC. See https://arxiv.org/pdf/1707.01083.pdf for details.
- *
- * @note This implementation is only defined for VEC_SIZE = 4
- * @note This last element accessed along the first dimension must be given as a preprocessor argument using -DLAST_ACCESSED=num. e.g. -DLAST_ACCESSED=64 in order to prevent out-of-bound writes.
- * @note The vector size must be given as a preprocessor argument using -DVEC_SIZE=num. e.g. -DVEC_SIZE=4
- * @note The height of the tensor must be given as a preprocessor argument using -DSRC_DIM_Z=num. e.g. -DSRC_DIM_Z=64
- * @note The number of groups must be given as a preprocessor argument using -DNUM_GROUPS=num_groups. e.g. -DNUM_GROUPS=2
- * @note The number of channels in each group must be given as a preprocessor argument using -DK=num. e.g. -DK=1
- * K is equal to num_channels / num_groups.
- *
- * @param[in] src_ptr Pointer to the source matrix. Supported data types: All
- * @param[in] src_stride_x Stride of the first source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the first source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the first source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the first source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the first source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w output_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void channel_shuffle_nhwc(TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst))
-{
- const uint curr_channel = min((uint)(get_global_id(0) * VEC_SIZE), (uint)LAST_ACCESSED); // input feature map
- uint channel_id0 = 0;
- uint channel_id1 = 0;
- uint channel_id2 = 0;
- uint channel_id3 = 0;
- uint group_id0 = 0;
- uint group_id1 = 0;
- uint group_id2 = 0;
- uint group_id3 = 0;
- uint y = 0;
- uint batch_id = 0;
-
- // Compute curr_channel and batch_id
- DIV_MOD_UINT(get_global_id(2), (uint)SRC_DIM_Z, batch_id, y);
-
- // Compute group_id and channel_id
- DIV_MOD_UINT(curr_channel + (uint)0, K, group_id0, channel_id0);
- DIV_MOD_UINT(curr_channel + (uint)1, K, group_id1, channel_id1);
- DIV_MOD_UINT(curr_channel + (uint)2, K, group_id2, channel_id2);
- DIV_MOD_UINT(curr_channel + (uint)3, K, group_id3, channel_id3);
-
- const uint x = get_global_id(1) * 2;
- const uint z0 = channel_id0 * (uint)NUM_GROUPS + group_id0;
- const uint z1 = channel_id1 * (uint)NUM_GROUPS + group_id1;
- const uint z2 = channel_id2 * (uint)NUM_GROUPS + group_id2;
- const uint z3 = channel_id3 * (uint)NUM_GROUPS + group_id3;
-
- // Load the Nx2 block
- const __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + curr_channel * sizeof(DATA_TYPE) + x * src_stride_y + y * src_stride_z + batch_id * src_stride_w;
- TYPE u0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_ptr + 0 * src_stride_y));
- TYPE u1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_ptr + 1 * src_stride_y));
-
- // Store blocks
- __global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + x * dst_stride_y + y * dst_stride_z + batch_id * dst_stride_w;
- *((__global DATA_TYPE *)(output_ptr + (uint)0 * dst_stride_y + z0 * sizeof(DATA_TYPE))) = u0.s0;
- *((__global DATA_TYPE *)(output_ptr + (uint)0 * dst_stride_y + z1 * sizeof(DATA_TYPE))) = u0.s1;
- *((__global DATA_TYPE *)(output_ptr + (uint)0 * dst_stride_y + z2 * sizeof(DATA_TYPE))) = u0.s2;
- *((__global DATA_TYPE *)(output_ptr + (uint)0 * dst_stride_y + z3 * sizeof(DATA_TYPE))) = u0.s3;
- *((__global DATA_TYPE *)(output_ptr + (uint)1 * dst_stride_y + z0 * sizeof(DATA_TYPE))) = u1.s0;
- *((__global DATA_TYPE *)(output_ptr + (uint)1 * dst_stride_y + z1 * sizeof(DATA_TYPE))) = u1.s1;
- *((__global DATA_TYPE *)(output_ptr + (uint)1 * dst_stride_y + z2 * sizeof(DATA_TYPE))) = u1.s2;
- *((__global DATA_TYPE *)(output_ptr + (uint)1 * dst_stride_y + z3 * sizeof(DATA_TYPE))) = u1.s3;
-}
-#endif // VEC_SIZE == 4 && defined(LAST_ACCESSED)
-#endif // defined(DATA_TYPE) && defined(VEC_SIZE) && defined(NUM_GROUPS) && defined(K) && defined(SRC_DIM_Z)
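Note: the index arithmetic in both removed kernels is the usual ShuffleNet remapping: an input channel c = group_id * K + channel_id is written to output channel channel_id * NUM_GROUPS + group_id. A minimal sketch of that mapping (the helper name is illustrative only):

    // Channel remapping used by the deleted channel_shuffle_nchw/_nhwc kernels,
    // where k = num_channels / num_groups (the K compile-time define).
    inline uint shuffled_channel(uint curr_channel, uint num_groups, uint k)
    {
        const uint group_id   = curr_channel / k; // quotient computed by DIV_MOD_UINT
        const uint channel_id = curr_channel % k; // remainder computed by DIV_MOD_UINT
        return channel_id * num_groups + group_id;
    }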
diff --git a/src/core/CL/cl_kernels/activation_layer.cl b/src/core/CL/cl_kernels/common/activation_layer.cl
index bc2c99b6c8..a04556a1ed 100644
--- a/src/core/CL/cl_kernels/activation_layer.cl
+++ b/src/core/CL/cl_kernels/common/activation_layer.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/src/core/CL/cl_kernels/activation_layer_quant.cl b/src/core/CL/cl_kernels/common/activation_layer_quant.cl
index 66261019ab..38ee00b17a 100644
--- a/src/core/CL/cl_kernels/activation_layer_quant.cl
+++ b/src/core/CL/cl_kernels/common/activation_layer_quant.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/src/core/CL/cl_kernels/arg_min_max.cl b/src/core/CL/cl_kernels/common/arg_min_max.cl
index 6e57ed0af1..413fcf5333 100644
--- a/src/core/CL/cl_kernels/arg_min_max.cl
+++ b/src/core/CL/cl_kernels/common/arg_min_max.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,6 +22,7 @@
* SOFTWARE.
*/
#include "helpers.h"
+#include "tile_helpers.h"
#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(DATA_TYPE_OUTPUT)
@@ -52,246 +53,183 @@
#endif // defined(ARG_MAX)
#if defined(WIDTH)
-#if defined(ARG_MIN)
-#if defined(PREV_OUTPUT)
-/** Find index minimum value of a vector
- *
- * @param[in] input Pointer to the first value.
- *
- * @return index of the vector.
- */
-inline DATA_TYPE_OUTPUT arg_idx_min_prev_out(__global const DATA_TYPE *input, __global const DATA_TYPE_OUTPUT *prev_res, const int x_idx)
+
+#if defined(ARG_MAX)
+#define VECTOR_PREDICATE_EQ(x, y) ((x) >= (y))
+#define VECTOR_PREDICATE(x, y) ((x) > (y))
+#define SCALAR_SELECT_OP(x, y) ((x) > (y)) ? (x) : (y);
+#elif defined(ARG_MIN)
+#define VECTOR_PREDICATE_EQ(x, y) ((x) <= (y))
+#define VECTOR_PREDICATE(x, y) ((x) < (y))
+#define SCALAR_SELECT_OP(x, y) ((x) < (y)) ? (x) : (y);
+#else // !(defined(ARG_MAX) || defined(ARG_MIN))
+#error "Unsupported reduction operation!"
+#endif // defined(ARG_MAX)
+
+inline DATA_TYPE_OUTPUT vectorized_compute_arg_min_max_2(DATA_TYPE *min_max_val, DATA_TYPE_OUTPUT *min_max_idx, VEC_DATA_TYPE(DATA_TYPE, 2) in, VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 2) res)
{
- int end_elem = (x_idx + 1) * 16;
- if(end_elem > WIDTH)
+ if( VECTOR_PREDICATE_EQ(in.s0,in.s1) )
{
- end_elem = WIDTH - x_idx * 16;
+ *min_max_val = in.s0;
+ *min_max_idx = res.s0;
}
- DATA_TYPE_OUTPUT res = prev_res[0];
- for(int x_v = 1; x_v < end_elem; ++x_v)
+ else
{
- res = select(res, prev_res[x_v], *(input + prev_res[x_v]) < * (input + res));
+ *min_max_val = in.s1;
+ *min_max_idx = res.s1;
}
- return res;
}
-#else // !defined(PREV_OUTPUT)
-/** Find index minimum value of a vector
- *
- * @param[in] input Pointer to the first value.
- *
- * @return index of the vector.
- */
-inline DATA_TYPE_OUTPUT arg_idx_min(__global const DATA_TYPE *input, const int x_idx)
+
+inline DATA_TYPE_OUTPUT vectorized_compute_arg_min_max_4(DATA_TYPE *min_max_val, DATA_TYPE_OUTPUT *min_max_idx, VEC_DATA_TYPE(DATA_TYPE, 4) in, VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 4) res)
{
-#if WIDTH < 16
- DATA_TYPE_OUTPUT res = 0;
- for(DATA_TYPE_OUTPUT x_v = res + 1; x_v < WIDTH; ++x_v)
- {
- res = select(res, x_v, *(input + x_v) < * (input + res));
- }
- return res;
-#else // WIDTH >= 16
- int x_elem = x_idx * 16;
- const int x_goback = select(0, 16 - WIDTH % 16, x_elem + 16 > WIDTH);
- x_elem -= x_goback;
-
- VEC_DATA_TYPE(DATA_TYPE, 16)
- in = vload16(0, input - x_goback);
- VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
- res = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
-
- SIGNED_INT_VEC_DATA_TYPE(DATA_TYPE, 8)
- idx_sel = (in.s01234567 <= in.s89abcdef);
- in.s01234567 = select(in.s89abcdef, in.s01234567, idx_sel);
- res.s01234567 = select(res.s89abcdef, res.s01234567, CONVERT(idx_sel, int8));
+ VEC_DATA_TYPE(COND_DATA_TYPE, 2)
+ idx_sel = VECTOR_PREDICATE_EQ(in.s01, in.s23);
+ in.s01 = select(in.s23, in.s01, idx_sel);
+ res.s01 = select(res.s23, res.s01, CONVERT(idx_sel, VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 2) ));
+ idx_sel.s0 = VECTOR_PREDICATE(in.s0, in.s1) || (in.s0 == in.s1 && CONVERT((res.s0 < res.s1), COND_DATA_TYPE));
+ res.s0 = select(res.s1, res.s0, CONVERT(idx_sel.s0, DATA_TYPE_OUTPUT));
+ *min_max_val = SCALAR_SELECT_OP(in.s0, in.s1);
+ *min_max_idx = res.s0;
+}
- idx_sel.s0123 = (in.s0123 < in.s4567) || (in.s0123 == in.s4567 && CONVERT((res.s0123 < res.s4567), SIGNED_INT_VEC_DATA_TYPE(DATA_TYPE, 4)));
+inline DATA_TYPE_OUTPUT vectorized_compute_arg_min_max_8(DATA_TYPE *min_max_val, DATA_TYPE_OUTPUT *min_max_idx, VEC_DATA_TYPE(DATA_TYPE, 8) in, VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 8) res)
+{
+ VEC_DATA_TYPE(COND_DATA_TYPE, 4)
+ idx_sel = VECTOR_PREDICATE_EQ(in.s0123, in.s4567);
+ in.s0123 = select(in.s4567, in.s0123, idx_sel);
+ res.s0123 = select(res.s4567, res.s0123, CONVERT(idx_sel, VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 4) ));
+ idx_sel.s01 = (VECTOR_PREDICATE(in.s01, in.s23)) || (in.s01 == in.s23 && CONVERT(((res.s01 < res.s23)), VEC_DATA_TYPE(COND_DATA_TYPE, 2)));
+ in.s01 = select(in.s23, in.s01, idx_sel.s01);
+ res.s01 = select(res.s23, res.s01, CONVERT(idx_sel.s01, VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 2) ));
+ idx_sel.s0 = VECTOR_PREDICATE(in.s0, in.s1) || (in.s0 == in.s1 && CONVERT((res.s0 < res.s1), COND_DATA_TYPE));
+ res.s0 = select(res.s1, res.s0, CONVERT(idx_sel.s0, DATA_TYPE_OUTPUT));
+ *min_max_val = SCALAR_SELECT_OP(in.s0, in.s1);
+ *min_max_idx = res.s0;
+}
+
+inline DATA_TYPE_OUTPUT vectorized_compute_arg_min_max_16(DATA_TYPE *min_max_val, DATA_TYPE_OUTPUT *min_max_idx, VEC_DATA_TYPE(DATA_TYPE, 16) in, VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16) res)
+{
+ VEC_DATA_TYPE(COND_DATA_TYPE, 8)
+ idx_sel = VECTOR_PREDICATE_EQ(in.s01234567, in.s89abcdef);
+ in.s01234567 = select(in.s89abcdef, in.s01234567, idx_sel);
+ res.s01234567 = select(res.s89abcdef, res.s01234567, CONVERT(idx_sel, VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 8) ));
+ idx_sel.s0123 = VECTOR_PREDICATE(in.s0123, in.s4567) || (in.s0123 == in.s4567 && CONVERT(((res.s0123 < res.s4567)), VEC_DATA_TYPE(COND_DATA_TYPE, 4)));
in.s0123 = select(in.s4567, in.s0123, idx_sel.s0123);
- res.s0123 = select(res.s4567, res.s0123, CONVERT(idx_sel.s0123, int4));
+ res.s0123 = select(res.s4567, res.s0123, CONVERT(idx_sel.s0123, VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 4) ));
+ idx_sel.s01 = (VECTOR_PREDICATE(in.s01, in.s23)) || (in.s01 == in.s23 && CONVERT(((res.s01 < res.s23)), VEC_DATA_TYPE(COND_DATA_TYPE, 2)));
+ in.s01 = select(in.s23, in.s01, idx_sel.s01);
+ res.s01 = select(res.s23, res.s01, CONVERT(idx_sel.s01, VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 2) ));
+ idx_sel.s0 = VECTOR_PREDICATE(in.s0, in.s1) || (in.s0 == in.s1 && CONVERT((res.s0 < res.s1), COND_DATA_TYPE));
+ res.s0 = select(res.s1, res.s0, CONVERT(idx_sel.s0, DATA_TYPE_OUTPUT));
+ *min_max_val = SCALAR_SELECT_OP(in.s0, in.s1);
+ *min_max_idx = res.s0;
+}
- idx_sel.s01 = (in.s01 < in.s23) || (in.s01 == in.s23 && CONVERT((res.s01 < res.s23), SIGNED_INT_VEC_DATA_TYPE(DATA_TYPE, 2)));
- in.s01 = select(in.s23, in.s01, idx_sel.s01);
- res.s01 = select(res.s23, res.s01, CONVERT(idx_sel.s01, int2));
- idx_sel.s0 = (in.s0 < in.s1) || (in.s0 == in.s1 && CONVERT((res.s0 < res.s1), SIGNED_INT_DATA_TYPE(DATA_TYPE)));
- res.s0 = select(res.s1, res.s0, CONVERT(idx_sel.s0, int));
- return res.s0 + x_elem;
-#endif // WIDTH < 16
-}
-#endif // defined(PREV_OUTPUT)
-#endif // defined(ARG_MIN)
-#if defined(ARG_MAX)
-#if defined(PREV_OUTPUT)
-/** Find index maximum value of a vector
- *
- * @param[in] input Pointer to the first value.
- *
- * @return index of the vector.
- */
-inline DATA_TYPE_OUTPUT arg_idx_max_prev_out(__global const DATA_TYPE *input, __global const DATA_TYPE_OUTPUT *prev_res, const int x_idx)
+inline void scalar_compute_global_min_max(DATA_TYPE in_val, int idx, DATA_TYPE *out_min_max_val, DATA_TYPE_OUTPUT *out_idx)
{
- int end_elem = (x_idx + 1) * 16;
- if(end_elem > WIDTH)
- {
- end_elem = WIDTH - x_idx * 16;
- }
- DATA_TYPE_OUTPUT res = prev_res[0];
- for(int x_v = 1; x_v < end_elem; ++x_v)
+#if defined(ARG_MAX)
+ if(in_val > *out_min_max_val)
+#else // defined(ARG_MAX)
+ if(in_val < *out_min_max_val)
+#endif // defined(ARG_MAX)
{
- res = select(res, prev_res[x_v], *(input + prev_res[x_v]) > *(input + res));
+ *out_min_max_val = in_val;
+ *out_idx = idx;
}
- return res;
}
-#else // !defined(PREV_OUTPUT)
-/** Find index maximum value of a vector
- *
- * @param[in] input Pointer to the first value.
- *
- * @return index of the vector.
- */
-inline DATA_TYPE_OUTPUT arg_idx_max(__global const DATA_TYPE *input, const int x_idx)
-{
-#if WIDTH < 16
- DATA_TYPE_OUTPUT res = 0;
- for(DATA_TYPE_OUTPUT x_v = res + 1; x_v < WIDTH; ++x_v)
- {
- res = select(res, x_v, *(input + x_v) > *(input + res));
- }
- return res;
-#else // WIDTH >= 16
- int x_elem = x_idx * 16;
- const int x_goback = select(0, 16 - WIDTH % 16, x_elem + 16 > WIDTH);
- x_elem -= x_goback;
-
- VEC_DATA_TYPE(DATA_TYPE, 16)
- in = vload16(0, input - x_goback);
- VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
- res = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
-
- SIGNED_INT_VEC_DATA_TYPE(DATA_TYPE, 8)
- idx_sel = (in.s01234567 >= in.s89abcdef);
- in.s01234567 = select(in.s89abcdef, in.s01234567, idx_sel);
- res.s01234567 = select(res.s89abcdef, res.s01234567, CONVERT(idx_sel, int8));
-
- idx_sel.s0123 = (in.s0123 > in.s4567) || (in.s0123 == in.s4567 && CONVERT((res.s0123 < res.s4567), SIGNED_INT_VEC_DATA_TYPE(DATA_TYPE, 4)));
- in.s0123 = select(in.s4567, in.s0123, idx_sel.s0123);
- res.s0123 = select(res.s4567, res.s0123, CONVERT(idx_sel.s0123, int4));
- idx_sel.s01 = (in.s01 > in.s23) || (in.s01 == in.s23 && CONVERT((res.s01 < res.s23), SIGNED_INT_VEC_DATA_TYPE(DATA_TYPE, 2)));
- in.s01 = select(in.s23, in.s01, idx_sel.s01);
- res.s01 = select(res.s23, res.s01, CONVERT(idx_sel.s01, int2));
-
- idx_sel.s0 = (in.s0 > in.s1) || (in.s0 == in.s1 && CONVERT((res.s0 < res.s1), SIGNED_INT_DATA_TYPE(DATA_TYPE)));
- res.s0 = select(res.s1, res.s0, CONVERT(idx_sel.s0, int));
-
- return res.s0 + x_elem;
-#endif // WIDTH < 16
+#if VEC_SIZE > 1
+#if VEC_SIZE == 16
+ #define VECTORIZED_OP(min_max_val,min_max_idx,in,res) vectorized_compute_arg_min_max_16(min_max_val,min_max_idx,in,res)
+#elif VEC_SIZE == 8 // #if VEC_SIZE == 16
+ #define VECTORIZED_OP(min_max_val,min_max_idx,in,res) vectorized_compute_arg_min_max_8(min_max_val,min_max_idx,in,res)
+#elif VEC_SIZE == 4 // # elif VEC_SIZE == 8
+ #define VECTORIZED_OP(min_max_val,min_max_idx,in,res) vectorized_compute_arg_min_max_4(min_max_val,min_max_idx,in,res)
+#elif VEC_SIZE == 2 // elif VEC_SIZE == 4
+ #define VECTORIZED_OP(min_max_val,min_max_idx,in,res) vectorized_compute_arg_min_max_2(min_max_val,min_max_idx,in,res)
+#else // elif VEC_SIZE == 2
+ #error "Not supported"
+#endif // #if VEC_SIZE == 16
+
+inline VEC_DATA_TYPE(DATA_TYPE_OUTPUT, VEC_SIZE) init_idx_vector()
+{
+#if VEC_SIZE == 16
+ VEC_DATA_TYPE(DATA_TYPE_OUTPUT, VEC_SIZE)
+ vidx = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
+#elif VEC_SIZE == 8 // #if VEC_SIZE == 16
+ VEC_DATA_TYPE(DATA_TYPE_OUTPUT, VEC_SIZE)
+ vidx = { 0, 1, 2, 3, 4, 5, 6, 7 };
+#elif VEC_SIZE == 4 // elif VEC_SIZE == 8
+ VEC_DATA_TYPE(DATA_TYPE_OUTPUT, VEC_SIZE)
+ vidx = { 0, 1, 2, 3 };
+#elif VEC_SIZE == 2 // elif VEC_SIZE == 4
+ VEC_DATA_TYPE(DATA_TYPE_OUTPUT, VEC_SIZE)
+ vidx = { 0, 1 };
+#else // elif VEC_SIZE == 2
+#error "Not supported"
+#endif // #if VEC_SIZE == 16
+ return vidx;
}
-#endif // defined(PREV_OUTPUT)
-#endif // defined(ARG_MAX)
+#endif // VEC_SIZE > 1
-/** This kernel performs parallel reduction given an operation on x-axis.
+/** This kernel performs reduction on x-axis.
*
- * @note In case the results of previous stages are passed the flag PREV_OUTPUT has to be passed using -DPREV_OUTPUT
- * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The input data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
* @note The data type of the output must be passed at compile time using -DDATA_TYPE_OUTPUT: e.g. -DDATA_TYPE_OUTPUT=uint
- * @note The arg_max flag must be passed at compile time using -DARG_MAX if we want to compute the ArgMax
- * @note The arg_min flag must be passed at compile time using -DARG_MIN if we want to compute the ArgMin
+ * @note The data type used for comparing the indexes must be passed at compile time using -DCOND_DATA_TYPE: e.g. -DCOND_DATA_TYPE=uint
+ * @note The width size must be passed at compile time using -DWIDTH e.g. -DWIDTH=128
*
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/S32/F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] prev_res_ptr (Optional) Pointer to previous results tensor. Supported data types: U32/S32
- * @param[in] prev_res_stride_x (Optional) Stride of the output tensor in X dimension (in bytes)
- * @param[in] prev_res_step_x (Optional) prev_res_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] prev_res_stride_y (Optional) Stride of the output tensor in Y dimension (in bytes)
- * @param[in] prev_res_step_y (Optional) prev_res_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] prev_res_offset_first_element_in_bytes (Optional) The offset of the first element in the previous results tensor
- * @param[in] partial_res_ptr The local buffer to hold partial result values. Supported data types: U32/S32
- * @param[in] partial_res_stride_x Stride of the output tensor in X dimension (in bytes)
- * @param[in] partial_res_step_x partial_res_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] partial_res_stride_y Stride of the output tensor in Y dimension (in bytes)
- * @param[in] partial_res_step_y partial_res_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] partial_res_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] local_results Local buffer for storing the partial result
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/S32/F16/F32
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] output_ptr Pointer to the destination tensor, which holds the computed indices. Supported data types: U32/S32
+ * @param[in] output_stride_x Stride of the output tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the output tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
*/
__kernel void arg_min_max_x(
- IMAGE_DECLARATION(src),
-#if defined(PREV_OUTPUT)
- IMAGE_DECLARATION(prev_res),
-#endif // defined(PREV_OUTPUT)
- IMAGE_DECLARATION(partial_res),
- __local DATA_TYPE_OUTPUT *local_results)
+ IMAGE_DECLARATION(input),
+ IMAGE_DECLARATION(output))
{
-#if defined(PREV_OUTPUT)
- Image src = CONVERT_TO_IMAGE_STRUCT_NO_STEP(src);
- Image prev_res = CONVERT_TO_IMAGE_STRUCT(prev_res);
-#else // !defined(PREV_OUTPUT)
- Image src = CONVERT_TO_IMAGE_STRUCT(src);
-#endif // defined(PREV_OUTPUT)
- Image partial_res = CONVERT_TO_IMAGE_STRUCT(partial_res);
-
- unsigned int lsize = get_local_size(0);
- unsigned int lid = get_local_id(0);
-
- const uint x_idx = get_global_id(0);
- const uint y_idx = get_global_id(1);
- const __global DATA_TYPE *src_in_row = (const __global DATA_TYPE *)(src_ptr + src_offset_first_element_in_bytes + y_idx * src_step_y);
-
- for(unsigned int y = 0; y < get_local_size(1); ++y)
+ __global DATA_TYPE *input_addr = (__global DATA_TYPE *)(input_ptr + input_offset_first_element_in_bytes + get_global_id(1) * input_stride_y);
+ __global DATA_TYPE_OUTPUT *output_addr = (__global DATA_TYPE_OUTPUT *)(output_ptr + output_offset_first_element_in_bytes + get_global_id(1) * output_stride_y);
+
+ DATA_TYPE final_value = input_addr[0];
+ DATA_TYPE_OUTPUT final_idx = 0;
+
+#if VEC_SIZE > 1
+ VEC_DATA_TYPE(DATA_TYPE_OUTPUT, VEC_SIZE)
+ vidx = init_idx_vector();
+
+ int x = 0;
+ for(; x <= (WIDTH - VEC_SIZE); x += VEC_SIZE)
{
-#if defined(ARG_MAX)
-#if defined(PREV_OUTPUT)
- local_results[lid] = arg_idx_max_prev_out(src_in_row, (__global DATA_TYPE_OUTPUT *)offset(&prev_res, 0, y), x_idx);
-#else // !defined(PREV_OUTPUT)
- local_results[lid] = arg_idx_max((__global DATA_TYPE *)offset(&src, 0, y), x_idx);
-#endif // defined(PREV_OUTPUT)
-#else // defined(ARG_MIN)
-#if defined(PREV_OUTPUT)
- local_results[lid] = arg_idx_min_prev_out(src_in_row, (__global DATA_TYPE_OUTPUT *)offset(&prev_res, 0, y), x_idx);
-#else // !defined(PREV_OUTPUT)
- local_results[lid] = arg_idx_min((__global DATA_TYPE *)offset(&src, 0, y), x_idx);
-#endif // defined(PREV_OUTPUT)
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
-
- barrier(CLK_LOCAL_MEM_FENCE);
-
- // Looking for the next highest power of 2 (maximum value of lsize is 8)
- unsigned int middle = lsize - 1;
- middle |= middle >> 1;
- middle |= middle >> 2;
- middle += 1;
- // Perform parallel reduction
- for(unsigned int i = middle; i > 0; i >>= 1)
- {
- if(lid < i && lid + i < lsize)
- {
- DATA_TYPE tmp0 = *(src_in_row + local_results[lid]);
- DATA_TYPE tmp1 = *(src_in_row + local_results[lid + i]);
-#if defined(ARG_MAX)
- local_results[lid] = select(
- local_results[lid],
- local_results[lid + i],
- ((tmp0 == tmp1) && (local_results[lid + i] < local_results[lid])) || (tmp0 < tmp1));
-#else // defined(ARG_MIN)
- local_results[lid] = select(
- local_results[lid],
- local_results[lid + i],
- ((tmp0 == tmp1) && (local_results[lid + i] < local_results[lid])) || (tmp0 > tmp1));
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
-
- if(lid == 0)
- {
- ((__global DATA_TYPE_OUTPUT *)offset(&partial_res, get_group_id(0), y))[0] = local_results[0];
- }
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ vals = VLOAD(VEC_SIZE)(0, (input_addr + x));
+ DATA_TYPE local_min_max_value;
+ DATA_TYPE_OUTPUT local_min_max_idx;
+
+ VECTORIZED_OP(&local_min_max_value, &local_min_max_idx, vals, vidx);
+ local_min_max_idx += x;
+ scalar_compute_global_min_max(local_min_max_value, local_min_max_idx, &final_value, &final_idx);
}
+#endif // VEC_SIZE > 1
+
+#if(WIDTH % VEC_SIZE)
+ LOOP_UNROLLING(int, j, 0, 1, WIDTH % VEC_SIZE,
+ {
+ scalar_compute_global_min_max(*(input_addr + j + x), j + x, &final_value, &final_idx);
+ })
+#endif // (WIDTH % VEC_SIZE)
+
+ output_addr[0] = final_idx;
}
#endif // defined(WIDTH)
@@ -320,8 +258,7 @@ __kernel void arg_min_max_y(
IMAGE_DECLARATION(input),
IMAGE_DECLARATION(output))
{
- const int x_offs = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
-
+ const int x_offs = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
__global uchar *input_addr = input_ptr + input_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) + get_global_id(1) * input_stride_y;
__global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE_OUTPUT) + get_global_id(1) * output_stride_y;
@@ -448,4 +385,4 @@ __kernel void arg_min_max_w(
STORE_VECTOR_SELECT(indx, DATA_TYPE_OUTPUT, output_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
}
#endif /* defined(BATCH) && defined(DEPTH) */
-#endif // defined(VEC_SIZE) && defined(DATA_TYPE) && defined(DATA_TYPE_OUTPUT) \ No newline at end of file
+#endif // defined(VEC_SIZE) && defined(DATA_TYPE) && defined(DATA_TYPE_OUTPUT)
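Note: the rewrite above drops the multi-stage, local-memory parallel reduction in favour of a single pass per row: whole vectors are folded pairwise with VECTORIZED_OP, and the leftover elements are handled scalarly. The net effect is shown here as a scalar sketch for the ARG_MAX case (names are illustrative only; ties keep the lowest index, matching scalar_compute_global_min_max):

    // Scalar equivalent of the new single-pass arg_min_max_x row reduction (ARG_MAX case).
    inline uint arg_max_row(__global const float *row, int width)
    {
        float best_val = row[0];
        uint  best_idx = 0;
        for(int x = 1; x < width; ++x)
        {
            if(row[x] > best_val) // strict '>' keeps the lowest index on ties
            {
                best_val = row[x];
                best_idx = (uint)x;
            }
        }
        return best_idx;
    }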
diff --git a/src/core/CL/cl_kernels/common/batchnormalization_layer.cl b/src/core/CL/cl_kernels/common/batchnormalization_layer.cl
new file mode 100644
index 0000000000..18f54907df
--- /dev/null
+++ b/src/core/CL/cl_kernels/common/batchnormalization_layer.cl
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2017-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(DATA_TYPE) && defined(EPSILON)
+/** OpenCL kernel to fuse the weights of convolution or depthwise convolution layer with batch normalization when the data layout is either NCHW or NHWC
+ *
+ * @note The input weights tensor is assumed 4D with the OFMs in the fourth dimension
+ * @note Data type should be passed at compile time using the -DDATA_TYPE, e.g. -DDATA_TYPE=float
+ * @note The third dimension of the input tensor should be passed at compile time when weights belong to a convolution layer using -DDIM2=size. e.g. -DDIM2=16.
+ * For depthwise convolution weights, do not pass DIM2
+ * @note Data layout NHWC should be passed at compile time with -DNHWC. For data layout NCHW it is not required to pass any parameter
+ * @note Batch normalization epsilon parameter should be passed at compile time using -DEPSILON=value. e.g. -DEPSILON=0.001f
+ *
+ * @param[in] w_ptr Pointer to the weights tensor. Supported data types: F16/F32
+ * @param[in] w_stride_x Stride of the weights tensor in X dimension (in bytes)
+ * @param[in] w_step_x w_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] w_stride_y Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] w_step_y w_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] w_stride_z Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] w_step_z w_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] w_offset_first_element_in_bytes The offset of the first element in the weights tensor
+ * @param[in] b_ptr (Optional) Pointer to the bias tensor. Supported data types: same as @p w_ptr
+ * @param[in] b_stride_x (Optional) Stride of the bias tensor in X dimension (in bytes)
+ * @param[in] b_step_x (Optional) b_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] b_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes)
+ * @param[in] b_step_y (Optional) b_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] b_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes)
+ * @param[in] b_step_z (Optional) b_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] b_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
+ * @param[in] mean_ptr Pointer to the mean source tensor. Supported data types: same as @p w_ptr
+ * @param[in] mean_stride_x Stride of the mean source tensor in X dimension (in bytes)
+ * @param[in] mean_step_x mean_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] mean_offset_first_element_in_bytes The offset of the first element in the mean source tensor
+ * @param[in] var_ptr Pointer to the var tensor. Supported data types: same as @p w_ptr
+ * @param[in] var_stride_x Stride of the var tensor in X dimension (in bytes)
+ * @param[in] var_step_x var_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] var_offset_first_element_in_bytes The offset of the first element in the var source tensor
+ * @param[out] w_fused_ptr (Optional) Pointer to the destination weights tensors. Supported data types: same as @p w_ptr
+ * @param[in] w_fused_stride_x (Optional) Stride of the destination weights tensor in X dimension (in bytes)
+ * @param[in] w_fused_step_x (Optional) w_fused_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] w_fused_stride_y (Optional) Stride of the destination weights tensor in Y dimension (in bytes)
+ * @param[in] w_fused_step_y (Optional) w_fused_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] w_fused_stride_z (Optional) Stride of the destination weights tensor in Z dimension (in bytes)
+ * @param[in] w_fused_step_z (Optional) w_fused_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] w_fused_offset_first_element_in_bytes (Optional) The offset of the first element in the destination weights tensor
+ * @param[in] b_fused_ptr (Optional) Pointer to the destination bias tensor. Supported data types: same as @p w_ptr
+ * @param[in] b_fused_stride_x (Optional) Stride of the destination bias tensor in X dimension (in bytes)
+ * @param[in] b_fused_step_x (Optional) b_fused_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] b_fused_offset_first_element_in_bytes (Optional) The offset of the first element in the destination bias tensor
+ * @param[in] beta_ptr (Optional) Pointer to the beta source tensor. Supported data types: same as @p w_ptr
+ * @param[in] beta_stride_x (Optional) Stride of the beta source tensor in X dimension (in bytes)
+ * @param[in] beta_step_x (Optional) beta_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] beta_offset_first_element_in_bytes (Optional) The offset of the first element in the beta source tensor
+ * @param[in] gamma_ptr (Optional) Pointer to the gamma source tensor. Supported data types: same as @p w_ptr
+ * @param[in] gamma_stride_x (Optional) Stride of the gamma source tensor in X dimension (in bytes)
+ * @param[in] gamma_step_x (Optional) gamma_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] gamma_offset_first_element_in_bytes (Optional) The offset of the first element in the gamma source tensor
+ */
+__kernel void fuse_batchnormalization_layer(TENSOR3D_DECLARATION(w),
+#if defined(BIAS)
+ VECTOR_DECLARATION(b),
+#endif // defined(BIAS)
+ VECTOR_DECLARATION(mean),
+ VECTOR_DECLARATION(var)
+#ifndef IN_PLACE_W
+ ,
+ TENSOR3D_DECLARATION(w_fused)
+#endif // ifndef IN_PLACE_W
+#ifndef IN_PLACE_B
+ ,
+ VECTOR_DECLARATION(b_fused)
+#endif // ifndef IN_PLACE_B
+#if defined(BETA)
+ ,
+ VECTOR_DECLARATION(beta)
+#endif // defined(BETA)
+#if defined(GAMMA)
+ ,
+ VECTOR_DECLARATION(gamma)
+#endif // defined(GAMMA)
+ )
+{
+ int x = get_global_id(0);
+ int y = get_global_id(1);
+ int z = get_global_id(2);
+
+#if defined(DIM2)
+ int c0 = z % DIM2;
+ int c1 = z / DIM2;
+#else // ! defined(DIM2)
+ int c0 = 0;
+#if defined(NHWC)
+ int c1 = x;
+#else // defined(NHWC)
+ int c1 = z;
+#endif // defined(NHWC)
+#endif // defined(DIM2)
+
+ int w_offset = x * sizeof(DATA_TYPE) + y * w_stride_y + z * w_stride_z;
+ int v_offset = c1 * sizeof(DATA_TYPE);
+
+ DATA_TYPE w_old = 0.0f;
+ DATA_TYPE b_old = 0.0f;
+ DATA_TYPE w_new = 0.0f;
+ DATA_TYPE b_new = 0.0f;
+ DATA_TYPE gamma = 1.0f;
+ DATA_TYPE mean = 0.0f;
+ DATA_TYPE var = 1.0f;
+ DATA_TYPE beta = 0.0f;
+
+ w_old = *((__global DATA_TYPE *)(w_ptr + w_offset + w_offset_first_element_in_bytes));
+ var = *((__global DATA_TYPE *)(var_ptr + v_offset + var_offset_first_element_in_bytes));
+ mean = *((__global DATA_TYPE *)(mean_ptr + v_offset + mean_offset_first_element_in_bytes));
+
+#if defined(GAMMA)
+ gamma = *((__global DATA_TYPE *)(gamma_ptr + v_offset + gamma_offset_first_element_in_bytes));
+#endif // defined(GAMMA)
+
+ // Compute new weight
+ w_new = (gamma * w_old) / (sqrt(var + EPSILON));
+
+#if defined(IN_PLACE_W)
+ *((__global DATA_TYPE *)(w_ptr + w_offset + w_offset_first_element_in_bytes)) = w_new;
+#else // defined(IN_PLACE_W)
+ *((__global DATA_TYPE *)(w_fused_ptr + w_offset + w_fused_offset_first_element_in_bytes)) = w_new;
+#endif // defined(IN_PLACE_W)
+
+ // Compute bias
+#if !defined(DIM2) && defined(NHWC)
+ if(z == 0 && y == 0)
+#else // !defined(DIM2) && defined(NHWC)
+ if(x == 0 && y == 0 && c0 == 0)
+#endif // !defined(DIM2) && defined(NHWC)
+ {
+#if defined(BIAS)
+ b_old = *((__global DATA_TYPE *)(b_ptr + v_offset + b_offset_first_element_in_bytes));
+#endif // defined(BIAS)
+#if defined(BETA)
+ beta = *((__global DATA_TYPE *)(beta_ptr + v_offset + beta_offset_first_element_in_bytes));
+#endif // defined(BETA)
+
+ b_new = ((gamma * (b_old - mean)) / (sqrt(var + EPSILON))) + beta;
+
+#if defined(BIAS)
+
+#if defined(IN_PLACE_B)
+ *((__global DATA_TYPE *)(b_ptr + v_offset + b_offset_first_element_in_bytes)) = b_new;
+#else // defined(IN_PLACE_B)
+ *((__global DATA_TYPE *)(b_fused_ptr + v_offset + b_fused_offset_first_element_in_bytes)) = b_new;
+#endif // defined(IN_PLACE_B)
+
+#else // defined(BIAS)
+
+#ifndef IN_PLACE_B
+ *((__global DATA_TYPE *)(b_fused_ptr + v_offset + b_fused_offset_first_element_in_bytes)) = b_new;
+#endif // ifndef IN_PLACE_B
+
+#endif // defined(BIAS)
+ }
+}
+#endif // defined(DATA_TYPE) && defined(EPSILON)
\ No newline at end of file
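
As a plain-C reference for the fusion math that fuse_batchnormalization_layer performs (a hedged sketch only; the helper name, the per-channel weight layout and the null checks are illustrative and not part of the patch):

#include <math.h>
#include <stddef.h>

/* Fold batch-norm parameters into convolution weights/bias, one channel at a time:
 *   w_new = gamma * w_old / sqrt(var + eps)
 *   b_new = gamma * (b_old - mean) / sqrt(var + eps) + beta
 */
static void fuse_bn_reference(const float *w, const float *b, const float *mean,
                              const float *var, const float *beta, const float *gamma,
                              float *w_fused, float *b_fused,
                              int n_channels, int weights_per_channel, float epsilon)
{
    for(int c = 0; c < n_channels; ++c)
    {
        const float g     = (gamma != NULL) ? gamma[c] : 1.0f;
        const float bt    = (beta != NULL) ? beta[c] : 0.0f;
        const float b_old = (b != NULL) ? b[c] : 0.0f;
        const float scale = g / sqrtf(var[c] + epsilon);

        for(int k = 0; k < weights_per_channel; ++k)
        {
            w_fused[c * weights_per_channel + k] = scale * w[c * weights_per_channel + k];
        }
        b_fused[c] = scale * (b_old - mean[c]) + bt;
    }
}
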
diff --git a/src/core/CL/cl_kernels/bitwise_op.cl b/src/core/CL/cl_kernels/common/bitwise_op.cl
index a600bced9e..e142c1d275 100644
--- a/src/core/CL/cl_kernels/bitwise_op.cl
+++ b/src/core/CL/cl_kernels/common/bitwise_op.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/src/core/CL/cl_kernels/bounding_box_transform.cl b/src/core/CL/cl_kernels/common/bounding_box_transform.cl
index a9b0496a6e..f2e9cb0ed0 100644
--- a/src/core/CL/cl_kernels/bounding_box_transform.cl
+++ b/src/core/CL/cl_kernels/common/bounding_box_transform.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,7 +25,7 @@
#if defined(DATA_TYPE) && defined(WEIGHT_X) && defined(WEIGHT_Y) && defined(WEIGHT_W) && defined(WEIGHT_H) && defined(IMG_WIDTH) && defined(IMG_HEIGHT) && defined(BOX_FIELDS) && defined(SCALE_BEFORE) // Check for compile time constants
-/** Perform a padded copy of input tensor to the output tensor. Padding values are defined at compile time
+/** Transform proposal bounding boxes to target bounding boxes using bounding box deltas.
*
* @attention The following variables must be passed at compile time:
* -# -DDATA_TYPE= Tensor data type. Supported data types: F16/F32
diff --git a/src/core/CL/cl_kernels/bounding_box_transform_quantized.cl b/src/core/CL/cl_kernels/common/bounding_box_transform_quantized.cl
index 9e5cee55f4..c1d45a56b9 100644
--- a/src/core/CL/cl_kernels/bounding_box_transform_quantized.cl
+++ b/src/core/CL/cl_kernels/common/bounding_box_transform_quantized.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,7 +25,7 @@
#if defined(DATA_TYPE) && defined(DATA_TYPE_DELTAS) && defined(WEIGHT_X) && defined(WEIGHT_Y) && defined(WEIGHT_W) && defined(WEIGHT_H) && defined(IMG_WIDTH) && defined(IMG_HEIGHT) && defined(BOX_FIELDS) && defined(SCALE_BEFORE) && defined(OFFSET_BOXES) && defined(SCALE_BOXES) && defined(OFFSET_DELTAS) && defined(SCALE_DELTAS) && defined(OFFSET_PRED_BOXES) && defined(SCALE_PRED_BOXES) // Check for compile time constants
-/** Perform a padded copy of input tensor to the output tensor for quantized data types. Padding values are defined at compile time
+/** Transform proposal bounding boxes to target bounding boxes using bounding box deltas for quantized data types.
*
* @attention The following variables must be passed at compile time:
 * -# -DDATA_TYPE= Tensor data type. Supported data types: QASYMM16 for boxes and pred_boxes, QASYMM8 for deltas
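
The transform itself is the usual delta-based box regression; a hedged scalar sketch of what one work-item computes on the float path (assuming the common centre/size parameterisation; the function name and the clamping policy are illustrative, not lifted from the kernel):

#include <math.h>

static void apply_box_delta(const float box[4], const float delta[4], const float weight[4],
                            float img_w, float img_h, float pred[4])
{
    const float w  = box[2] - box[0];
    const float h  = box[3] - box[1];
    const float cx = box[0] + 0.5f * w;
    const float cy = box[1] + 0.5f * h;

    /* Deltas are de-weighted, then applied to the box centre and size */
    const float pcx = cx + (delta[0] / weight[0]) * w;
    const float pcy = cy + (delta[1] / weight[1]) * h;
    const float pw  = expf(delta[2] / weight[2]) * w;
    const float ph  = expf(delta[3] / weight[3]) * h;

    /* Back to corner form, clamped to the image */
    pred[0] = fminf(fmaxf(pcx - 0.5f * pw, 0.f), img_w - 1.f);
    pred[1] = fminf(fmaxf(pcy - 0.5f * ph, 0.f), img_h - 1.f);
    pred[2] = fminf(fmaxf(pcx + 0.5f * pw, 0.f), img_w - 1.f);
    pred[3] = fminf(fmaxf(pcy + 0.5f * ph, 0.f), img_h - 1.f);
}
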
diff --git a/src/core/CL/cl_kernels/depth_convert.cl b/src/core/CL/cl_kernels/common/cast.cl
index a888d7b9bc..036a683ec7 100644
--- a/src/core/CL/cl_kernels/depth_convert.cl
+++ b/src/core/CL/cl_kernels/common/cast.cl
@@ -31,7 +31,7 @@
#define CONVERT_UP(x, type) CONVERT(x, type)
-/** This function performs a down-scaling depth conversion.
+/** This function performs a down-cast.
*
* @attention For QSYMM8_PER_CHANNEL -> QASYMM8, it is user's responsibility to keep track of the quantization info.
*
@@ -56,12 +56,10 @@
 * @param[in]  out_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
* @param[in] out_step_z out_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] out_offset_first_element_in_bytes The offset of the first element in the destination image
- * @param[in] shift The integer shift amount value. Supported data types: S32
*/
-__kernel void convert_depth_down(
+__kernel void cast_down(
TENSOR3D_DECLARATION(in),
- TENSOR3D_DECLARATION(out),
- const int shift)
+ TENSOR3D_DECLARATION(out))
{
int x_offs = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
@@ -82,12 +80,12 @@ __kernel void convert_depth_down(
STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
#else /* defined(IS_DATA_TYPE_FLOAT) */
VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)
- res0 = CONVERT_DOWN(in_data >> shift, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE));
+ res0 = CONVERT_DOWN(in_data, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE));
STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
#endif /* defined(IS_DATA_TYPE_FLOAT) */
}
-/** This function performs a up-scaling depth conversion.
+/** This function performs an up-cast.
*
* @note The input and output data_types need to be passed at compile time using -DDATA_TYPE_IN and -DDATA_TYPE_OUT:
* e.g. -DDATA_TYPE_IN=uchar -DDATA_TYPE_OUT=short
@@ -110,12 +108,10 @@ __kernel void convert_depth_down(
 * @param[in]  out_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
* @param[in] out_step_z out_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] out_offset_first_element_in_bytes The offset of the first element in the destination image
- * @param[in] shift The integer shift amount value. Supported data types: S32
*/
-__kernel void convert_depth_up(
+__kernel void cast_up(
TENSOR3D_DECLARATION(in),
- TENSOR3D_DECLARATION(out),
- const int shift)
+ TENSOR3D_DECLARATION(out))
{
int x_offs = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
@@ -132,7 +128,7 @@ __kernel void convert_depth_up(
STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
#else /* defined(IS_DATA_TYPE_FLOAT) */
VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)
- res0 = CONVERT_UP(in_data, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)) << shift;
+ res0 = CONVERT_UP(in_data, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE));
STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
#endif /* defined(IS_DATA_TYPE_FLOAT) */
}
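
With the rename from convert_depth_* to cast_*, the runtime shift argument disappears and the integer path becomes a straight conversion. A hedged scalar model of cast_down for an S32 -> S8 saturating case (the function name is illustrative):

#include <limits.h>

static signed char cast_down_s32_to_s8_sat(int in)
{
    /* Previously the value was shifted right by 'shift' before conversion;
     * after this change it is converted directly (with optional saturation). */
    if(in > SCHAR_MAX) return SCHAR_MAX;
    if(in < SCHAR_MIN) return SCHAR_MIN;
    return (signed char)in;
}
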
diff --git a/src/core/CL/cl_kernels/col2im.cl b/src/core/CL/cl_kernels/common/col2im.cl
index 59c2d8a3aa..4dc005fd43 100644
--- a/src/core/CL/cl_kernels/col2im.cl
+++ b/src/core/CL/cl_kernels/common/col2im.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -67,7 +67,7 @@ __kernel void col2im(
TENSOR4D_DECLARATION(dst))
{
Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
- Tensor4D dst = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(dst, 0);
+ Tensor4D dst = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(dst);
const uint xd = get_global_id(1) % WIDTH_OUTPUT; // x coordinate of the destination tensor
const uint yd = get_global_id(1) / WIDTH_OUTPUT; // y coordinate of the destination tensor
diff --git a/src/core/CL/cl_kernels/common/comparisons.cl b/src/core/CL/cl_kernels/common/comparisons.cl
new file mode 100644
index 0000000000..00bb491f85
--- /dev/null
+++ b/src/core/CL/cl_kernels/common/comparisons.cl
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#define EQUAL(x, y) ((x) == (y))
+#define NOTEQUAL(x, y) ((x) != (y))
+#define GREATER(x, y) ((x) > (y))
+#define GREATEREQUAL(x, y) ((x) >= (y))
+#define LESS(x, y) ((x) < (y))
+#define LESSEQUAL(x, y) ((x) <= (y))
+
+#ifdef IS_QUANTIZED
+# define DEFINE_KERNEL_STR(name) compare_##name##_quantized
+#else // IS_QUANTIZED
+# define DEFINE_KERNEL_STR(name) compare_##name
+#endif // IS_QUANTIZED
+
+#define DEFINE_KERNEL(name) DEFINE_KERNEL_STR(name)
+
+#if defined(DATA_TYPE) && defined(VEC_SIZE) && defined(OP) && defined(OP_NAME)
+/** This function compares two tensors.
+ *
+ * @attention The inputs' data type need to be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @attention Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @attention The comparison operation should be given as a preprocessor argument using -DOP=operation. e.g. -DOP=LESS
+ *
+ * @param[in] in1_ptr Pointer to the source tensor. Supported data types: All non-quantized data types.
+ * @param[in] in1_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] in1_step_x in1_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] in1_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] in1_step_y in1_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] in1_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] in1_step_z in1_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] in1_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] in2_ptr Pointer to the source tensor. Supported data types: same as @p in1_ptr
+ * @param[in] in2_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] in2_step_x in2_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] in2_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] in2_step_y in2_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] in2_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] in2_step_z in2_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] in2_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] out_ptr Pointer to the destination tensor. Supported data types: U8
+ * @param[in] out_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] out_step_x out_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] out_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] out_step_y out_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  out_stride_z                           Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] out_step_z out_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] out_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void DEFINE_KERNEL(OP_NAME)(
+ TENSOR3D_DECLARATION(in1),
+ TENSOR3D_DECLARATION(in2),
+ TENSOR3D_DECLARATION(out))
+{
+ int dst_x = max((int)get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE, 0);
+
+#if VEC_SIZE_IN1 == 1
+ int in1_x = 0;
+#else // VEC_SIZE_IN1 == 1
+ int in1_x = dst_x;
+#endif // VEC_SIZE_IN1 == 1
+
+#if VEC_SIZE_IN2 == 1
+ int in2_x = 0;
+#else // VEC_SIZE_IN2 == 1
+ int in2_x = dst_x;
+#endif // VEC_SIZE_IN2 == 1
+
+ int y = get_global_id(1);
+ int z = get_global_id(2);
+
+ in1_ptr += in1_offset_first_element_in_bytes + z * in1_stride_z + y * in1_stride_y + in1_x * sizeof(DATA_TYPE);
+ in2_ptr += in2_offset_first_element_in_bytes + z * in2_stride_z + y * in2_stride_y + in2_x * sizeof(DATA_TYPE);
+ out_ptr += out_offset_first_element_in_bytes + z * out_stride_z + y * out_stride_y + dst_x * sizeof(uchar);
+
+ // Load values
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) in_a = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))VLOAD(VEC_SIZE_IN1)(0, (__global DATA_TYPE *)in1_ptr);
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) in_b = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))VLOAD(VEC_SIZE_IN2)(0, (__global DATA_TYPE *)in2_ptr);
+
+ // Calculate and store result
+#ifdef IS_QUANTIZED
+ VEC_DATA_TYPE(int, VEC_SIZE) in_a_i32 = CONVERT(in_a, VEC_DATA_TYPE(int, VEC_SIZE));
+ VEC_DATA_TYPE(int, VEC_SIZE) in_b_i32 = CONVERT(in_b, VEC_DATA_TYPE(int, VEC_SIZE));
+
+ VEC_DATA_TYPE(float, VEC_SIZE) in_a_fp = CONVERT(in_a_i32 - OFFSET_IN1, VEC_DATA_TYPE(float, VEC_SIZE)) * SCALE_IN1;
+ VEC_DATA_TYPE(float, VEC_SIZE) in_b_fp = CONVERT(in_b_i32 - OFFSET_IN2, VEC_DATA_TYPE(float, VEC_SIZE)) * SCALE_IN2;
+#else // IS_QUANTIZED
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) in_a_fp = in_a;
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) in_b_fp = in_b;
+#endif // IS_QUANTIZED
+
+#if VEC_SIZE == 1
+ uchar res0 = (uchar)select(0, 255, OP(in_a_fp, in_b_fp));
+#else // VEC_SIZE == 1
+ VEC_DATA_TYPE(uchar, VEC_SIZE) res0 = CONVERT(OP(in_a_fp, in_b_fp), VEC_DATA_TYPE(uchar, VEC_SIZE));
+#endif // VEC_SIZE == 1
+
+ STORE_VECTOR_SELECT(res, uchar, out_ptr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
+}
+#endif /* defined(DATA_TYPE) && defined(VEC_SIZE) && defined(OP) && defined(OP_NAME) */
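
The new comparison kernel dequantizes both operands when IS_QUANTIZED is defined, evaluates OP in float, and writes 0 or 255 per element. A hedged scalar model of one element for OP=LESS on QASYMM8 data (the helper name is illustrative; the scale/offset parameters mirror the SCALE_IN1/SCALE_IN2 and OFFSET_IN1/OFFSET_IN2 build options):

static unsigned char compare_less_qasymm8(unsigned char a, unsigned char b,
                                          int offset_in1, float scale_in1,
                                          int offset_in2, float scale_in2)
{
    const float a_fp = ((int)a - offset_in1) * scale_in1; /* dequantize lhs */
    const float b_fp = ((int)b - offset_in2) * scale_in2; /* dequantize rhs */
    return (a_fp < b_fp) ? 255 : 0;                       /* LESS(x, y) -> all-ones byte */
}
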
diff --git a/src/core/CL/cl_kernels/concatenate.cl b/src/core/CL/cl_kernels/common/concatenate.cl
index d2e65408dc..dc7210a4c4 100644
--- a/src/core/CL/cl_kernels/concatenate.cl
+++ b/src/core/CL/cl_kernels/common/concatenate.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,19 +43,17 @@ inline VEC_QUANT requantize(VEC_QUANT input, float in_offset, float out_offset,
#if defined(DATA_TYPE)
#define VEC_TYPE VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
-#if defined(DEPTH) && defined(ELEMENT_SIZE)
-#if defined(INPUT1_WIDTH)
+#if defined(ELEMENT_SIZE)
#define SELECT_TYPE SELECT_VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
#define SEQ VEC_OFFS(int, VEC_SIZE)
+#if defined(CONCATENATE_WIDTH_X2)
/** This kernel concatenates two input tensors into the output tensor along the first dimension
*
* @note The data type has to be passed at compile time using -DDATA_TYPE. i.e. -DDATA_TYPE=float
* @note Vector size has to be passed at compile time using -DVEC_SIZE. i.e. -DVEC_SIZE=16
* @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
- * @note Tensor depth should be given as a preprocessor argument using -DDEPTH=size. e.g. -DDEPTH=16
- * @note First input tensor width should be given as a preprocessor argument using -DINPUT1_WIDTH=width. e.g. -DINPUT1_WIDTH=8
*
* @param[in] src1_ptr Pointer to the source tensor. Supported data types: All.
* @param[in] src1_stride_x Stride of the source tensor in X dimension (in bytes)
@@ -87,11 +85,15 @@ inline VEC_QUANT requantize(VEC_QUANT input, float in_offset, float out_offset,
 * @param[in]  dst_stride_w                          Stride of the destination tensor in W dimension (in bytes)
 * @param[in]  dst_step_w                            dst_stride_w * number of elements along W processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] DEPTH Tensor depth
+ * @param[in] INPUT1_WIDTH First input tensor width
*/
__kernel void concatenate_width_x2(
TENSOR4D_DECLARATION(src1),
TENSOR4D_DECLARATION(src2),
- TENSOR4D_DECLARATION(dst))
+ TENSOR4D_DECLARATION(dst),
+ const int DEPTH,
+ const int INPUT1_WIDTH)
{
// Calculate input indices
const int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
@@ -125,17 +127,15 @@ __kernel void concatenate_width_x2(
STORE_VECTOR_SELECT(values, DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}
+#endif // defined(CONCATENATE_WIDTH_X2)
-#if defined(INPUT2_WIDTH) && defined(INPUT3_WIDTH)
+#if defined(CONCATENATE_WIDTH_X4)
/** This kernel concatenates four input tensors into the output tensor along the first dimension
*
* @note The data type has to be passed at compile time using -DDATA_TYPE. i.e. -DDATA_TYPE=float
* @note Vector size has to be passed at compile time using -DVEC_SIZE. i.e. -DVEC_SIZE=16
* @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
* @note Tensor depth should be given as a preprocessor argument using -DDEPTH=size. e.g. -DDEPTH=16
- * @note First input tensor width should be given as a preprocessor argument using -DINPUT1_WIDTH=width. e.g. -DINPUT1_WIDTH=8
- * @note Second input tensor width should be given as a preprocessor argument using -DINPUT2_WIDTH=width. e.g. -DINPUT2_WIDTH=8
- * @note Third input tensor width should be given as a preprocessor argument using -DINPUT3_WIDTH=width. e.g. -DINPUT3_WIDTH=8
*
* @param[in] src1_ptr Pointer to the source tensor. Supported data types: All
* @param[in] src1_stride_x Stride of the source tensor in X dimension (in bytes)
@@ -187,13 +187,21 @@ __kernel void concatenate_width_x2(
 * @param[in]  dst_stride_w                          Stride of the destination tensor in W dimension (in bytes)
 * @param[in]  dst_step_w                            dst_stride_w * number of elements along W processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] DEPTH Tensor depth
+ * @param[in] INPUT1_WIDTH First input tensor width
+ * @param[in] INPUT2_WIDTH Second input tensor width
+ * @param[in] INPUT3_WIDTH Third input tensor width
*/
__kernel void concatenate_width_x4(
TENSOR4D_DECLARATION(src1),
TENSOR4D_DECLARATION(src2),
TENSOR4D_DECLARATION(src3),
TENSOR4D_DECLARATION(src4),
- TENSOR4D_DECLARATION(dst))
+ TENSOR4D_DECLARATION(dst),
+ const int DEPTH,
+ const int INPUT1_WIDTH,
+ const int INPUT2_WIDTH,
+ const int INPUT3_WIDTH)
{
// Calculate input indices
const int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
@@ -251,18 +259,17 @@ __kernel void concatenate_width_x4(
STORE_VECTOR_SELECT(values, DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}
-#endif /* defined(INPUT2_WIDTH) && defined(INPUT3_WIDTH) */
-#endif /* defined(INPUT1_WIDTH) */
-#endif /* defined(DEPTH) && defined(ELEMENT_SIZE) */
+#endif /* defined(CONCATENATE_WIDTH_X4) */
+#endif /* defined(ELEMENT_SIZE) */
-#if defined(WIDTH_OFFSET) && defined(DEPTH) && defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER)
+#if defined(WIDTH_OFFSET) && defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER)
+#if defined(CONCATENATE_WIDTH)
/** This kernel concatenates the input tensor into the output tensor along the first dimension
*
* @note The data type has to be passed at compile time using -DDATA_TYPE. i.e. -DDATA_TYPE=float
* @note Vector size has to be passed at compile time using -DVEC_SIZE. i.e. -DVEC_SIZE=16
* @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
* @note The offset for the first spatial dimension has to be passed at compile time using -DWIDTH_OFFSET. i.e. -DWIDTH_OFFSET=128
- * @note Tensor depth should be given as a preprocessor argument using -DDEPTH=size. e.g. -DDEPTH=16
*
* @param[in] src_ptr Pointer to the source tensor. Supported data types: U8/S8/QASYMM8/U16/S16/F16/U32/F32
* @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
@@ -284,11 +291,12 @@ __kernel void concatenate_width_x4(
 * @param[in]  dst_stride_w                          Stride of the destination tensor in W dimension (in bytes)
 * @param[in]  dst_step_w                            dst_stride_w * number of elements along W processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] DEPTH Tensor depth
*/
-
__kernel void concatenate_width(
TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst))
+ TENSOR4D_DECLARATION(dst),
+ const int DEPTH)
{
// Calculate input indices
const int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
@@ -308,19 +316,18 @@ __kernel void concatenate_width(
STORE_VECTOR_SELECT(source_values, DATA_TYPE, dst_addr + WIDTH_OFFSET * sizeof(DATA_TYPE), VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
#endif /* defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT) */
}
-
-#endif /* defined(WIDTH_OFFSET) && defined(DEPTH) && defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER)*/
+#endif /* defined(CONCATENATE_WIDTH) */
+#endif /* defined(WIDTH_OFFSET) && defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER)*/
#if defined(VEC_SIZE_LEFTOVER)
-
-#if defined(HEIGHT_OFFSET) && defined(DEPTH) && defined(VEC_SIZE)
+#if defined(CONCATENATE_HEIGHT)
+#if defined(HEIGHT_OFFSET) && defined(VEC_SIZE)
/** This kernel concatenates the input tensor into the output tensor along the second dimension
*
* @note The data type has to be passed at compile time using -DDATA_TYPE. i.e. -DDATA_TYPE=float
* @note Vector size has to be passed at compile time using -DVEC_SIZE. i.e. -DVEC_SIZE=16
* @note Vector sizes supported are 2,4,8 and 16.
* @note The offset for the second spatial dimension has to be passed at compile time using -DHEIGHT_OFFSET. i.e. -DHEIGHT_OFFSET=128
- * @note Tensor depth should be given as a preprocessor argument using -DDEPTH=size. e.g. -DDEPTH=16
 * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
*
* @param[in] src_ptr Pointer to the source tensor. Supported data types: U8/S8/QASYMM8/U16/S16/F16/U32/F32
@@ -343,11 +350,12 @@ __kernel void concatenate_width(
 * @param[in]  dst_stride_w                          Stride of the destination tensor in W dimension (in bytes)
 * @param[in]  dst_step_w                            dst_stride_w * number of elements along W processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] DEPTH Tensor depth
*/
-
__kernel void concatenate_height(
TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst))
+ TENSOR4D_DECLARATION(dst),
+ const int DEPTH)
{
const int x_offs = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0) * sizeof(DATA_TYPE);
@@ -365,9 +373,10 @@ __kernel void concatenate_height(
STORE_VECTOR_SELECT(source_values, DATA_TYPE, dst_addr + HEIGHT_OFFSET * dst_stride_y, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
#endif /* defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT) */
}
+#endif /* defined(CONCATENATE_HEIGHT) */
+#endif /* defined(HEIGHT_OFFSET) */
-#endif /* defined(HEIGHT_OFFSET) && defined(DEPTH) */
-
+#if defined(CONCATENATE)
/** This kernel concatenates the input tensor into the output tensor along the third dimension
*
* @note The data type has to be passed at compile time using -DDATA_TYPE. i.e. -DDATA_TYPE=float
@@ -410,6 +419,7 @@ __kernel void concatenate(
STORE_VECTOR_SELECT(source_values, DATA_TYPE, dst_addr + offset, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}
+#endif // defined(CONCATENATE)
#endif /* defined(VEC_SIZE_LEFTOVER) */
#endif /* defined(DATA_TYPE) */
#endif /* defined(VEC_SIZE) */
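
The practical effect of the concatenate changes is that DEPTH and the INPUTn_WIDTH values are no longer baked into the program with -D options but passed as trailing kernel arguments, so one compiled binary can cover several shapes. A hedged host-side sketch using the raw OpenCL C API (the helper name and the argument index convention are illustrative; the library itself sets these through its own kernel wrappers):

#include <CL/cl.h>

static cl_int set_concatenate_x2_shape_args(cl_kernel kernel, cl_uint first_scalar_idx,
                                            cl_int depth, cl_int input1_width)
{
    /* first_scalar_idx is the argument slot right after the three tensor declarations */
    cl_int err = clSetKernelArg(kernel, first_scalar_idx, sizeof(cl_int), &depth);
    if(err != CL_SUCCESS)
    {
        return err;
    }
    return clSetKernelArg(kernel, first_scalar_idx + 1, sizeof(cl_int), &input1_width);
}
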
diff --git a/src/core/CL/cl_kernels/convert_fc_weights.cl b/src/core/CL/cl_kernels/common/convert_fc_weights.cl
index a451c0213b..01ef04a7d6 100644
--- a/src/core/CL/cl_kernels/convert_fc_weights.cl
+++ b/src/core/CL/cl_kernels/common/convert_fc_weights.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/src/core/CL/cl_kernels/convolution_layer.cl b/src/core/CL/cl_kernels/common/convolution_layer.cl
index cfd1f12328..be76929ac8 100644
--- a/src/core/CL/cl_kernels/convolution_layer.cl
+++ b/src/core/CL/cl_kernels/common/convolution_layer.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/src/core/CL/cl_kernels/copy_tensor.cl b/src/core/CL/cl_kernels/common/copy_tensor.cl
index 9c90969827..753b98d1b0 100644
--- a/src/core/CL/cl_kernels/copy_tensor.cl
+++ b/src/core/CL/cl_kernels/common/copy_tensor.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/src/core/CL/cl_kernels/crop_tensor.cl b/src/core/CL/cl_kernels/common/crop_tensor.cl
index 62ae36ac5c..d9090dc838 100644
--- a/src/core/CL/cl_kernels/crop_tensor.cl
+++ b/src/core/CL/cl_kernels/common/crop_tensor.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,7 +25,7 @@
#if defined(DATA_TYPE) // Compile time constants
-/** Performs a copy of input tensor to the output tensor.
+/** Performs a tensor cropping.
*
* @param[in] in_ptr Pointer to the source tensor. Supported data types: All
* @param[in] in_stride_x Stride of the source tensor in X dimension (in bytes)
diff --git a/src/core/CL/cl_kernels/deconvolution_layer.cl b/src/core/CL/cl_kernels/common/deconvolution_layer.cl
index b1d5e61476..4ac5e3f0e9 100644
--- a/src/core/CL/cl_kernels/deconvolution_layer.cl
+++ b/src/core/CL/cl_kernels/common/deconvolution_layer.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/src/core/CL/cl_kernels/common/dequantization_layer.cl b/src/core/CL/cl_kernels/common/dequantization_layer.cl
new file mode 100644
index 0000000000..7fa62577ce
--- /dev/null
+++ b/src/core/CL/cl_kernels/common/dequantization_layer.cl
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2017-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(VEC_SIZE) && defined(DATA_TYPE_SRC) && defined(DATA_TYPE_DST) && defined(SCALE) && defined(OFFSET)
+
+/** This kernel performs the dequantization of 8-bit quantized integers to floating point.
+ *
+ * @note Source datatype should be given as a preprocessor argument using -DDATA_TYPE_SRC=type. e.g. -DDATA_TYPE_SRC=char
+ * @note Destination datatype should be given as a preprocessor argument using -DDATA_TYPE_DST=type. e.g. -DDATA_TYPE_DST=float
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @note Quantization scale of input tensor is passed in with -DSCALE=scale.
+ * @note Quantization offset of input tensor is passed in with -DOFFSET=offset.
+ *
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/QSYMM8
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: F16/F32
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  output_stride_z                       Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void dequantization_layer(
+ TENSOR3D_DECLARATION(input),
+ TENSOR3D_DECLARATION(output))
+{
+ // Get pixels pointer
+ Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
+ Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
+
+#if defined(LAST_ACCESSED_X)
+ // Check if access on width gets out of bounds
+ // If it does shift access vector to access elements within bounds
+ const int xi = (int)(get_global_id(0) * VEC_SIZE);
+ input.ptr -= max(xi - (int)LAST_ACCESSED_X, 0) * input_stride_x;
+ output.ptr -= max(xi - (int)LAST_ACCESSED_X, 0) * output_stride_x;
+
+ // Load data
+ VEC_DATA_TYPE(int, VEC_SIZE)
+ val = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_SRC *)input.ptr), VEC_DATA_TYPE(int, VEC_SIZE));
+
+ // Create scale and offset vectors
+ const VEC_DATA_TYPE(float, VEC_SIZE)
+ vscale = SCALE;
+
+ const VEC_DATA_TYPE(int, VEC_SIZE)
+ voffset = OFFSET;
+
+ // Dequantize
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ res = vscale * CONVERT((val - voffset), VEC_DATA_TYPE(float, VEC_SIZE));
+
+ // Store result
+ VSTORE(VEC_SIZE)
+ (CONVERT(res, VEC_DATA_TYPE(DATA_TYPE_DST, VEC_SIZE)), 0, (__global DATA_TYPE_DST *)output.ptr);
+#else // !defined(LAST_ACCESSED_X)
+ *((__global DATA_TYPE_DST *)(output.ptr)) = (DATA_TYPE_DST)((float)((int)(*((__global DATA_TYPE_SRC *)(input.ptr))) - (int)(OFFSET)) * (float)(SCALE));
+#endif // defined(LAST_ACCESSED_X)
+}
+#endif // defined(VEC_SIZE) && defined(DATA_TYPE_SRC) && defined(DATA_TYPE_DST) && defined(SCALE) && defined(OFFSET)
\ No newline at end of file
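
The new dequantization kernel applies res = SCALE * (value - OFFSET) element-wise. A hedged scalar equivalent (the function name is illustrative):

static float dequantize_qasymm8(unsigned char q, float scale, int offset)
{
    return scale * (float)((int)q - offset);
}

For example, with scale 0.5f and offset 128, a quantized value of 130 maps back to 1.0f.
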
diff --git a/src/core/CL/cl_kernels/elementwise_operation.cl b/src/core/CL/cl_kernels/common/elementwise_operation.cl
index 11e0612205..91e51d9d1a 100644
--- a/src/core/CL/cl_kernels/elementwise_operation.cl
+++ b/src/core/CL/cl_kernels/common/elementwise_operation.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,7 +23,7 @@
*/
#include "helpers.h"
-#if defined(OP) && defined(VEC_SIZE_IN1) && defined(VEC_SIZE_IN2) && defined(VEC_SIZE_OUT) && defined(DATA_TYPE_IN1) && defined(DATA_TYPE_IN2) && defined(DATA_TYPE_OUT)
+#if defined(OP) && defined(VEC_SIZE_IN1) && defined(VEC_SIZE_IN2) && defined(VEC_SIZE_OUT) && defined(DATA_TYPE)
/** List of all the operations supported by this kernel.
* @note ADD and SUB operations, when executed on integers, support saturation */
@@ -38,12 +38,18 @@
#define MAX(x, y) max(x, y)
#define MIN(x, y) min(x, y)
#define SQUARED_DIFF(x, y) (x - y) * (x - y)
-#define DIV(x, y) (x / y)
#define POWER(x, y) pow(x, y)
-#define PRELU(x, y) (select(y * x, x, CONVERT((x > (DATA_TYPE_OUT)0), SELECT_VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE_OUT))))
-#define AND(x, y) (CONVERT((x && y), VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE_OUT)) & ((VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE_OUT))1))
-#define OR(x, y) (CONVERT((x || y), VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE_OUT)) & ((VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE_OUT))1))
+#if VEC_SIZE_OUT == 1
+#define PRELU(x, y) (x > 0 ? x : x * y)
+#else // VEC_SIZE_OUT == 1
+#define PRELU(x, y) (select(y * x, x, CONVERT((x > (DATA_TYPE)0), SELECT_VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_OUT))))
+#endif // VEC_SIZE_OUT == 1
+
+#define DIV(x, y) (x / y)
+
+#define AND(x, y) (CONVERT((x && y), VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_OUT)) & ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_OUT))1))
+#define OR(x, y) (CONVERT((x || y), VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_OUT)) & ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_OUT))1))
#define OP_FUN_NAME_STR(op) elementwise_operation_##op
#define OP_FUN_NAME(op) OP_FUN_NAME_STR(op)
@@ -56,8 +62,7 @@
*
* @note Vector sizes of inputs and output have to be passed at compile time using -DVEC_SIZE_IN1, -DVEC_SIZE_IN2, -DVEC_SIZE_OUT.
 * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE_OUT
- * @note The input and output data_types need to be passed at compile time using -DDATA_TYPE_IN1, -DDATA_TYPE_IN2 and -DDATA_TYPE_OUT:
- * e.g. -DDATA_TYPE_IN1=uchar -DDATA_TYPE_IN2=uchar -DDATA_TYPE_OUT=short
+ * @note The input and output data_types need to be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=uchar
* @note To perform saturating operation -DSATURATE has to be passed to the compiler otherwise wrapping policy will be used.
* @note The element-wise operation to be executed has to be passed at compile time using -DOP (e.g., -DOP=ADD)
*
@@ -88,8 +93,12 @@
*/
__kernel void OP_FUN_NAME(OP)(
TENSOR3D_DECLARATION(in1),
- TENSOR3D_DECLARATION(in2),
- TENSOR3D_DECLARATION(out))
+ TENSOR3D_DECLARATION(in2)
+#if !defined(IN_PLACE)
+ ,
+ TENSOR3D_DECLARATION(out)
+#endif // !defined(IN_PLACE)
+)
{
#if VEC_SIZE_IN1 == 1
uint in1_x_offs = 0;
@@ -101,26 +110,37 @@ __kernel void OP_FUN_NAME(OP)(
#else // VEC_SIZE_IN2 == 1
uint in2_x_offs = max((int)(get_global_id(0) * VEC_SIZE_IN2 - (VEC_SIZE_IN2 - VEC_SIZE_LEFTOVER) % VEC_SIZE_IN2), 0);
#endif // VEC_SIZE_IN2 == 1
+#if !defined(IN_PLACE)
uint out_x_offs = max((int)(get_global_id(0) * VEC_SIZE_OUT - (VEC_SIZE_OUT - VEC_SIZE_LEFTOVER) % VEC_SIZE_OUT), 0);
+#endif // !defined(IN_PLACE)
// Get pixels pointer
- __global uchar *in1_addr = in1_ptr + in1_offset_first_element_in_bytes + in1_x_offs * sizeof(DATA_TYPE_IN1) + get_global_id(1) * in1_step_y + get_global_id(2) * in1_step_z;
- __global uchar *in2_addr = in2_ptr + in2_offset_first_element_in_bytes + in2_x_offs * sizeof(DATA_TYPE_IN2) + get_global_id(1) * in2_step_y + get_global_id(2) * in2_step_z;
- __global uchar *out_addr = out_ptr + out_offset_first_element_in_bytes + out_x_offs * sizeof(DATA_TYPE_OUT) + get_global_id(1) * out_step_y + get_global_id(2) * out_step_z;
+ __global uchar *in1_addr = in1_ptr + in1_offset_first_element_in_bytes + in1_x_offs * sizeof(DATA_TYPE) + get_global_id(1) * in1_step_y + get_global_id(2) * in1_step_z;
+ __global uchar *in2_addr = in2_ptr + in2_offset_first_element_in_bytes + in2_x_offs * sizeof(DATA_TYPE) + get_global_id(1) * in2_step_y + get_global_id(2) * in2_step_z;
+ __global uchar *
+#if !defined(IN_PLACE)
+ out_addr = out_ptr + out_offset_first_element_in_bytes + out_x_offs * sizeof(DATA_TYPE) + get_global_id(1) * out_step_y + get_global_id(2) * out_step_z;
+#else // !defined(IN_PLACE)
+#if defined(SRC1_IN_PLACE)
+ out_addr = in1_addr;
+#else //defined(SRC1_IN_PLACE)
+ out_addr = in2_addr;
+#endif //defined(SRC1_IN_PLACE)
+#endif // !defined(IN_PLACE)
// Load values
- VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE_OUT)
- in_a = CONVERT((VEC_DATA_TYPE(DATA_TYPE_IN1, VEC_SIZE_OUT))(VLOAD(VEC_SIZE_IN1)(0, (__global DATA_TYPE_IN1 *)in1_addr)), VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE_OUT));
- VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE_OUT)
- in_b = CONVERT((VEC_DATA_TYPE(DATA_TYPE_IN2, VEC_SIZE_OUT))(VLOAD(VEC_SIZE_IN2)(0, (__global DATA_TYPE_IN2 *)in2_addr)), VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE_OUT));
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_OUT)
+ in_a = CONVERT((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_OUT))(VLOAD(VEC_SIZE_IN1)(0, (__global DATA_TYPE *)in1_addr)), VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_OUT));
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_OUT)
+ in_b = CONVERT((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_OUT))(VLOAD(VEC_SIZE_IN2)(0, (__global DATA_TYPE *)in2_addr)), VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_OUT));
// Calculate and store result
- VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE_OUT)
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_OUT)
res0 = OP(in_a, in_b);
#if defined(ACTIVATION_TYPE)
- res0 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE_OUT, VEC_SIZE_OUT, res0, A_VAL, B_VAL);
+ res0 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE_OUT, res0, A_VAL, B_VAL);
#endif // defined(ACTIVATION_TYPE)
- STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE_OUT, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
+ STORE_VECTOR_SELECT(res, DATA_TYPE, out_addr, VEC_SIZE_OUT, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}
-#endif /* defined(OP) && defined(VEC_SIZE_IN1) && defined(VEC_SIZE_IN2) && defined(VEC_SIZE_OUT) && defined(DATA_TYPE_IN1) && defined(DATA_TYPE_IN2) && defined(DATA_TYPE_OUT) */
+#endif /* defined(OP) && defined(VEC_SIZE_IN1) && defined(VEC_SIZE_IN2) && defined(VEC_SIZE_OUT) && defined(DATA_TYPE) */
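
Two points in the elementwise changes are worth a sketch. First, a single DATA_TYPE define replaces the per-operand DATA_TYPE_IN1/IN2/OUT trio, so all operands now share one type. Second, PRELU gains a scalar form because the select()-based vector form relies on vector relational semantics (component-wise all-ones masks) that do not carry over to VEC_SIZE_OUT == 1. A hedged C equivalent of the scalar branch:

static float prelu_scalar(float x, float alpha)
{
    return (x > 0.0f) ? x : alpha * x; /* mirrors the VEC_SIZE_OUT == 1 definition */
}
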
diff --git a/src/core/CL/cl_kernels/elementwise_operation_quantized.cl b/src/core/CL/cl_kernels/common/elementwise_operation_quantized.cl
index a08c3b2d47..a11be80875 100644
--- a/src/core/CL/cl_kernels/elementwise_operation_quantized.cl
+++ b/src/core/CL/cl_kernels/common/elementwise_operation_quantized.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -28,7 +28,7 @@
#define MAX(x, y) max((x), (y))
#define MIN(x, y) min((x), (y))
#define SQUARED_DIFF(x, y) (x - y) * (x - y)
-#define PRELU(x, y) (select(y * x, x, CONVERT((x > (DATA_TYPE_OUT)0), SELECT_VEC_DATA_TYPE(float, VEC_SIZE_OUT))))
+#define PRELU(x, y) (select(y * x, x, CONVERT((x > (DATA_TYPE)0), SELECT_VEC_DATA_TYPE(float, VEC_SIZE_OUT))))
#define DIV(x, y) (x / y)
#define CONVERT_RTE(x, type) (convert_##type##_rte((x)))
@@ -37,11 +37,11 @@
#define OP_FUN_NAME_STR(op) elementwise_operation_##op##_quantized
#define OP_FUN_NAME(op) OP_FUN_NAME_STR(op)
-#if defined(OP) && defined(VEC_SIZE_IN1) && defined(VEC_SIZE_IN2) && defined(VEC_SIZE_OUT) && defined(OFFSET_IN1) && defined(OFFSET_IN2) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_IN2) && defined(SCALE_OUT) && defined(DATA_TYPE_OUT)
+#if defined(OP) && defined(VEC_SIZE_IN1) && defined(VEC_SIZE_IN2) && defined(VEC_SIZE_OUT) && defined(OFFSET_IN1) && defined(OFFSET_IN2) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_IN2) && defined(SCALE_OUT) && defined(DATA_TYPE)
#define VEC_FLOAT VEC_DATA_TYPE(float, VEC_SIZE_OUT)
#define VEC_INT VEC_DATA_TYPE(int, VEC_SIZE_OUT)
-#define VEC_TYPE VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE_OUT)
+#define VEC_TYPE VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_OUT)
/** This function executes an element-wise operation among two tensors.
*
@@ -57,7 +57,7 @@
* @note To perform saturating operation -DSATURATE has to be passed to the compiler otherwise wrapping policy will be used.
* @note The element-wise operation to be executed has to be passed at compile time using -DOP (e.g., -DOP=ADD)
* @note For QSYMM16 operations OFFSET_IN1, OFFSET_IN2 and OFFSET_OUT must be set to zero
- * @note The data type must be passed at compile time using -DDATA_TYPE_OUT, i.e. -DDATA_TYPE_OUT=uchar
+ * @note The data type must be passed at compile time using -DDATA_TYPE, i.e. -DDATA_TYPE=uchar
*
* @param[in] in1_ptr Pointer to the source tensor. Supported data types: QASYMM8/QSYMM16
* @param[in] in1_stride_x Stride of the source tensor in X dimension (in bytes)
@@ -86,8 +86,12 @@
*/
__kernel void OP_FUN_NAME(OP)(
TENSOR3D_DECLARATION(in1),
- TENSOR3D_DECLARATION(in2),
- TENSOR3D_DECLARATION(out))
+ TENSOR3D_DECLARATION(in2)
+#if !defined(IN_PLACE)
+ ,
+ TENSOR3D_DECLARATION(out)
+#endif // !defined(IN_PLACE)
+)
{
#if VEC_SIZE_IN1 == 1
uint in1_x_offs = 0;
@@ -99,15 +103,26 @@ __kernel void OP_FUN_NAME(OP)(
#else // VEC_SIZE_IN2 == 1
uint in2_x_offs = max((int)(get_global_id(0) * VEC_SIZE_IN2 - (VEC_SIZE_IN2 - VEC_SIZE_LEFTOVER) % VEC_SIZE_IN2), 0);
#endif // VEC_SIZE_IN2 == 1
+#if !defined(IN_PLACE)
uint out_x_offs = max((int)(get_global_id(0) * VEC_SIZE_OUT - (VEC_SIZE_OUT - VEC_SIZE_LEFTOVER) % VEC_SIZE_OUT), 0);
+#endif // !defined(IN_PLACE)
// Get pixels pointer
- __global uchar *in1_addr = in1_ptr + in1_offset_first_element_in_bytes + in1_x_offs * sizeof(DATA_TYPE_OUT) + get_global_id(1) * in1_step_y + get_global_id(2) * in1_step_z;
- __global uchar *in2_addr = in2_ptr + in2_offset_first_element_in_bytes + in2_x_offs * sizeof(DATA_TYPE_OUT) + get_global_id(1) * in2_step_y + get_global_id(2) * in2_step_z;
- __global uchar *out_addr = out_ptr + out_offset_first_element_in_bytes + out_x_offs * sizeof(DATA_TYPE_OUT) + get_global_id(1) * out_step_y + get_global_id(2) * out_step_z;
+ __global uchar *in1_addr = in1_ptr + in1_offset_first_element_in_bytes + in1_x_offs * sizeof(DATA_TYPE) + get_global_id(1) * in1_step_y + get_global_id(2) * in1_step_z;
+ __global uchar *in2_addr = in2_ptr + in2_offset_first_element_in_bytes + in2_x_offs * sizeof(DATA_TYPE) + get_global_id(1) * in2_step_y + get_global_id(2) * in2_step_z;
+ __global uchar *
+#if !defined(IN_PLACE)
+ out_addr = out_ptr + out_offset_first_element_in_bytes + out_x_offs * sizeof(DATA_TYPE) + get_global_id(1) * out_step_y + get_global_id(2) * out_step_z;
+#else // !defined(IN_PLACE)
+#if defined(SRC1_IN_PLACE)
+ out_addr = in1_addr;
+#else //defined(SRC1_IN_PLACE)
+ out_addr = in2_addr;
+#endif //defined(SRC1_IN_PLACE)
+#endif // !defined(IN_PLACE)
- VEC_INT in_a = CONVERT((VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE_OUT))(VLOAD(VEC_SIZE_IN1)(0, (__global DATA_TYPE_OUT *)in1_addr)), VEC_INT);
- VEC_INT in_b = CONVERT((VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE_OUT))(VLOAD(VEC_SIZE_IN2)(0, (__global DATA_TYPE_OUT *)in2_addr)), VEC_INT);
+ VEC_INT in_a = CONVERT((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_OUT))(VLOAD(VEC_SIZE_IN1)(0, (__global DATA_TYPE *)in1_addr)), VEC_INT);
+ VEC_INT in_b = CONVERT((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_OUT))(VLOAD(VEC_SIZE_IN2)(0, (__global DATA_TYPE *)in2_addr)), VEC_INT);
in_a = SUB(in_a, (VEC_INT)((int)OFFSET_IN1));
in_b = SUB(in_b, (VEC_INT)((int)OFFSET_IN2));
@@ -118,6 +133,6 @@ __kernel void OP_FUN_NAME(OP)(
const VEC_TYPE res0 = CONVERT_SAT(CONVERT_DOWN(qresf32, VEC_INT), VEC_TYPE);
// Store result
- STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE_OUT, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
+ STORE_VECTOR_SELECT(res, DATA_TYPE, out_addr, VEC_SIZE_OUT, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}
-#endif /* defined(OP) && defined(VEC_SIZE_IN1) && defined(VEC_SIZE_IN2) && defined(VEC_SIZE_OUT) && defined(OFFSET_IN1) && defined(OFFSET_IN2) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_IN2) && defined(SCALE_OUT) && defined(DATA_TYPE_OUT) */
+#endif /* defined(OP) && defined(VEC_SIZE_IN1) && defined(VEC_SIZE_IN2) && defined(VEC_SIZE_OUT) && defined(OFFSET_IN1) && defined(OFFSET_IN2) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_IN2) && defined(SCALE_OUT) && defined(DATA_TYPE) */
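
The quantized elementwise path dequantizes both inputs, applies OP in float, then requantizes with the output scale and offset. A hedged scalar model for OP=ADD on QASYMM8 (the helper name is illustrative; rounding and saturation approximate the CONVERT_DOWN/CONVERT_SAT pair used above):

#include <math.h>

static unsigned char add_qasymm8(unsigned char a, unsigned char b,
                                 int off_in1, float scale_in1,
                                 int off_in2, float scale_in2,
                                 int off_out, float scale_out)
{
    const float a_fp = ((int)a - off_in1) * scale_in1;  /* dequantize */
    const float b_fp = ((int)b - off_in2) * scale_in2;
    const float r_fp = a_fp + b_fp;                     /* OP in float */
    int r_q = (int)lrintf(r_fp / scale_out) + off_out;  /* requantize, round to nearest */
    if(r_q < 0)   r_q = 0;                              /* saturate to U8 */
    if(r_q > 255) r_q = 255;
    return (unsigned char)r_q;
}
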
diff --git a/src/core/CL/cl_kernels/elementwise_unary.cl b/src/core/CL/cl_kernels/common/elementwise_unary.cl
index d2d9d97d33..81835108a3 100644
--- a/src/core/CL/cl_kernels/elementwise_unary.cl
+++ b/src/core/CL/cl_kernels/common/elementwise_unary.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,7 +22,6 @@
* SOFTWARE.
*/
#include "helpers.h"
-#include "warp_helpers.h"
#if defined(DATA_TYPE) && defined(OPERATION)
@@ -38,13 +37,13 @@
#define fabs_op(input) fabs(input)
// Calculate natural_log
#define natural_log_op(input) log(input)
-// Calculate round (Cannot use round function as it rounds halfway cases away from zero).
+// Calculate round using round to nearest even rounding mode
+#define round_op(input) rint(input)
+
#if defined(VEC_SIZE)
#define VEC_TYPE VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
-#define round_op(input) CONVERT(CONVERT_SAT_ROUND(input, VEC_DATA_TYPE(int, VEC_SIZE), rte), VEC_TYPE)
#define logical_not_op(input) CONVERT(CONVERT(!input, VEC_TYPE) & ((VEC_TYPE)0x1), VEC_TYPE)
#else // defined(VEC_SIZE)
-#define round_op(input) CONVERT(CONVERT_SAT_ROUND(input, int, rte), DATA_TYPE)
#define logical_not_op(input) ((!input) & 0x1)
#endif // defined(VEC_SIZE)
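
round_op now uses rint(), which rounds half-way cases to the nearest even value, rather than OpenCL's round(), which rounds them away from zero (the reason the old CONVERT_SAT_ROUND ... rte path existed). A small hedged host-side check of the difference, assuming the default rounding environment:

#include <math.h>
#include <stdio.h>

int main(void)
{
    printf("rint(2.5)=%.1f  round(2.5)=%.1f\n", rint(2.5), round(2.5));    /* 2.0 vs 3.0 */
    printf("rint(0.5)=%.1f  round(0.5)=%.1f\n", rint(0.5), round(0.5));    /* 0.0 vs 1.0 */
    printf("rint(-1.5)=%.1f round(-1.5)=%.1f\n", rint(-1.5), round(-1.5)); /* -2.0 vs -2.0 */
    return 0;
}
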
diff --git a/src/core/CL/cl_kernels/common/elementwise_unary_quantized.cl b/src/core/CL/cl_kernels/common/elementwise_unary_quantized.cl
new file mode 100644
index 0000000000..2e4cdc53fe
--- /dev/null
+++ b/src/core/CL/cl_kernels/common/elementwise_unary_quantized.cl
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(DATA_TYPE) && defined(OPERATION)
+// Calculate reverse square root
+#define rsqrt_op(input) rsqrt(input)
+#if defined(VEC_SIZE)
+#define VEC_FLOAT VEC_DATA_TYPE(float, VEC_SIZE)
+#define VEC_INT VEC_DATA_TYPE(int, VEC_SIZE)
+#define VEC_TYPE VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+#endif // defined(VEC_SIZE)
+
+/** Applies element wise unary operator in a tensor.
+ *
+ * @param[in] in_ptr Pointer to the source image. Supported data types: QASYMM8/QASYMM8_SIGNED.
+ * @param[in] in_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] in_step_x in_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] in_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] in_step_y in_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] in_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] in_step_z in_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] in_offset_first_element_in_bytes Offset of the first element in the source image
+ * @param[out] out_ptr Pointer to the destination image. Supported data types: QASYMM8/QASYMM8_SIGNED.
+ * @param[in] out_stride_x Stride of the destination image in X dimension (in bytes)
+ * @param[in] out_step_x out_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  out_stride_y                          Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] out_step_y out_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] out_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] out_step_z out_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] out_offset_first_element_in_bytes Offset of the first element in the destination image
+ */
+__kernel void elementwise_unary_quantized(
+ TENSOR3D_DECLARATION(in),
+ TENSOR3D_DECLARATION(out))
+{
+ Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(in);
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(out);
+
+ // Check if access on width gets out of bounds
+ // If it does shift access vector to access elements within bounds
+ const int xi = (int)(get_global_id(0) * VEC_SIZE);
+ in.ptr -= max(xi - (int)LAST_ACCESSED_X, 0) * in_stride_x;
+ out.ptr -= max(xi - (int)LAST_ACCESSED_X, 0) * out_stride_x;
+
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)in.ptr);
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ data_f32 = CONVERT(data, VEC_FLOAT);
+ data_f32 = (data_f32 - (float)OFFSET_IN) * (float)SCALE_IN;
+ VEC_INT qres_int = CONVERT_SAT((OPERATION(data_f32) / ((VEC_FLOAT)(float)SCALE_OUT)), VEC_INT) + ((VEC_INT)((int)OFFSET_OUT));
+ const VEC_TYPE qres = CONVERT_SAT(qres_int, VEC_TYPE);
+ VSTORE(VEC_SIZE)
+ (qres, 0, (__global DATA_TYPE *)out.ptr);
+}
+#endif // defined(DATA_TYPE) && defined(OPERATION)
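
Both this kernel and the dequantization kernel above use the LAST_ACCESSED_X pattern: when the last vector would run past the tensor width, the access is shifted back so a full VEC_SIZE is still processed in bounds (re-computing a few elements instead of branching). A hedged scalar model of the start-index computation, assuming LAST_ACCESSED_X is the last in-bounds start (width - VEC_SIZE):

static int vector_start_index(int global_id, int vec_size, int width)
{
    const int last_accessed_x = width - vec_size;                            /* LAST_ACCESSED_X */
    const int xi              = global_id * vec_size;
    const int shift           = (xi > last_accessed_x) ? (xi - last_accessed_x) : 0;
    return xi - shift;                                                       /* element offset actually read/written */
}
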
diff --git a/src/core/CL/cl_kernels/fft.cl b/src/core/CL/cl_kernels/common/fft.cl
index 51763a620a..3f26d0f1a6 100644
--- a/src/core/CL/cl_kernels/fft.cl
+++ b/src/core/CL/cl_kernels/common/fft.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/src/core/CL/cl_kernels/fft_digit_reverse.cl b/src/core/CL/cl_kernels/common/fft_digit_reverse.cl
index de566212c6..5f64d95bf9 100644
--- a/src/core/CL/cl_kernels/fft_digit_reverse.cl
+++ b/src/core/CL/cl_kernels/common/fft_digit_reverse.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/src/core/CL/cl_kernels/fft_scale.cl b/src/core/CL/cl_kernels/common/fft_scale.cl
index 57e25ef504..c799dd3b9e 100644
--- a/src/core/CL/cl_kernels/fft_scale.cl
+++ b/src/core/CL/cl_kernels/common/fft_scale.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/src/core/CL/cl_kernels/fill_border.cl b/src/core/CL/cl_kernels/common/fill_border.cl
index 5775d899e8..a43343c9f4 100644
--- a/src/core/CL/cl_kernels/fill_border.cl
+++ b/src/core/CL/cl_kernels/common/fill_border.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/src/core/CL/cl_kernels/floor.cl b/src/core/CL/cl_kernels/common/floor.cl
index f6dd4edd2e..f6dd4edd2e 100644
--- a/src/core/CL/cl_kernels/floor.cl
+++ b/src/core/CL/cl_kernels/common/floor.cl
diff --git a/src/core/CL/cl_kernels/gather.cl b/src/core/CL/cl_kernels/common/gather.cl
index 41f439cb47..e16f4bf315 100644
--- a/src/core/CL/cl_kernels/gather.cl
+++ b/src/core/CL/cl_kernels/common/gather.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,7 +29,6 @@
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
* @note Axis should be given as a preprocessor argument using -DAXIS=axis. e.g. -DAXIS=1
* @attention Output tensor depth should be given as a preprocessor argument using -DOUTPUT_DIM_Z=size. e.g. -DOUTPUT_DIM_Z=16
- * @attention Input tensor depth should be given as a preprocessor argument using -DINPUT_DIM_Z=size. e.g. -DINPUT_DIM_Z=16
*
*
* @param[in] input_ptr Pointer to the source tensor. Supported data types: All
@@ -59,33 +58,73 @@
*/
__kernel void gather(
TENSOR4D_DECLARATION(input),
- VECTOR_DECLARATION(indices),
+ TENSOR4D_DECLARATION(indices),
TENSOR4D_DECLARATION(output))
{
const int px = get_global_id(0);
const int py = get_global_id(1);
const int pz = get_global_id(2) % OUTPUT_DIM_Z;
- const int pw = get_global_id(2) / OUTPUT_DIM_Z;
+    const int pw = (get_global_id(2) / OUTPUT_DIM_Z);
- const Tensor4D input = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, INPUT_DIM_Z);
- const Vector indices = CONVERT_TO_VECTOR_STRUCT_NO_STEP(indices);
+ const Tensor4D input = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input);
+ const Tensor4D indices = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(indices);
Tensor4D output = CONVERT_TO_TENSOR4D_STRUCT(output, OUTPUT_DIM_Z);
#if AXIS == 0
- const uint index = *(__global const uint *)vector_offset(&indices, px);
- __global const uchar *input_addr = tensor4D_offset(&input, index, py, pz, pw);
+#if INDICES_DIMS == 1
+ const uint index = *(__global const uint *)tensor4D_offset(&indices, px, 0, 0, 0);
+ const uint safe_index = select((uint)0, index, index < INDEX_LIMIT);
+ __global const uchar *input_addr = tensor4D_offset(&input, safe_index, py, pz, pw);
+#elif INDICES_DIMS == 2
+ const uint index = *(__global const uint *)tensor4D_offset(&indices, px, py, 0, 0);
+ const uint safe_index = select((uint)0, index, index < INDEX_LIMIT);
+ __global const uchar *input_addr = tensor4D_offset(&input, safe_index, pz, pw, 0);
+#elif INDICES_DIMS == 3
+ const uint index = *(__global const uint *)tensor4D_offset(&indices, px, py, pz, 0);
+ const uint safe_index = select((uint)0, index, index < INDEX_LIMIT);
+ __global const uchar *input_addr = tensor4D_offset(&input, safe_index, pw, 0, 0);
+#elif INDICES_DIMS == 4
+ const uint index = *(__global const uint *)tensor4D_offset(&indices, px, py, pz, pw);
+ const uint safe_index = select((uint)0, index, index < INDEX_LIMIT);
+ __global const uchar *input_addr = tensor4D_offset(&input, safe_index, 0, 0, 0);
+#endif //INDICES_DIMS
+
#elif AXIS == 1
- const uint index = *(__global const uint *)vector_offset(&indices, py);
- __global const uchar *input_addr = tensor4D_offset(&input, px, index, pz, pw);
+#if INDICES_DIMS == 1
+ const uint index = *(__global const uint *)tensor4D_offset(&indices, py, 0, 0, 0);
+ const uint safe_index = select((uint)0, index, index < INDEX_LIMIT);
+ __global const uchar *input_addr = tensor4D_offset(&input, px, safe_index, pz, pw);
+#elif INDICES_DIMS == 2
+ const uint index = *(__global const uint *)tensor4D_offset(&indices, py, pz, 0, 0);
+ const uint safe_index = select((uint)0, index, index < INDEX_LIMIT);
+ __global const uchar *input_addr = tensor4D_offset(&input, px, safe_index, pw, 0);
+#elif INDICES_DIMS == 3
+ const uint index = *(__global const uint *)tensor4D_offset(&indices, py, pz, pw, 0);
+ const uint safe_index = select((uint)0, index, index < INDEX_LIMIT);
+ __global const uchar *input_addr = tensor4D_offset(&input, px, safe_index, 0, 0);
+#endif //INDICES_DIMS
+
#elif AXIS == 2
- const uint index = *(__global const uint *)vector_offset(&indices, pz);
- __global const uchar *input_addr = tensor4D_offset(&input, px, py, index, pw);
+#if INDICES_DIMS == 1
+ const uint index = *(__global const uint *)tensor4D_offset(&indices, pz, 0, 0, 0);
+ const uint safe_index = select((uint)0, index, index < INDEX_LIMIT);
+ __global const uchar *input_addr = tensor4D_offset(&input, px, py, safe_index, pw);
+#elif INDICES_DIMS == 2
+ const uint index = *(__global const uint *)tensor4D_offset(&indices, pz, pw, 0, 0);
+ const uint safe_index = select((uint)0, index, index < INDEX_LIMIT);
+ __global const uchar *input_addr = tensor4D_offset(&input, px, py, safe_index, 0);
+#endif //INDICES_DIMS
+
#elif AXIS == 3
- const uint index = *(__global const uint *)vector_offset(&indices, pw);
- __global const uchar *input_addr = tensor4D_offset(&input, px, py, pz, index);
+#if INDICES_DIMS == 1
+ const uint index = *(__global const uint *)tensor4D_offset(&indices, pw, 0, 0, 0);
+ const uint safe_index = select((uint)0, index, index < INDEX_LIMIT);
+ __global const uchar *input_addr = tensor4D_offset(&input, px, py, pz, safe_index);
+#endif //INDICES_DIMS
+
#endif //AXIS
- *(__global DATA_TYPE *)output.ptr = *((__global const DATA_TYPE *)input_addr);
+ *(__global DATA_TYPE *)output.ptr = select((DATA_TYPE)0, *((__global const DATA_TYPE *)input_addr), (DATA_TYPE)(index < INDEX_LIMIT));
}
-#endif //defined(DATA_TYPE) && defined(AXIS)
\ No newline at end of file
+#endif //defined(DATA_TYPE) && defined(AXIS)
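
The out-of-range handling added above (safe_index computed with select(), zero written for invalid indices) can be summarised by this scalar reference for the AXIS == 0, INDICES_DIMS == 1 case. It is a hypothetical helper, assuming float data and 32-bit unsigned indices, and is not part of the library.

    /* Scalar reference for the bounds checking added to the gather kernel above. */
    #include <stddef.h>
    #include <stdint.h>

    static void gather_axis0_ref(const float *input, size_t index_limit,
                                 const uint32_t *indices, size_t num_indices,
                                 float *output)
    {
        for(size_t i = 0; i < num_indices; ++i)
        {
            const uint32_t idx      = indices[i];
            /* safe_index = select(0, index, index < INDEX_LIMIT) */
            const uint32_t safe_idx = (idx < index_limit) ? idx : 0u;
            /* Out-of-range indices never dereference past the input and yield 0. */
            output[i] = (idx < index_limit) ? input[safe_idx] : 0.0f;
        }
    }
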
diff --git a/src/core/CL/cl_kernels/gemm.cl b/src/core/CL/cl_kernels/common/gemm.cl
index 6883aafee5..0c30c0e626 100644
--- a/src/core/CL/cl_kernels/gemm.cl
+++ b/src/core/CL/cl_kernels/common/gemm.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,856 +24,7 @@
#include "gemm_helpers.h"
#include "repeat.h"
-#if defined(M0) && defined(K0) && defined(V0) && defined(DATA_TYPE) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(PARTIAL_LOAD_M0) && defined(PARTIAL_LOAD_K0)
-#define INC2 (VEC_DATA_TYPE(uint, 2))(0, 1)
-#define INC3 (VEC_DATA_TYPE(uint, 3))(0, 1, 2)
-#define INC4 (VEC_DATA_TYPE(uint, 4))(0, 1, 2, 3)
-#define INC8 (VEC_DATA_TYPE(uint, 8))(0, 1, 2, 3, 4, 5, 6, 7)
-#define INC16 (VEC_DATA_TYPE(uint, 16))(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)
-#define CONCAT_INC(K0) INC##K0
-#define INC(K0) CONCAT_INC(K0)
-
-#if(SRC_WIDTH % K0)
-#define BOUNDARY_CONDITION_X(x, a) \
- ({ \
- a = select(0, a, CONVERT(((x * (VEC_DATA_TYPE(uint, K0))K0 + INC(K0)) < (VEC_DATA_TYPE(uint, K0))SRC_WIDTH), VEC_DATA_TYPE(DATA_TYPE, K0))); \
- })
-#else // (SRC_WIDTH % K0)
-#define BOUNDARY_CONDITION_X(x, a) \
- ({})
-#endif // (SRC_WIDTH % K0)
-
-#define LOAD_TENSOR_BOUNDARY_AWARE_M0XK0(M0, K0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \
- ({ \
- if(y * M0 + M0 >= SRC_HEIGHT && PARTIAL_LOAD_M0 != 0) \
- { \
- if(x * K0 + K0 >= SRC_WIDTH && (PARTIAL_LOAD_K0 != 0)) \
- { \
- LOAD_TENSOR_M0XN0(PARTIAL_LOAD_M0, PARTIAL_LOAD_K0, DATA_TYPE, a, input_ptr, src_stride_y, zin); \
- } \
- else \
- { \
- LOAD_TENSOR_M0XN0(PARTIAL_LOAD_M0, K0, DATA_TYPE, a, input_ptr, src_stride_y, zin); \
- } \
- } \
- else \
- { \
- if(x * K0 + K0 >= SRC_WIDTH && (PARTIAL_LOAD_K0 != 0)) \
- { \
- LOAD_TENSOR_M0XN0(M0, PARTIAL_LOAD_K0, DATA_TYPE, a, input_ptr, src_stride_y, zin); \
- } \
- else \
- { \
- LOAD_TENSOR_M0XN0(M0, K0, DATA_TYPE, a, input_ptr, src_stride_y, zin); \
- } \
- } \
- })
-
-/** This OpenCL kernel reshapes the lhs input matrix. The kernel splits the input matrix in blocks of size M0xK0 and stores each one (not transposed) in
- * the output matrix unrolling the values.
- *
- * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
- * @note The width of the input tensor must be passed at compile time using -DSRC_WIDTH (e.g. -DSRC_WIDTH=16)
- * @note The height of the input tensor must be passed at compile time using -DSRC_HEIGHT (e.g. -DSRC_HEIGHT=16)
- * @note The block's dimensions (M0 and K0) must be passed at compile time using -DM0 and -DK0 (e.g. -DM0=2, -DK0=2).
- * @note The number of M0xK0 vertical blocks to store on the same output row must be passed at compile time using -DV0 (e.g. -DV0=2)
- * @note The size of the partial load block in y must be passed at compile time using -DPARTIAL_LOAD_M0 (e.g. -DPARTIAL_LOAD_M0=1)
- * @note The size of the partial load block in x must be passed at compile time using -DPARTIAL_LOAD_K0 (e.g. -DPARTIAL_LOAD_K0=1)
- * @note Only the following values for M0, K0 and V0 are supported:
- * M0: 2,3,4,5,6,7,8
- * K0: 2,3,4,8,16
- * V0: greater than 0
- * @note In case the input has to be reinterpreted as a 3D tensor (e.g. input of convolution layer 1x1), the following information must be passed at compile time:
- * -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
- * -# HEIGHT_GEMM3D: The height of the input in case it has to be reinterpreted as a 3D tensor.
- * -# DEPTH_GEMM3D: The depth of the input in case it has to be reinterpreted as a 3D tensor
- * (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- * @note If the M0xK0 blocks have to be interleaved, the option -DINTERLEAVE must passed at compile time.
- *
- * @param[in] src_ptr Pointer to the source LHS tensor. Supported data types: All
- * @param[in] src_stride_x Stride of the source LHS tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source LHS tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source LHS tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source LHS tensor
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- * @param[in] cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
- */
-__kernel void gemm_reshape_lhs_matrix_nt(TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst)
-#if defined(REINTERPRET_INPUT_AS_3D)
- ,
- uint cross_plane_pad
-#endif // REINTERPRET_INPUT_AS_3D
- )
-{
- // Block size
-#define BLOCK_SIZE ((M0) * (K0))
-
- // Output offset X
-#if defined(INTERLEAVE)
-#define OUTPUT_OFFSET_X (K0)
-#else // defined(INTERLEAVE)
-#define OUTPUT_OFFSET_X (BLOCK_SIZE)
-#endif // defined(INTERLEAVE)
-
- // Output step X
-#if defined(INTERLEAVE)
-#define OUTPUT_STEP_X (K0) * (V0)
-#else // Do not interleave
-#define OUTPUT_STEP_X (K0)
-#endif // defined(INTERLEAVE)
-
- // Compute source and destination addresses
- uint x = get_global_id(0);
- uint y = get_global_id(1);
- uint z = get_global_id(2);
-
- // ------------------ Compute input/output addresses ---------------------------
-
- // Compute the input address
- __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + x * (uint)K0 * sizeof(DATA_TYPE) + y * (uint)M0 * src_stride_y;
-
- // Compute the output address
- __global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)BLOCK_SIZE * (uint)V0 * sizeof(DATA_TYPE)) + ((y / (uint)V0) * (uint)dst_stride_y) + ((y % V0) *
- (uint)OUTPUT_OFFSET_X * sizeof(DATA_TYPE));
-
- // Create variables: uint zin0=0, zin1=0, zin2=0...zin(M0-1)=0;
- REPEAT_VAR_INIT_TO_CONST(M0, uint, zin, 0);
-
-#if defined(REINTERPRET_INPUT_AS_3D)
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply src_stride_z by DEPTH_GEMM3D
-
- input_ptr += z * (uint)src_stride_z * DEPTH_GEMM3D;
-
- // The plane (zin) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
- CALCULATE_Z_OFFSET(M0, uint, zin, y, HEIGHT_GEMM3D, DEPTH_GEMM3D, cross_plane_pad, src_stride_y);
-
-#else // defined(REINTERPRET_INPUT_AS_3D)
-
- input_ptr += z * (uint)src_stride_z;
-
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
- // Add offset for batched GEMM
- output_ptr += z * (uint)dst_stride_z;
-
- // ---------------------------Load input values --------------------------------
- // Load values from the LHS matrix
- REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE, K0), a, 0);
-
- LOAD_TENSOR_BOUNDARY_AWARE_M0XK0(M0, K0, DATA_TYPE, a, input_ptr, src_stride_y, zin);
-
- // ---------------------------Store output values ------------------------------
- REPEAT_VAR_INIT_TO_CONST(16, uint, zout, 0);
- STORE_BLOCK(M0, K0, DATA_TYPE, a, output_ptr, OUTPUT_STEP_X * sizeof(DATA_TYPE), zout);
-
-#undef BLOCK_SIZE
-#undef OUTPUT_OFFSET_X
-#undef OUTPUT_STEP_X
-}
-
-#if M0 == 2
-#define TRANSPOSE_COLUMN_AND_STORE(output_ptr, output_step_x, i) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, M0) \
- res = (VEC_DATA_TYPE(DATA_TYPE, M0))(a0.s##i, a1.s##i); \
- VSTORE(M0) \
- (res, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE))); \
- })
-#elif M0 == 3 // M0 == 3
-#define TRANSPOSE_COLUMN_AND_STORE(output_ptr, output_step_x, i) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, M0) \
- res = (VEC_DATA_TYPE(DATA_TYPE, M0))(a0.s##i, a1.s##i, a2.s##i); \
- VSTORE(M0) \
- (res, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE))); \
- })
-#elif M0 == 4 // M0 == 4
-#define TRANSPOSE_COLUMN_AND_STORE(output_ptr, output_step_x, i) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, M0) \
- res = (VEC_DATA_TYPE(DATA_TYPE, M0))(a0.s##i, a1.s##i, a2.s##i, a3.s##i); \
- VSTORE(M0) \
- (res, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE))); \
- })
-#elif M0 == 5 // M0 == 5
-#define TRANSPOSE_COLUMN_AND_STORE(output_ptr, output_step_x, i) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, 4) \
- res0 = (VEC_DATA_TYPE(DATA_TYPE, 4))(a0.s##i, a1.s##i, a2.s##i, a3.s##i); \
- DATA_TYPE res1 = a4.s##i; \
- VSTORE(4) \
- (res0, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE))); \
- *((__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE)) + 4) = res1; \
- })
-#elif M0 == 6 // M0 == 6
-#define TRANSPOSE_COLUMN_AND_STORE(output_ptr, output_step_x, i) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, 4) \
- res0 = (VEC_DATA_TYPE(DATA_TYPE, 4))(a0.s##i, a1.s##i, a2.s##i, a3.s##i); \
- VEC_DATA_TYPE(DATA_TYPE, 2) \
- res1 = (VEC_DATA_TYPE(DATA_TYPE, 2))(a4.s##i, a5.s##i); \
- VSTORE(4) \
- (res0, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE))); \
- VSTORE(2) \
- (res1, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE)) + 4); \
- })
-#elif M0 == 7 // M0 == 7
-#define TRANSPOSE_COLUMN_AND_STORE(output_ptr, output_step_x, i) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, 4) \
- res0 = (VEC_DATA_TYPE(DATA_TYPE, 4))(a0.s##i, a1.s##i, a2.s##i, a3.s##i); \
- VEC_DATA_TYPE(DATA_TYPE, 3) \
- res1 = (VEC_DATA_TYPE(DATA_TYPE, 3))(a4.s##i, a5.s##i, a6.s##i); \
- VSTORE(4) \
- (res0, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE))); \
- VSTORE(3) \
- (res1, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE)) + 4); \
- })
-#elif M0 == 8 // M0 == 8
-#define TRANSPOSE_COLUMN_AND_STORE(output_ptr, output_step_x, i) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, M0) \
- res = (VEC_DATA_TYPE(DATA_TYPE, M0))(a0.s##i, a1.s##i, a2.s##i, a3.s##i, a4.s##i, a5.s##i, a6.s##i, a7.s##i); \
- VSTORE(M0) \
- (res, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE))); \
- })
-#else // M0 not supported
-#error "M0 value not supported"
-#endif // N0 conditions
-
-/** This OpenCL kernel reshapes the lhs input matrix. The kernel splits the input matrix in blocks of size M0xK0 and stores each one (transposed) in
- * the output matrix unrolling the values.
- *
- * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
- * @note The width of the input tensor must be passed at compile time using -DSRC_WIDTH (e.g. -DSRC_WIDTH=16)
- * @note The height of the input tensor must be passed at compile time using -DSRC_HEIGHT (e.g. -DSRC_HEIGHT=16)
- * @note The block's dimensions (M0 and K0) must be passed at compile time using -DM0 and -DK0 (e.g. -DM0=2, -DK0=2).
- * @note The number of M0xK0 vertical blocks to store on the same output row must be passed at compile time using -DV0 (e.g. -DV0=2)
- * @note The size of the partial load block in y must be passed at compile time using -DPARTIAL_LOAD_M0 (e.g. -DPARTIAL_LOAD_M0=1)
- * @note The size of the partial load block in x must be passed at compile time using -DPARTIAL_LOAD_K0 (e.g. -DPARTIAL_LOAD_K0=1)
- * @note Only the following values for M0, K0 and V0 are supported:
- * M0: 2,3,4,5,6,7,8
- * K0: 2,3,4,8,16
- * V0: greater than 0
- * @note In case the input has to be reinterpreted as a 3D tensor (e.g. input of convolution layer 1x1), the following information must be passed at compile time:
- * -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
- * -# HEIGHT_GEMM3D: The height of the input in case it has to be reinterpreted as a 3D tensor.
- * -# DEPTH_GEMM3D: The depth of the input in case it has to be reinterpreted as a 3D tensor
- * (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- * @note If the M0xK0 blocks have to be interleaved, the option -DINTERLEAVE must passed at compile time.
- *
- * @param[in] src_ptr Pointer to the source LHS tensor. Supported data types: All
- * @param[in] src_stride_x Stride of the source LHS tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source LHS tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source LHS tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source LHS tensor
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- * @param[in] cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
- */
-__kernel void gemm_reshape_lhs_matrix_t(TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst)
-#if defined(REINTERPRET_INPUT_AS_3D)
- ,
- uint cross_plane_pad
-#endif // REINTERPRET_INPUT_AS_3D
- )
-{
- // Block size
-#define BLOCK_SIZE ((M0) * (K0))
-
- // Output offset X
-#if defined(INTERLEAVE)
-#define OUTPUT_OFFSET_X (M0)
-#else // defined(INTERLEAVE)
-#define OUTPUT_OFFSET_X (BLOCK_SIZE)
-#endif // defined(INTERLEAVE)
-
- // Output step X
-#if defined(INTERLEAVE)
-#define OUTPUT_STEP_X (M0) * (V0)
-#else // Do not interleave
-#define OUTPUT_STEP_X (M0)
-#endif // defined(INTERLEAVE)
-
- // Compute source and destination addresses
- uint x = get_global_id(0);
- uint y = get_global_id(1);
- uint z = get_global_id(2);
-
- // ------------------ Compute input/output addresses ---------------------------
-
- // Compute the input address
- __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + x * (uint)K0 * sizeof(DATA_TYPE) + y * (uint)M0 * src_stride_y;
-
- // Compute the output address
- __global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)BLOCK_SIZE * (uint)V0 * sizeof(DATA_TYPE)) + ((y / (uint)V0) * (uint)dst_stride_y) + ((y % V0) *
- (uint)OUTPUT_OFFSET_X * sizeof(DATA_TYPE));
-
- // Create variables: uint zin0=0, zin1=0, zin2=0...zin(M0-1)=0;
- REPEAT_VAR_INIT_TO_CONST(M0, uint, zin, 0);
-
-#if defined(REINTERPRET_INPUT_AS_3D)
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply src_stride_z by DEPTH_GEMM3D
-
- input_ptr += z * (uint)src_stride_z * DEPTH_GEMM3D;
-
- // The plane (zin) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
- CALCULATE_Z_OFFSET(M0, uint, zin, y, HEIGHT_GEMM3D, DEPTH_GEMM3D, cross_plane_pad, src_stride_y);
-
-#else // defined(REINTERPRET_INPUT_AS_3D)
-
- input_ptr += z * (uint)src_stride_z;
-
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
- // Add offset for batched GEMM
- output_ptr += z * (uint)dst_stride_z;
-
- // ---------------------------Load input values --------------------------------
- REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE, K0), a, 0);
-
- LOAD_TENSOR_BOUNDARY_AWARE_M0XK0(M0, K0, DATA_TYPE, a, input_ptr, src_stride_y, zin);
-
- // ---------------------------Transpose and store block -----------------------
-
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 0);
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 1);
-#if K0 > 2
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 2);
-#endif // K0 > 2
-#if K0 > 3
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 3);
-#endif // K0 > 3
-#if K0 > 4
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 4);
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 5);
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 6);
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 7);
-#endif // K0 > 4
-#if K0 > 8
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 8);
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 9);
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, A);
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, B);
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, C);
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, D);
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, E);
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, F);
-#endif // K0 > 8
-
-#undef BLOCK_SIZE
-#undef OUTPUT_OFFSET_X
-#undef OUTPUT_STEP_X
-}
-#endif // defined(M0) && defined(K0) && defined(V0) && defined(DATA_TYPE) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(PARTIAL_LOAD_M0) && defined(PARTIAL_LOAD_K0)
-
-#if defined(K0) && defined(N0) && defined(H0) && defined(DATA_TYPE) && defined(SRC_HEIGHT)
-/** This OpenCL kernel reshapes the rhs input matrix. The kernel splits the input matrix in blocks of size K0xN0 and stores each one (not transposed) in
- * the output matrix unrolling the values.
- *
- * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
- * @note The height of the input tensor must be passed at compile time using -DSRC_HEIGHT (e.g. -DSRC_HEIGHT=16)
- * @note The block's dimensions (K0 and N0) must be passed at compile time using -DK0 and -DN0 (e.g. -DK0=2, -DN0=2).
- * @note The number of K0xN0 vertical blocks to store on the same output row must be passed at compile time using -DH0 (e.g. -DH0=2)
- * @note If the K0xN0 blocks have to be interleaved, the option -DINTERLEAVE must passed at compile time.
- * @note Only the following values for K0, N0 and H0 are supported:
- * N0: 2,3,4,8,16
- * K0: 1,2,3,4,8,16
- * H0: greater than 0
- *
- * @param[in] src_ptr Pointer to the source RHS tensor. Supported data types: All
- * @param[in] src_stride_x Stride of the source RHS tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source RHS tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source RHS tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source RHS tensor
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- */
-__kernel void gemm_reshape_rhs_matrix_nt(TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst))
-{
- // Block size
-#define BLOCK_SIZE ((K0) * (N0))
-
- // Output offset X
-#if defined(INTERLEAVE)
-#define OUTPUT_OFFSET_X (N0)
-#else // defined(INTERLEAVE)
-#define OUTPUT_OFFSET_X (BLOCK_SIZE)
-#endif // defined(INTERLEAVE)
-
- // Output step X
-#if defined(INTERLEAVE)
-#define OUTPUT_STEP_X (N0) * (H0)
-#else // Do not interleave
-#define OUTPUT_STEP_X (N0)
-#endif // defined(INTERLEAVE)
-
- // Compute source and destination addresses
- uint x = get_global_id(0);
- uint y = get_global_id(1);
- uint z = get_global_id(2);
-
- // ------------------ Compute input/output addresses ---------------------------
-
- // Compute the input address
- __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + x * (uint)N0 * sizeof(DATA_TYPE) + y * (uint)K0 * src_stride_y + z * (uint)src_stride_z;
-
- // Compute the output address
- __global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + (y * (uint)BLOCK_SIZE * (uint)H0 * sizeof(DATA_TYPE)) + ((x % (uint)H0) * (uint)OUTPUT_OFFSET_X * sizeof(DATA_TYPE)) + ((
- x / (uint)H0)
- * (uint)dst_stride_y)
- + z * (uint)dst_stride_z;
-
- // ---------------------------Load input values --------------------------------
-
- REPEAT_VAR_INIT_TO_CONST(K0, VEC_DATA_TYPE(DATA_TYPE, N0), a, 0); ////uint a0=0, a1=0, a2=0...a(M0-1)=0;
-
- // Load values from the RHS matrix
- a0 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 0 * src_stride_y));
-#if K0 > 1
- if(y * (uint)K0 + 1 < SRC_HEIGHT)
- {
- a1 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 1 * src_stride_y));
- }
-#endif // K0 > 1
-#if K0 > 2
- if(y * (uint)K0 + 2 < SRC_HEIGHT)
- {
- a2 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 2 * src_stride_y));
- }
-#endif // K0 > 2
-#if K0 > 3
- if(y * (uint)K0 + 3 < SRC_HEIGHT)
- {
- a3 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 3 * src_stride_y));
- }
-#endif // K0 > 3
-#if K0 > 4
- if(y * (uint)K0 + 4 < SRC_HEIGHT)
- {
- a4 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 4 * src_stride_y));
- }
- if(y * (uint)K0 + 5 < SRC_HEIGHT)
- {
- a5 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 5 * src_stride_y));
- }
- if(y * (uint)K0 + 6 < SRC_HEIGHT)
- {
- a6 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 6 * src_stride_y));
- }
- if(y * (uint)K0 + 7 < SRC_HEIGHT)
- {
- a7 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 7 * src_stride_y));
- }
-#endif // K0 > 4
-#if K0 > 8
- if(y * (uint)K0 + 8 < SRC_HEIGHT)
- {
- a8 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 8 * src_stride_y));
- }
- if(y * (uint)K0 + 9 < SRC_HEIGHT)
- {
- a9 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 9 * src_stride_y));
- }
- if(y * (uint)K0 + 10 < SRC_HEIGHT)
- {
- aA = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 10 * src_stride_y));
- }
- if(y * (uint)K0 + 11 < SRC_HEIGHT)
- {
- aB = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 11 * src_stride_y));
- }
- if(y * (uint)K0 + 12 < SRC_HEIGHT)
- {
- aC = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 12 * src_stride_y));
- }
- if(y * (uint)K0 + 13 < SRC_HEIGHT)
- {
- aD = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 13 * src_stride_y));
- }
- if(y * (uint)K0 + 14 < SRC_HEIGHT)
- {
- aE = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 14 * src_stride_y));
- }
- if(y * (uint)K0 + 15 < SRC_HEIGHT)
- {
- aF = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 15 * src_stride_y));
- }
-#endif // K0 > 8
-
- // ---------------------------Store output values ------------------------------
- REPEAT_VAR_INIT_TO_CONST(16, uint, zout, 0);
- STORE_BLOCK(K0, N0, DATA_TYPE, a, output_ptr, OUTPUT_STEP_X * sizeof(DATA_TYPE), zout);
-
-#undef BLOCK_SIZE
-#undef OUTPUT_OFFSET_X
-#undef OUTPUT_STEP_X
-}
-
-#if defined(TRANSPOSE)
-/** This OpenCL kernel reshapes the rhs input matrix. The kernel splits the input matrix in blocks of size K0xN0 and stores each one (transposed) in
- * the output matrix unrolling the values.
- *
- * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
- * @note The height of the input tensor must be passed at compile time using -DSRC_HEIGHT (e.g. -DSRC_HEIGHT=16)
- * @note The block's dimensions (K0 and N0) must be passed at compile time using -DK0 and -DN0 (e.g. -DK0=2, -DN0=2).
- * @note The number of K0xN0 vertical blocks to store on the same output row must be passed at compile time using -DH0 (e.g. -DH0=2)
- * @note If the K0xN0 blocks have to be interleaved, the option -DINTERLEAVE must passed at compile time.
- * @note The option -DTRANSPOSE must passed at compile time.
- * @note Only the following values for K0, N0 and H0 are supported:
- * N0: 2,3,4,8,16
- * K0: 2,3,4,8,16
- * H0: greater than 0
- *
- * @param[in] src_ptr Pointer to the source RHS tensor. Supported data types: All
- * @param[in] src_stride_x Stride of the source RHS tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source RHS tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source RHS tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source RHS tensor
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- */
-__kernel void gemm_reshape_rhs_matrix_t(TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst))
-{
- // Block size
-#define BLOCK_SIZE ((K0) * (N0))
-
- // Output offset X
-#if defined(INTERLEAVE)
-#define OUTPUT_OFFSET_X (K0)
-#else // defined(INTERLEAVE)
-#define OUTPUT_OFFSET_X (BLOCK_SIZE)
-#endif // defined(INTERLEAVE)
-
- // Output step X
-#if defined(INTERLEAVE)
-#define OUTPUT_STEP_X (K0) * (H0)
-#else // Do not interleave
-#define OUTPUT_STEP_X (K0)
-#endif // defined(INTERLEAVE)
-
- // Compute source and destination addresses
- uint x = get_global_id(0);
- uint y = get_global_id(1);
- uint z = get_global_id(2);
-
- // ------------------ Compute input/output addresses ---------------------------
-
- // Compute the input address
- __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + x * (uint)N0 * sizeof(DATA_TYPE) + y * (uint)K0 * src_stride_y + z * (uint)src_stride_z;
-
- // Compute the output address
- __global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + (y * (uint)BLOCK_SIZE * (uint)H0 * sizeof(DATA_TYPE)) + ((x % H0) * (uint)OUTPUT_OFFSET_X * sizeof(DATA_TYPE)) + ((x /
- (uint)H0) * (uint)dst_stride_y) + z * (uint)dst_stride_z;
-
- // ---------------------------Load input values --------------------------------
- REPEAT_VAR_INIT_TO_CONST(K0, VEC_DATA_TYPE(DATA_TYPE, N0), a, 0); //VEC_DATA_TYPE(DATA_TYPE, N0) a0=0, a1=0, ... a(K0-1)=0;
-
- // Load values from the RHS matrix
- a0 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 0 * src_stride_y));
- if(y * (uint)K0 + 1 < SRC_HEIGHT)
- {
- a1 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 1 * src_stride_y));
- }
-#if K0 > 2
- if(y * (uint)K0 + 2 < SRC_HEIGHT)
- {
- a2 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 2 * src_stride_y));
- }
-#endif // K0 > 2
-#if K0 > 3
- if(y * (uint)K0 + 3 < SRC_HEIGHT)
- {
- a3 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 3 * src_stride_y));
- }
-#endif // K0 > 3
-#if K0 > 4
- if(y * (uint)K0 + 4 < SRC_HEIGHT)
- {
- a4 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 4 * src_stride_y));
- }
- if(y * (uint)K0 + 5 < SRC_HEIGHT)
- {
- a5 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 5 * src_stride_y));
- }
- if(y * (uint)K0 + 6 < SRC_HEIGHT)
- {
- a6 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 6 * src_stride_y));
- }
- if(y * (uint)K0 + 7 < SRC_HEIGHT)
- {
- a7 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 7 * src_stride_y));
- }
-#endif // K0 > 4
-#if K0 > 8
- if(y * (uint)K0 + 8 < SRC_HEIGHT)
- {
- a8 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 8 * src_stride_y));
- }
- if(y * (uint)K0 + 9 < SRC_HEIGHT)
- {
- a9 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 9 * src_stride_y));
- }
- if(y * (uint)K0 + 10 < SRC_HEIGHT)
- {
- aA = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 10 * src_stride_y));
- }
- if(y * (uint)K0 + 11 < SRC_HEIGHT)
- {
- aB = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 11 * src_stride_y));
- }
- if(y * (uint)K0 + 12 < SRC_HEIGHT)
- {
- aC = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 12 * src_stride_y));
- }
- if(y * (uint)K0 + 13 < SRC_HEIGHT)
- {
- aD = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 13 * src_stride_y));
- }
- if(y * (uint)K0 + 14 < SRC_HEIGHT)
- {
- aE = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 14 * src_stride_y));
- }
- if(y * (uint)K0 + 15 < SRC_HEIGHT)
- {
- aF = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 15 * src_stride_y));
- }
-#endif // K0 > 8
-
- // ---------------------------Transpose the block ------------------------------
- REPEAT_VAR_INIT_TO_CONST(N0, VEC_DATA_TYPE(DATA_TYPE, K0), res, 0); //VEC_DATA_TYPE(DATA_TYPE, K0) res0=0, res1=0, res2=0,... res(N0-1)=0;
-
-#if K0 == 2
- // This part computes the following transpositions:
- // 2x2 -> 2x2
- // 2x4 -> 4x2
- // 2x8 -> 8x2
- // 2x16 -> 16x2
- res0 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s0, a1.s0);
- res1 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s1, a1.s1);
-#if N0 > 2
- res2 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s2, a1.s2);
-#endif // N0 > 2
-#if N0 > 3
- res3 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s3, a1.s3);
-#endif // N0 > 3
-#if N0 > 4
- res4 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s4, a1.s4);
- res5 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s5, a1.s5);
- res6 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s6, a1.s6);
- res7 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s7, a1.s7);
-#endif // N0 > 4
-#if N0 > 8
- res8 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s8, a1.s8);
- res9 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s9, a1.s9);
- resA = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sA, a1.sA);
- resB = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sB, a1.sB);
- resC = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sC, a1.sC);
- resD = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sD, a1.sD);
- resE = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sE, a1.sE);
- resF = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sF, a1.sF);
-#endif // N0 > 8
-
-#elif K0 == 3 // K0 == 2
- // This part computes the following transpositions:
- // 3x2 -> 2x3
- // 3x4 -> 4x3
- // 3x8 -> 8x3
- // 3x16 -> 16x3
- res0 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s0, a1.s0, a2.s0);
- res1 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s1, a1.s1, a2.s1);
-#if N0 > 2
- res2 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s2, a1.s2, a2.s2);
-#endif // N0 > 2
-#if N0 > 3
- res3 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s3, a1.s3, a2.s3);
-#endif // N0 > 3
-#if N0 > 4
- res4 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s4, a1.s4, a2.s4);
- res5 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s5, a1.s5, a2.s5);
- res6 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s6, a1.s6, a2.s6);
- res7 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s7, a1.s7, a2.s7);
-#endif // N0 > 4
-#if N0 > 8
- res8 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s8, a1.s8, a2.s8);
- res9 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s9, a1.s9, a2.s9);
- resA = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sA, a1.sA, a2.sA);
- resB = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sB, a1.sB, a2.sB);
- resC = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sC, a1.sC, a2.sC);
- resD = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sD, a1.sD, a2.sD);
- resE = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sE, a1.sE, a2.sE);
- resF = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sF, a1.sF, a2.sF);
-#endif // N0 > 8
-
-#elif K0 == 4 // K0 == 4
- // This part computes the following transpositions:
- // 4x2 -> 2x4
- // 4x4 -> 4x4
- // 4x8 -> 8x4
- // 4x16 -> 16x4
- res0 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s0, a1.s0, a2.s0, a3.s0);
- res1 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s1, a1.s1, a2.s1, a3.s1);
-#if N0 > 2
- res2 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s2, a1.s2, a2.s2, a3.s2);
-#endif // N0 > 2
-#if N0 > 3
- res3 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s3, a1.s3, a2.s3, a3.s3);
-#endif // N0 > 3
-#if N0 > 4
- res4 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s4, a1.s4, a2.s4, a3.s4);
- res5 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s5, a1.s5, a2.s5, a3.s5);
- res6 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s6, a1.s6, a2.s6, a3.s6);
- res7 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s7, a1.s7, a2.s7, a3.s7);
-#endif // N0 > 4
-#if N0 > 8
- res8 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s8, a1.s8, a2.s8, a3.s8);
- res9 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s9, a1.s9, a2.s9, a3.s9);
- resA = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sA, a1.sA, a2.sA, a3.sA);
- resB = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sB, a1.sB, a2.sB, a3.sB);
- resC = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sC, a1.sC, a2.sC, a3.sC);
- resD = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sD, a1.sD, a2.sD, a3.sD);
- resE = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sE, a1.sE, a2.sE, a3.sE);
- resF = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sF, a1.sF, a2.sF, a3.sF);
-#endif // N0 > 8
-
-#elif K0 == 8 // K0 == 8
- // This part computes the following transpositions:
- // 8x2 -> 2x8
- // 8x4 -> 4x8
- // 8x8 -> 8x8
- // 8x16 -> 16x8
- res0 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s0, a1.s0, a2.s0, a3.s0, a4.s0, a5.s0, a6.s0, a7.s0);
- res1 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s1, a1.s1, a2.s1, a3.s1, a4.s1, a5.s1, a6.s1, a7.s1);
-#if N0 > 2
- res2 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s2, a1.s2, a2.s2, a3.s2, a4.s2, a5.s2, a6.s2, a7.s2);
-#endif // N0 > 2
-#if N0 > 3
- res3 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s3, a1.s3, a2.s3, a3.s3, a4.s3, a5.s3, a6.s3, a7.s3);
-#endif // N0 > 3
-#if N0 > 4
- res4 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s4, a1.s4, a2.s4, a3.s4, a4.s4, a5.s4, a6.s4, a7.s4);
- res5 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s5, a1.s5, a2.s5, a3.s5, a4.s5, a5.s5, a6.s5, a7.s5);
- res6 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s6, a1.s6, a2.s6, a3.s6, a4.s6, a5.s6, a6.s6, a7.s6);
- res7 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s7, a1.s7, a2.s7, a3.s7, a4.s7, a5.s7, a6.s7, a7.s7);
-#endif // N0 > 4
-#if N0 > 8
- res8 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s8, a1.s8, a2.s8, a3.s8, a4.s8, a5.s8, a6.s8, a7.s8);
- res9 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s9, a1.s9, a2.s9, a3.s9, a4.s9, a5.s9, a6.s9, a7.s9);
- resA = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sA, a1.sA, a2.sA, a3.sA, a4.sA, a5.sA, a6.sA, a7.sA);
- resB = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sB, a1.sB, a2.sB, a3.sB, a4.sB, a5.sB, a6.sB, a7.sB);
- resC = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sC, a1.sC, a2.sC, a3.sC, a4.sC, a5.sC, a6.sC, a7.sC);
- resD = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sD, a1.sD, a2.sD, a3.sD, a4.sD, a5.sD, a6.sD, a7.sD);
- resE = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sE, a1.sE, a2.sE, a3.sE, a4.sE, a5.sE, a6.sE, a7.sE);
- resF = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sF, a1.sF, a2.sF, a3.sF, a4.sF, a5.sF, a6.sF, a7.sF);
-#endif // N0 > 8
-
-#elif K0 == 16 // K0 == 16
-
- // This part computes the following transpositions:
- // 16x2 -> 2x16
- // 16x4 -> 4x16
- // 16x8 -> 8x16
- // 16x16 -> 16x16
- res0 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s0, a1.s0, a2.s0, a3.s0, a4.s0, a5.s0, a6.s0, a7.s0,
- a8.s0, a9.s0, aA.s0, aB.s0, aC.s0, aD.s0, aE.s0, aF.s0);
- res1 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s1, a1.s1, a2.s1, a3.s1, a4.s1, a5.s1, a6.s1, a7.s1,
- a8.s1, a9.s1, aA.s1, aB.s1, aC.s1, aD.s1, aE.s1, aF.s1);
-#if N0 > 2
- res2 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s2, a1.s2, a2.s2, a3.s2, a4.s2, a5.s2, a6.s2, a7.s2,
- a8.s2, a9.s2, aA.s2, aB.s2, aC.s2, aD.s2, aE.s2, aF.s2);
-#endif // N0 > 2
-#if N0 > 3
- res3 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s3, a1.s3, a2.s3, a3.s3, a4.s3, a5.s3, a6.s3, a7.s3,
- a8.s3, a9.s3, aA.s3, aB.s3, aC.s3, aD.s3, aE.s3, aF.s3);
-#endif // N0 > 3
-#if N0 > 4
- res4 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s4, a1.s4, a2.s4, a3.s4, a4.s4, a5.s4, a6.s4, a7.s4,
- a8.s4, a9.s4, aA.s4, aB.s4, aC.s4, aD.s4, aE.s4, aF.s4);
- res5 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s5, a1.s5, a2.s5, a3.s5, a4.s5, a5.s5, a6.s5, a7.s5,
- a8.s5, a9.s5, aA.s5, aB.s5, aC.s5, aD.s5, aE.s5, aF.s5);
- res6 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s6, a1.s6, a2.s6, a3.s6, a4.s6, a5.s6, a6.s6, a7.s6,
- a8.s6, a9.s6, aA.s6, aB.s6, aC.s6, aD.s6, aE.s6, aF.s6);
- res7 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s7, a1.s7, a2.s7, a3.s7, a4.s7, a5.s7, a6.s7, a7.s7,
- a8.s7, a9.s7, aA.s7, aB.s7, aC.s7, aD.s7, aE.s7, aF.s7);
-#endif // N0 > 4
-#if N0 > 8
- res8 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s8, a1.s8, a2.s8, a3.s8, a4.s8, a5.s8, a6.s8, a7.s8,
- a8.s8, a9.s8, aA.s8, aB.s8, aC.s8, aD.s8, aE.s8, aF.s8);
- res9 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s9, a1.s9, a2.s9, a3.s9, a4.s9, a5.s9, a6.s9, a7.s9,
- a8.s9, a9.s9, aA.s9, aB.s9, aC.s9, aD.s9, aE.s9, aF.s9);
- resA = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sA, a1.sA, a2.sA, a3.sA, a4.sA, a5.sA, a6.sA, a7.sA,
- a8.sA, a9.sA, aA.sA, aB.sA, aC.sA, aD.sA, aE.sA, aF.sA);
- resB = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sB, a1.sB, a2.sB, a3.sB, a4.sB, a5.sB, a6.sB, a7.sB,
- a8.sB, a9.sB, aA.sB, aB.sB, aC.sB, aD.sB, aE.sB, aF.sB);
- resC = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sC, a1.sC, a2.sC, a3.sC, a4.sC, a5.sC, a6.sC, a7.sC,
- a8.sC, a9.sC, aA.sC, aB.sC, aC.sC, aD.sC, aE.sC, aF.sC);
- resD = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sD, a1.sD, a2.sD, a3.sD, a4.sD, a5.sD, a6.sD, a7.sD,
- a8.sD, a9.sD, aA.sD, aB.sD, aC.sD, aD.sD, aE.sD, aF.sD);
- resE = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sE, a1.sE, a2.sE, a3.sE, a4.sE, a5.sE, a6.sE, a7.sE,
- a8.sE, a9.sE, aA.sE, aB.sE, aC.sE, aD.sE, aE.sE, aF.sE);
- resF = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sF, a1.sF, a2.sF, a3.sF, a4.sF, a5.sF, a6.sF, a7.sF,
- a8.sF, a9.sF, aA.sF, aB.sF, aC.sF, aD.sF, aE.sF, aF.sF);
-#endif // N0 > 8
-
-#else // N0 == 16
-#error "Not supported N0 value"
-#endif // N0 > 2
-
- // ---------------------------Store the output values ------------------------------
- REPEAT_VAR_INIT_TO_CONST(16, uint, zout, 0);
- STORE_BLOCK(N0, K0, DATA_TYPE, res, output_ptr, OUTPUT_STEP_X * sizeof(DATA_TYPE), zout);
-
-#undef BLOCK_SIZE
-#undef OUTPUT_OFFSET_X
-#undef OUTPUT_STEP_X
-}
-#endif // defined(TRANSPOSE)
-#endif // defined(K0) && defined(N0) && defined(H0) && defined(DATA_TYPE) && defined(SRC_HEIGHT)
-
-#if defined(M0) && defined(N0) && defined(K0) && defined(H0) && defined(DATA_TYPE) && defined(M) && defined(N) && defined(K)
+#if defined(M0) && defined(N0) && defined(K0) && defined(H0) && defined(DATA_TYPE)
#define CONCAT(a, b) a##b
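
The relaxed guard above reflects the theme of the hunks that follow: M, N and K are no longer baked into the program with -DM/-DN/-DK but arrive as trailing kernel arguments. A hedged host-side sketch of what setting them looks like (raw OpenCL C API; the helper name and first_arg_index are assumptions, not the library's actual plumbing):

    /* Host-side sketch of the -DM/-DN/-DK -> kernel-argument change.
     * Error codes from clSetKernelArg are ignored for brevity. */
    #include <CL/cl.h>

    static void set_gemm_dims(cl_kernel kernel, cl_uint first_arg_index,
                              cl_int M, cl_int N, cl_int K)
    {
        /* Previously the dimensions were compiled into the program build options;
         * now they map onto the trailing "const int M, const int N, const int K"
         * parameters that the hunks below add to the gemm_mm_reshaped_only_rhs_*
         * kernels. */
        clSetKernelArg(kernel, first_arg_index + 0, sizeof(cl_int), &M);
        clSetKernelArg(kernel, first_arg_index + 1, sizeof(cl_int), &N);
        clSetKernelArg(kernel, first_arg_index + 2, sizeof(cl_int), &K);
    }
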
@@ -997,13 +148,13 @@ __kernel void gemm_reshape_rhs_matrix_t(TENSOR3D_DECLARATION(src),
#error "N0 value not supported"
#endif // N0 conditions
+#if defined(GEMM_MM_RESHAPED_ONLY_RHS_T)
/** This OpenCL kernel computes the matrix multiplication between 2 matrices.
* The LHS matrix is NOT reshaped
* The RHS is reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the block K0xN0 is transposed
*
* @note If the first two dimensions of NDRange have been dispatched with "dummy_work_items" support, the option -DDUMMY_WORK_ITEMS must be passed at compile time.
- * @note The GEMM's dimensions (M,N and K) must be passed at compile time using -DM, -DN and and -DK (e.g. -DM=52, -DN=30 and -DK=90)
- * @note The number of columns of LHS matrix must be passed at compile time using -DK (e.g. -DK=64)
+ * @note The GEMM's dimensions (M,N and K) must be passed at runtime as kernel parameters.
* @note The block's dimensions used for reshaping the RHS matrix (N0 and K0) must be passed at compile time using -DN0 and -DK0 (e.g. -DN0=8, -DK0=4).
* @note The number of M0 rows to process must be passed at compile time using -DM0 (e.g. -DM0=2)
* @note The number of K0xN0 horizontal blocks stored on the same output row of the reshaped RHS matrix must be passed at compile time using -DH0 (e.g. -DH0=2)
@@ -1055,6 +206,9 @@ __kernel void gemm_reshape_rhs_matrix_t(TENSOR3D_DECLARATION(src),
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] lhs_cross_plane_pad (Optional) Bottom paddings for LHS matrix in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
* @param[in] dst_cross_plane_pad (Optional) Bottom paddings for the output matrix in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
+ * @param[in] M Number of rows in LHS matrix not reshaped.
+ * @param[in] N Number of columns in RHS matrix not reshaped.
+ * @param[in] K Number of columns in LHS matrix and rows in RHS matrix not reshaped.
*/
__kernel void gemm_mm_reshaped_only_rhs_t(IMAGE_DECLARATION(lhs),
IMAGE_DECLARATION(rhs),
@@ -1076,7 +230,10 @@ __kernel void gemm_mm_reshaped_only_rhs_t(IMAGE_DECLARATION(lhs),
,
uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
- )
+ ,
+ const int M,
+ const int N,
+ const int K)
{
// Block size
#define RHS_BLOCK_SIZE ((K0) * (N0))
@@ -1096,6 +253,9 @@ __kernel void gemm_mm_reshaped_only_rhs_t(IMAGE_DECLARATION(lhs),
uint y = get_global_id(1);
uint z = get_global_id(2);
+ const bool cond_y = y == 0;
+ const bool cond_x = ((x + 1) * N0 >= N);
+
#if defined(DUMMY_WORK_ITEMS)
if((x * N0 >= N) || (y * M0 >= M))
{
@@ -1250,7 +410,7 @@ __kernel void gemm_mm_reshaped_only_rhs_t(IMAGE_DECLARATION(lhs),
#if defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE));
- LOAD_BLOCK(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
+ LOAD_BLOCK_BOUNDARY_AWARE(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero, 1, PARTIAL_STORE_N0, false, cond_x);
#ifndef UNIT_BETA
SCALE_BLOCK(1, DATA_TYPE, bias, BETA);
@@ -1262,7 +422,7 @@ __kernel void gemm_mm_reshaped_only_rhs_t(IMAGE_DECLARATION(lhs),
#else // defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * bias_stride_y) + z * bias_stride_z;
- LOAD_BLOCK(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
+ LOAD_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#ifndef UNIT_BETA
SCALE_BLOCK(M0, DATA_TYPE, bias, BETA);
@@ -1275,28 +435,27 @@ __kernel void gemm_mm_reshaped_only_rhs_t(IMAGE_DECLARATION(lhs),
#endif // defined(BETA)
#if defined(ACTIVATION_TYPE)
- ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, c, A_VAL, B_VAL);
+ ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, N0, c, A_VAL, B_VAL);
#endif // defined(ACTIVATION_TYPE)
- const bool cond_y = y == 0;
- const bool cond_x = ((x + 1) * N0 >= N);
-
// Store output block
STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, c, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#undef RHS_BLOCK_SIZE
#undef RHS_OFFSET_X
#undef RHS_STEP_X
+#undef RHS_STEP_LOOP
}
+#endif // defined(GEMM_MM_RESHAPED_ONLY_RHS_T)
-#if defined(OPENCL_IMAGE_SUPPORT)
+#if defined(OPENCL_IMAGE_SUPPORT) && defined(GEMM_MM_RESHAPED_ONLY_RHS_T_TEXTURE)
/** This OpenCL kernel computes the matrix multiplication between 2 matrices. The RHS matrix is stored in OpenCL image
* The LHS matrix is NOT reshaped
* The RHS is reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the block K0xN0 is transposed
*
* @note -DOPENCL_IMAGE_SUPPORT must be passed at compile time in order to compile this OpenCL kernel
* @note If the first two dimensions of NDRange have been dispatched with "dummy_work_items" support, the option -DDUMMY_WORK_ITEMS must be passed at compile time.
- * @note The GEMM's dimensions (M,N and K) must be passed at compile time using -DM, -DN and and -DK (e.g. -DM=52, -DN=30 and -DK=90)
+ * @note The GEMM's dimensions (M,N and K) must be passed at runtime as kernel parameters.
* @note The height of the RHS matrix, defined before creating the OpenCL image object from the OpenCL buffer, should be passed at compile time using -DRHS_HEIGHT=<value> (e.g. -DRHS_HEIGHT=32)
* Since we cannot create a 3d image from a buffer, the third dimension could be collapsed with the second dimension so RHS_HEIGHT
* could be different from the value returned by get_image_height(rhs_img).
@@ -1346,6 +505,9 @@ __kernel void gemm_mm_reshaped_only_rhs_t(IMAGE_DECLARATION(lhs),
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] lhs_cross_plane_pad (Optional) Bottom paddings for LHS matrix in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
* @param[in] dst_cross_plane_pad (Optional) Bottom paddings for the output matrix in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
+ * @param[in] M Number of rows in LHS matrix not reshaped.
+ * @param[in] N Number of columns in RHS matrix not reshaped.
+ * @param[in] K Number of columns in LHS matrix and rows in RHS matrix not reshaped.
*/
__kernel void gemm_mm_reshaped_only_rhs_t_texture(IMAGE_DECLARATION(lhs),
__read_only image2d_t rhs_img,
@@ -1367,12 +529,15 @@ __kernel void gemm_mm_reshaped_only_rhs_t_texture(IMAGE_DECLARATION(lhs),
,
uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
- )
+ ,
+ const int M,
+ const int N,
+ const int K)
{
// Pixel unit
#define PIXEL_UNIT CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(K0)
-#define LEFTOVER_K (K % K0)
+ const uint LEFTOVER_K = K % K0;
// Block size
#define RHS_BLOCK_SIZE (PIXEL_UNIT * (N0))
@@ -1392,6 +557,9 @@ __kernel void gemm_mm_reshaped_only_rhs_t_texture(IMAGE_DECLARATION(lhs),
uint y = get_global_id(1);
uint z = get_global_id(2);
+ const bool cond_y = y == 0;
+ const bool cond_x = ((x + 1) * N0 >= N);
+
#if defined(DUMMY_WORK_ITEMS)
if((x * N0 >= N) || (y * M0 >= M))
{
@@ -1472,99 +640,100 @@ __kernel void gemm_mm_reshaped_only_rhs_t_texture(IMAGE_DECLARATION(lhs),
x_rhs += N0 * RHS_STEP_X * RHS_STEP_LOOP;
}
-#if LEFTOVER_K != 0
- // Note: We cannot read out-of-bound elements from the RHS matrix because
- // the RHS width is always multiple of K0. This is not be true for the LHS matrix
-
- union UNION_VEC_TYPE
+ if(LEFTOVER_K != 0)
{
- DATA_TYPE s[K0];
- VEC_DATA_TYPE(DATA_TYPE, K0)
- v;
- };
-
- union UNION_VEC_TYPE a0 = {.v = 0 };
+ // Note: We cannot read out-of-bound elements from the RHS matrix because
+ // the RHS width is always a multiple of K0. This is not true for the LHS matrix
+ // Left-over accumulations for LHS matrix
+
+ union UNION_VEC_TYPE
+ {
+ DATA_TYPE s[K0];
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ v;
+ };
+
+ union UNION_VEC_TYPE a0 = {.v = 0 };
#if M0 > 1
- union UNION_VEC_TYPE a1 = {.v = 0 };
+ union UNION_VEC_TYPE a1 = {.v = 0 };
#endif // M0 > 1
#if M0 > 2
- union UNION_VEC_TYPE a2 = {.v = 0 };
+ union UNION_VEC_TYPE a2 = {.v = 0 };
#endif // M0 > 2
#if M0 > 3
- union UNION_VEC_TYPE a3 = {.v = 0 };
+ union UNION_VEC_TYPE a3 = {.v = 0 };
#endif // M0 > 3
#if M0 > 4
- union UNION_VEC_TYPE a4 = {.v = 0 };
+ union UNION_VEC_TYPE a4 = {.v = 0 };
#endif // M0 > 4
#if M0 > 5
- union UNION_VEC_TYPE a5 = {.v = 0 };
+ union UNION_VEC_TYPE a5 = {.v = 0 };
#endif // M0 > 5
#if M0 > 6
- union UNION_VEC_TYPE a6 = {.v = 0 };
+ union UNION_VEC_TYPE a6 = {.v = 0 };
#endif // M0 > 6
#if M0 > 7
- union UNION_VEC_TYPE a7 = {.v = 0 };
+ union UNION_VEC_TYPE a7 = {.v = 0 };
#endif // M0 > 7
- REPEAT_VAR_INIT_TO_CONST(N0, VEC_DATA_TYPE(DATA_TYPE, K0), b, 0);
+ REPEAT_VAR_INIT_TO_CONST(N0, VEC_DATA_TYPE(DATA_TYPE, K0), b, 0);
- // Load from RHS matrix
- LOAD_TEXTURE2D(N0, PIXEL_UNIT, DATA_TYPE, b, rhs_img, x_rhs, y_rhs, RHS_STEP_X, 0);
+ // Load from RHS matrix
+ LOAD_TEXTURE2D(N0, PIXEL_UNIT, DATA_TYPE, b, rhs_img, x_rhs, y_rhs, RHS_STEP_X, 0);
- // Load from LHS matrix
- for(int k = 0; k < LEFTOVER_K; ++k)
- {
- a0.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 0 * lhs_stride_y + zlhs0);
+ // Load from LHS matrix
+ for(int k = 0; k < LEFTOVER_K; ++k)
+ {
+ a0.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 0 * lhs_stride_y + zlhs0);
#if M0 > 1
- a1.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 1 * lhs_stride_y + zlhs1);
+ a1.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 1 * lhs_stride_y + zlhs1);
#endif // M0 > 1
#if M0 > 2
- a2.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 2 * lhs_stride_y + zlhs2);
+ a2.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 2 * lhs_stride_y + zlhs2);
#endif // M0 > 2
#if M0 > 3
- a3.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 3 * lhs_stride_y + zlhs3);
+ a3.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 3 * lhs_stride_y + zlhs3);
#endif // M0 > 3
#if M0 > 4
- a4.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 4 * lhs_stride_y + zlhs4);
+ a4.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 4 * lhs_stride_y + zlhs4);
#endif // M0 > 4
#if M0 > 5
- a5.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 5 * lhs_stride_y + zlhs5);
+ a5.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 5 * lhs_stride_y + zlhs5);
#endif // M0 > 5
#if M0 > 6
- a6.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 6 * lhs_stride_y + zlhs6);
+ a6.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 6 * lhs_stride_y + zlhs6);
#endif // M0 > 6
#if M0 > 7
- a7.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 7 * lhs_stride_y + zlhs7);
+ a7.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 7 * lhs_stride_y + zlhs7);
#endif // M0 > 7
- lhs_offset += sizeof(DATA_TYPE);
- }
+ lhs_offset += sizeof(DATA_TYPE);
+ }
- // Accumulate
- ARM_DOT_K0XN0(K0, a0.v, b, c0);
+ // Accumulate
+ ARM_DOT_K0XN0(K0, a0.v, b, c0);
#if M0 > 1
- ARM_DOT_K0XN0(K0, a1.v, b, c1);
+ ARM_DOT_K0XN0(K0, a1.v, b, c1);
#endif // M0 > 1
#if M0 > 2
- ARM_DOT_K0XN0(K0, a2.v, b, c2);
+ ARM_DOT_K0XN0(K0, a2.v, b, c2);
#endif // M0 > 2
#if M0 > 3
- ARM_DOT_K0XN0(K0, a3.v, b, c3);
+ ARM_DOT_K0XN0(K0, a3.v, b, c3);
#endif // M0 > 3
#if M0 > 4
- ARM_DOT_K0XN0(K0, a4.v, b, c4);
+ ARM_DOT_K0XN0(K0, a4.v, b, c4);
#endif // M0 > 4
#if M0 > 5
- ARM_DOT_K0XN0(K0, a5.v, b, c5);
+ ARM_DOT_K0XN0(K0, a5.v, b, c5);
#endif // M0 > 5
#if M0 > 6
- ARM_DOT_K0XN0(K0, a6.v, b, c6);
+ ARM_DOT_K0XN0(K0, a6.v, b, c6);
#endif // M0 > 6
#if M0 > 7
- ARM_DOT_K0XN0(K0, a7.v, b, c7);
+ ARM_DOT_K0XN0(K0, a7.v, b, c7);
#endif // M0 > 7
-
-#endif // LEFTOVER_K != 0
+ }
__global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * dst_stride_y);
@@ -1596,7 +765,7 @@ __kernel void gemm_mm_reshaped_only_rhs_t_texture(IMAGE_DECLARATION(lhs),
#if defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE));
- LOAD_BLOCK(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
+ LOAD_BLOCK_BOUNDARY_AWARE(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero, 1, PARTIAL_STORE_N0, false, cond_x);
#ifndef UNIT_BETA
SCALE_BLOCK(1, DATA_TYPE, bias, BETA);
@@ -1608,7 +777,7 @@ __kernel void gemm_mm_reshaped_only_rhs_t_texture(IMAGE_DECLARATION(lhs),
#else // defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * bias_stride_y) + z * bias_stride_z;
- LOAD_BLOCK(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
+ LOAD_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#ifndef UNIT_BETA
SCALE_BLOCK(M0, DATA_TYPE, bias, BETA);
@@ -1621,22 +790,19 @@ __kernel void gemm_mm_reshaped_only_rhs_t_texture(IMAGE_DECLARATION(lhs),
#endif // defined(BETA)
#if defined(ACTIVATION_TYPE)
- ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, c, A_VAL, B_VAL);
+ ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, N0, c, A_VAL, B_VAL);
#endif // defined(ACTIVATION_TYPE)
- const bool cond_y = y == 0;
- const bool cond_x = ((x + 1) * N0 >= N);
-
// Store output block
STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, c, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#undef RHS_BLOCK_SIZE
#undef RHS_OFFSET_X
#undef RHS_STEP_X
-#undef LEFTOVER_K
+#undef RHS_STEP_LOOP
#undef PIXEL_UNIT
}
-#endif // defined(OPENCL_IMAGE_SUPPORT)
+#endif // defined(OPENCL_IMAGE_SUPPORT) && defined(GEMM_MM_RESHAPED_ONLY_RHS_T_TEXTURE)
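With the new per-kernel guards, a host only compiles the variant it actually needs by defining the matching macro in the build options, alongside the usual tile-size defines. A minimal host-side sketch using the plain OpenCL C API is shown below; the guard macro names come from this file, while the concrete -D values and the surrounding program/device handles are illustrative assumptions rather than the library's own configure path.

#include <CL/cl.h>

// Hypothetical sketch: compile only the guarded texture variant of the
// reshaped-only-RHS (transposed) kernel. The tile sizes below are example
// values; only the guard macro names are taken from this file.
static cl_int build_only_rhs_t_texture(cl_program program, cl_device_id device)
{
    const char *opts =
        "-DGEMM_MM_RESHAPED_ONLY_RHS_T_TEXTURE "
        "-DOPENCL_IMAGE_SUPPORT "
        "-DDATA_TYPE=float -DM0=4 -DN0=4 -DK0=4 -DH0=4 "
        "-DPARTIAL_STORE_M0=1 -DPARTIAL_STORE_N0=2";
    return clBuildProgram(program, 1, &device, opts, nullptr, nullptr);
}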
#define VFMA(a, b, c) \
({ \
@@ -1715,12 +881,13 @@ __kernel void gemm_mm_reshaped_only_rhs_t_texture(IMAGE_DECLARATION(lhs),
#error "M0 not supported"
#endif // M0 not supported
+#if defined(GEMM_MM_RESHAPED_ONLY_RHS_NT)
/** This OpenCL kernel computes the matrix multiplication between 2 matrices.
* The LHS matrix is NOT reshaped
* The RHS is reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the block K0xN0 is NOT transposed
*
* @note If the first two dimensions of NDRange have been dispatched with "dummy_work_items" support, the option -DDUMMY_WORK_ITEMS must be passed at compile time.
- * @note The GEMM's dimensions (M,N and K) must be passed at compile time using -DM, -DN and and -DK (e.g. -DM=52, -DN=30 and -DK=90).
+ * @note The GEMM's dimensions (M, N and K) must be passed at runtime as kernel parameters.
* @note The block's dimensions used for reshaping the RHS matrix (N0 and K0) must be passed at compile time using -DN0 and -DK0 (e.g. -DN0=8, -DK0=4).
* @note The number of M0 rows to process must be passed at compile time using -DM0 (e.g. -DM0=2)
* @note The number of K0xN0 horizontal blocks stored on the same output row of the reshaped RHS matrix must be passed at compile time using -DH0 (e.g. -DH0=2)
@@ -1772,6 +939,9 @@ __kernel void gemm_mm_reshaped_only_rhs_t_texture(IMAGE_DECLARATION(lhs),
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] lhs_cross_plane_pad (Optional) Bottom paddings for LHS matrix in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
* @param[in] dst_cross_plane_pad (Optional) Bottom paddings for the output matrix in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
+ * @param[in] M Number of rows in LHS matrix not reshaped.
+ * @param[in] N Number of columns in RHS matrix not reshaped.
+ * @param[in] K Number of columns in LHS matrix and rows in RHS matrix not reshaped.
*/
__kernel void gemm_mm_reshaped_only_rhs_nt(IMAGE_DECLARATION(lhs),
IMAGE_DECLARATION(rhs),
@@ -1793,7 +963,10 @@ __kernel void gemm_mm_reshaped_only_rhs_nt(IMAGE_DECLARATION(lhs),
,
uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
- )
+ ,
+ const int M,
+ const int N,
+ const int K)
{
// Block size
#define RHS_BLOCK_SIZE ((K0) * (N0))
@@ -1813,6 +986,9 @@ __kernel void gemm_mm_reshaped_only_rhs_nt(IMAGE_DECLARATION(lhs),
uint y = get_global_id(1);
uint z = get_global_id(2);
+ const bool cond_y = y == 0;
+ const bool cond_x = ((x + 1) * N0 >= N);
+
#if defined(DUMMY_WORK_ITEMS)
if((x * N0 >= N) || (y * M0 >= M))
{
@@ -1992,7 +1168,7 @@ __kernel void gemm_mm_reshaped_only_rhs_nt(IMAGE_DECLARATION(lhs),
#if defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE));
- LOAD_BLOCK(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
+ LOAD_BLOCK_BOUNDARY_AWARE(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero, 1, PARTIAL_STORE_N0, false, cond_x);
#ifndef UNIT_BETA
SCALE_BLOCK(1, DATA_TYPE, bias, BETA);
@@ -2004,7 +1180,7 @@ __kernel void gemm_mm_reshaped_only_rhs_nt(IMAGE_DECLARATION(lhs),
#else // defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * bias_stride_y) + z * bias_stride_z;
- LOAD_BLOCK(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
+ LOAD_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#ifndef UNIT_BETA
SCALE_BLOCK(M0, DATA_TYPE, bias, BETA);
@@ -2017,28 +1193,27 @@ __kernel void gemm_mm_reshaped_only_rhs_nt(IMAGE_DECLARATION(lhs),
#endif // defined(BETA)
#if defined(ACTIVATION_TYPE)
- ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, c, A_VAL, B_VAL);
+ ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, N0, c, A_VAL, B_VAL);
#endif // defined(ACTIVATION_TYPE)
- const bool cond_y = y == 0;
- const bool cond_x = ((x + 1) * N0 >= N);
-
// Store output block
STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, c, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#undef RHS_BLOCK_SIZE
#undef RHS_OFFSET_X
#undef RHS_STEP_X
+#undef RHS_STEP_LOOP
}
+#endif // defined(GEMM_MM_RESHAPED_ONLY_RHS_NT)
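Each of these kernels now takes M, N and K as additional int kernel arguments instead of the -DM/-DN/-DK defines, and the reshaped variants also drop the old standalone k parameter. A hypothetical host-side sketch of setting those runtime dimensions follows; the argument index depends on how many tensor and stride arguments precede them, so first_dim_arg is purely illustrative.

#include <CL/cl.h>

// Hypothetical sketch: set the runtime GEMM dimensions M, N and K.
// 'first_dim_arg' is the index of M and is an assumption here.
static cl_int set_gemm_dims(cl_kernel kernel, cl_uint first_dim_arg, cl_int M, cl_int N, cl_int K)
{
    cl_int err = clSetKernelArg(kernel, first_dim_arg + 0, sizeof(cl_int), &M);
    err |= clSetKernelArg(kernel, first_dim_arg + 1, sizeof(cl_int), &N);
    err |= clSetKernelArg(kernel, first_dim_arg + 2, sizeof(cl_int), &K);
    return err;
}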
-#if defined(OPENCL_IMAGE_SUPPORT)
+#if defined(OPENCL_IMAGE_SUPPORT) && defined(GEMM_MM_RESHAPED_ONLY_RHS_NT_TEXTURE)
/** This OpenCL kernel computes the matrix multiplication between 2 matrices.
* The LHS matrix is NOT reshaped
* The RHS is reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the block K0xN0 is NOT transposed
*
* @note -DOPENCL_IMAGE_SUPPORT must be passed at compile time in order to compile this OpenCL kernel
* @note If the first two dimensions of NDRange have been dispatched with "dummy_work_items" support, the option -DDUMMY_WORK_ITEMS must be passed at compile time.
- * @note The GEMM's dimensions (M,N and K) must be passed at compile time using -DM, -DN and and -DK (e.g. -DM=52, -DN=30 and -DK=90).
+ * @note The GEMM's dimensions (M, N and K) must be passed at runtime as kernel parameters.
* @note The height of the RHS matrix, defined before creating the OpenCL image object from the OpenCL buffer, should be passed at compile time using -DRHS_HEIGHT=<value> (e.g. -DRHS_HEIGHT=32)
* Since we cannot create a 3d image from a buffer, the third dimension could be collapsed with the second dimension so RHS_HEIGHT
* could be different from the value returned by get_image_height(rhs_img).
@@ -2088,6 +1263,9 @@ __kernel void gemm_mm_reshaped_only_rhs_nt(IMAGE_DECLARATION(lhs),
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] lhs_cross_plane_pad (Optional) Bottom paddings for LHS matrix in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
* @param[in] dst_cross_plane_pad (Optional) Bottom paddings for the output matrix in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
+ * @param[in] M Number of rows in LHS matrix not reshaped.
+ * @param[in] N Number of columns in RHS matrix not reshaped.
+ * @param[in] K Number of columns in LHS matrix and rows in RHS matrix not reshaped.
*/
__kernel void gemm_mm_reshaped_only_rhs_nt_texture(IMAGE_DECLARATION(lhs),
__read_only image2d_t rhs_img,
@@ -2109,7 +1287,10 @@ __kernel void gemm_mm_reshaped_only_rhs_nt_texture(IMAGE_DECLARATION(lhs),
,
uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
- )
+ ,
+ const int M,
+ const int N,
+ const int K)
{
// Pixel unit
#define PIXEL_UNIT CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(N0)
@@ -2121,15 +1302,20 @@ __kernel void gemm_mm_reshaped_only_rhs_nt_texture(IMAGE_DECLARATION(lhs),
#if defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (PIXEL_UNIT)
#define RHS_STEP_X ((PIXEL_UNIT) * (H0))
+#define RHS_STEP_LOOP 1
#else // defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (RHS_BLOCK_SIZE)
#define RHS_STEP_X (PIXEL_UNIT)
+#define RHS_STEP_LOOP (H0)
#endif // defined(RHS_INTERLEAVE)
uint x = get_global_id(0);
uint y = get_global_id(1);
uint z = get_global_id(2);
+ const bool cond_y = y == 0;
+ const bool cond_x = ((x + 1) * N0 >= N);
+
#if defined(DUMMY_WORK_ITEMS)
if((x * N0 >= N) || (y * M0 >= M))
{
@@ -2301,7 +1487,7 @@ __kernel void gemm_mm_reshaped_only_rhs_nt_texture(IMAGE_DECLARATION(lhs),
#if defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE));
- LOAD_BLOCK(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
+ LOAD_BLOCK_BOUNDARY_AWARE(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero, 1, PARTIAL_STORE_N0, false, cond_x);
#ifndef UNIT_BETA
SCALE_BLOCK(1, DATA_TYPE, bias, BETA);
@@ -2313,7 +1499,7 @@ __kernel void gemm_mm_reshaped_only_rhs_nt_texture(IMAGE_DECLARATION(lhs),
#else // defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * bias_stride_y) + z * bias_stride_z;
- LOAD_BLOCK(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
+ LOAD_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#ifndef UNIT_BETA
SCALE_BLOCK(M0, DATA_TYPE, bias, BETA);
@@ -2326,23 +1512,21 @@ __kernel void gemm_mm_reshaped_only_rhs_nt_texture(IMAGE_DECLARATION(lhs),
#endif // defined(BETA)
#if defined(ACTIVATION_TYPE)
- ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, c, A_VAL, B_VAL);
+ ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, N0, c, A_VAL, B_VAL);
#endif // defined(ACTIVATION_TYPE)
- const bool cond_y = y == 0;
- const bool cond_x = ((x + 1) * N0 >= N);
-
// Store output block
STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, c, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#undef RHS_BLOCK_SIZE
#undef RHS_OFFSET_X
#undef RHS_STEP_X
+#undef RHS_STEP_LOOP
}
-#endif // defined(OPENCL_IMAGE_SUPPORT)
-#endif // defined(M0) && defined(N0) && defined(K0) && defined(H0) && defined(DATA_TYPE) && defined(M) && defined(N) && defined(K)
+#endif // defined(OPENCL_IMAGE_SUPPORT) && defined(GEMM_MM_RESHAPED_ONLY_RHS_NT_TEXTURE)
+#endif // defined(M0) && defined(N0) && defined(K0) && defined(H0) && defined(DATA_TYPE)
-#if defined(M0) && defined(N0) && defined(K0) && defined(V0) && defined(H0) && defined(DATA_TYPE) && defined(DATA_TYPE_ACCUMULATOR) && defined(M) && defined(N)
+#if defined(M0) && defined(N0) && defined(K0) && defined(V0) && defined(H0) && defined(DATA_TYPE) && defined(DATA_TYPE_ACCUMULATOR)
#if defined(MIXED_PRECISION)
#if K0 == 2
@@ -2460,6 +1644,10 @@ __kernel void gemm_mm_reshaped_only_rhs_nt_texture(IMAGE_DECLARATION(lhs),
#endif // K0 conditions
#endif // defined(MIXED_PRECISION)
+#if defined(ARM_DOT_K0XN0)
+#undef ARM_DOT_K0XN0
+#endif // defined(ARM_DOT_K0XN0)
+
#if N0 == 2
#define ARM_DOT_K0XN0(a, b, c) \
({ \
@@ -2517,6 +1705,7 @@ __kernel void gemm_mm_reshaped_only_rhs_nt_texture(IMAGE_DECLARATION(lhs),
#error "N0 value not supported"
#endif // N0 conditions
+#if defined(GEMM_MM_RESHAPED_LHS_NT_RHS_T)
/** This OpenCL kernel computes the matrix multiplication between 2 matrices.
* The LHS matrix must be reshaped with @ref CLGEMMReshapeLHSMatrixKernel and the M0xK0 must be NOT transposed
* The RHS matrix must be reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the K0xN0 must be transposed
@@ -2572,12 +1761,14 @@ __kernel void gemm_mm_reshaped_only_rhs_nt_texture(IMAGE_DECLARATION(lhs),
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
* @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- * @param[in] k Number of columns in LHS matrix and rows in RHS matrix not reshaped.
* @param[in] lhs_stride_z Stride of the LHS reshaped matrix in Z dimension (in bytes)
* @param[in] rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes)
* @param[in] bias_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] dst_cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
+ * @param[in] M Number of rows in LHS matrix not reshaped.
+ * @param[in] N Number of columns in RHS matrix not reshaped.
+ * @param[in] K Number of columns in LHS matrix and rows in RHS matrix not reshaped.
*/
__kernel void gemm_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs),
IMAGE_DECLARATION(rhs),
@@ -2585,7 +1776,6 @@ __kernel void gemm_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs),
IMAGE_DECLARATION(bias),
#endif // defined(BETA)
IMAGE_DECLARATION(dst),
- uint k,
uint lhs_stride_z,
uint rhs_stride_z,
#if defined(BETA)
@@ -2596,7 +1786,10 @@ __kernel void gemm_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs),
,
uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
- )
+ ,
+ const int M,
+ const int N,
+ const int K)
{
// Block size
#define LHS_BLOCK_SIZE ((K0) * (M0))
@@ -2652,7 +1845,7 @@ __kernel void gemm_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs),
REPEAT_VAR_INIT_TO_CONST(M0, uint, zlhs, 0); //uint zlhs0=0,zlhs1=0,zlhs2=0,... zlhs7=0;
REPEAT_VAR_INIT_TO_CONST(16, uint, zero, 0);
- for(int i = 0; i < k; i += K0)
+ for(int i = 0; i < K; i += K0)
{
// Supported cases (M0, K0):
// 1,2 - 1,3 - 1,4 - 1,8 - 1,16
@@ -2701,6 +1894,9 @@ __kernel void gemm_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs),
REPEAT_VAR_INIT_TO_CONST(M0, uint, zout, 0);
+ const bool cond_y = ((get_global_id(1) + 1) * M0 >= M);
+ const bool cond_x = ((get_global_id(0) + 1) * N0 >= N);
+
#if defined(REINTERPRET_OUTPUT_AS_3D)
// The plane (zin) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
@@ -2726,7 +1922,7 @@ __kernel void gemm_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs),
#if defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE));
- LOAD_BLOCK(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
+ LOAD_BLOCK_BOUNDARY_AWARE(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero, 1, PARTIAL_STORE_N0, false, cond_x);
#ifndef UNIT_BETA
SCALE_BLOCK(1, DATA_TYPE, bias, BETA);
@@ -2744,7 +1940,7 @@ __kernel void gemm_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs),
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE)) + (get_global_id(1) * (uint)M0 * bias_stride_y) + get_global_id(
2) * bias_stride_z;
- LOAD_BLOCK(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
+ LOAD_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#ifndef UNIT_BETA
SCALE_BLOCK(M0, DATA_TYPE, bias, BETA);
@@ -2763,15 +1959,12 @@ __kernel void gemm_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs),
#if defined(ACTIVATION_TYPE)
#if defined(MIXED_PRECISION)
- ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE_ACCUMULATOR, VEC_SIZE, c, A_VAL, B_VAL);
+ ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE_ACCUMULATOR, N0, c, A_VAL, B_VAL);
#else // defined(MIXED_PRECISION)
- ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, c, A_VAL, B_VAL);
+ ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, N0, c, A_VAL, B_VAL);
#endif // defined(MIXED_PRECISION)
#endif // defined(ACTIVATION_TYPE)
- const bool cond_y = ((get_global_id(1) + 1) * M0 >= M);
- const bool cond_x = ((get_global_id(0) + 1) * N0 >= N);
-
// Store output block
#if defined(MIXED_PRECISION)
CONVERT_BLOCK(M0, N0, DATA_TYPE, c, c_lp);
@@ -2789,8 +1982,9 @@ __kernel void gemm_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs),
#undef LHS_STEP_LOOP
#undef RHS_STEP_LOOP
}
+#endif // defined(GEMM_MM_RESHAPED_LHS_NT_RHS_T)
-#if defined(OPENCL_IMAGE_SUPPORT)
+#if defined(OPENCL_IMAGE_SUPPORT) && defined(GEMM_MM_RESHAPED_LHS_NT_RHS_T_TEXTURE)
/** This OpenCL kernel computes the matrix multiplication between 2 matrices. The RHS matrix is stored in OpenCL image object.
* The LHS matrix must be reshaped with @ref CLGEMMReshapeLHSMatrixKernel and the M0xK0 must be NOT transposed
* The RHS matrix must be reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the K0xN0 must be transposed
@@ -2845,12 +2039,14 @@ __kernel void gemm_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs),
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
* @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- * @param[in] k Number of columns in LHS matrix and rows in RHS matrix not reshaped.
* @param[in] lhs_stride_z Stride of the LHS reshaped matrix in Z dimension (in bytes)
* @param[in] rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes)
* @param[in] bias_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] dst_cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
+ * @param[in] M Number of rows in LHS matrix not reshaped.
+ * @param[in] N Number of columns in RHS matrix not reshaped.
+ * @param[in] K Number of columns in LHS matrix and rows in RHS matrix not reshaped.
*/
__kernel void gemm_mm_reshaped_lhs_nt_rhs_t_texture(IMAGE_DECLARATION(lhs),
__read_only image2d_t rhs_img,
@@ -2858,7 +2054,6 @@ __kernel void gemm_mm_reshaped_lhs_nt_rhs_t_texture(IMAGE_DECLARATION(lhs),
IMAGE_DECLARATION(bias),
#endif // defined(BETA)
IMAGE_DECLARATION(dst),
- uint k,
uint lhs_stride_z,
uint rhs_stride_z,
#if defined(BETA)
@@ -2869,7 +2064,10 @@ __kernel void gemm_mm_reshaped_lhs_nt_rhs_t_texture(IMAGE_DECLARATION(lhs),
,
uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
- )
+ ,
+ const int M,
+ const int N,
+ const int K)
{
// Pixel unit
#define PIXEL_UNIT CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(K0)
@@ -2971,6 +2169,9 @@ __kernel void gemm_mm_reshaped_lhs_nt_rhs_t_texture(IMAGE_DECLARATION(lhs),
REPEAT_VAR_INIT_TO_CONST(M0, uint, zout, 0);
+ const bool cond_y = ((get_global_id(1) + 1) * M0 >= M);
+ const bool cond_x = ((get_global_id(0) + 1) * N0 >= N);
+
#if defined(REINTERPRET_OUTPUT_AS_3D)
// The plane (zin) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
@@ -2996,7 +2197,7 @@ __kernel void gemm_mm_reshaped_lhs_nt_rhs_t_texture(IMAGE_DECLARATION(lhs),
#if defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE));
- LOAD_BLOCK(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
+ LOAD_BLOCK_BOUNDARY_AWARE(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero, 1, PARTIAL_STORE_N0, false, cond_x);
#ifndef UNIT_BETA
SCALE_BLOCK(1, DATA_TYPE, bias, BETA);
@@ -3014,7 +2215,7 @@ __kernel void gemm_mm_reshaped_lhs_nt_rhs_t_texture(IMAGE_DECLARATION(lhs),
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE)) + (get_global_id(1) * (uint)M0 * bias_stride_y) + get_global_id(
2) * bias_stride_z;
- LOAD_BLOCK(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
+ LOAD_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#ifndef UNIT_BETA
SCALE_BLOCK(M0, DATA_TYPE, bias, BETA);
@@ -3033,15 +2234,12 @@ __kernel void gemm_mm_reshaped_lhs_nt_rhs_t_texture(IMAGE_DECLARATION(lhs),
#if defined(ACTIVATION_TYPE)
#if defined(MIXED_PRECISION)
- ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE_ACCUMULATOR, VEC_SIZE, c, A_VAL, B_VAL);
+ ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE_ACCUMULATOR, N0, c, A_VAL, B_VAL);
#else // defined(MIXED_PRECISION)
- ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, c, A_VAL, B_VAL);
+ ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, N0, c, A_VAL, B_VAL);
#endif // defined(MIXED_PRECISION)
#endif // defined(ACTIVATION_TYPE)
- const bool cond_y = ((get_global_id(1) + 1) * M0 >= M);
- const bool cond_x = ((get_global_id(0) + 1) * N0 >= N);
-
// Store output block
#if defined(MIXED_PRECISION)
CONVERT_BLOCK(M0, N0, DATA_TYPE, c, c_lp);
@@ -3060,7 +2258,7 @@ __kernel void gemm_mm_reshaped_lhs_nt_rhs_t_texture(IMAGE_DECLARATION(lhs),
#undef LHS_STEP_LOOP
#undef RHS_STEP_LOOP
}
-#endif // defined(OPENCL_IMAGE_SUPPORT)
+#endif // defined(OPENCL_IMAGE_SUPPORT) && defined(GEMM_MM_RESHAPED_LHS_NT_RHS_T_TEXTURE)
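In all of these kernels the vector width handed to ACTIVATION_BLOCK is now N0, i.e. the width of each accumulator row c0..c(M0-1), rather than a separate VEC_SIZE define. The standalone C++ sketch below only illustrates what an element-wise activation over such an M0 x N0 block amounts to; it is not the library's ACTIVATION_BLOCK macro, and RELU is just an example activation.

#include <algorithm>

// Illustrative only: apply RELU to an M0 x N0 accumulator block, one
// N0-wide row at a time, mirroring the role of ACTIVATION_BLOCK(M0, ..., N0, c, ...).
template <int M0, int N0>
void activation_block_relu(float (&c)[M0][N0])
{
    for(int m = 0; m < M0; ++m)
    {
        for(int n = 0; n < N0; ++n)
        {
            c[m][n] = std::max(c[m][n], 0.0f);
        }
    }
}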
#if defined(LHS_TRANSPOSE)
@@ -3172,6 +2370,7 @@ __kernel void gemm_mm_reshaped_lhs_nt_rhs_t_texture(IMAGE_DECLARATION(lhs),
CONCAT(ARM_MM_T_NT_M0xN0x, K0) \
(M0, N0, TYPE, A, B, C)
+#if defined(GEMM_MM_RESHAPED_LHS_T_RHS_NT)
/** This OpenCL kernel computes the matrix multiplication between 2 matrices.
* The LHS matrix must be reshaped with @ref CLGEMMReshapeLHSMatrixKernel and the M0xK0 must be transposed
* The RHS matrix must be reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the K0xN0 must be NOT transposed
@@ -3225,12 +2424,14 @@ __kernel void gemm_mm_reshaped_lhs_nt_rhs_t_texture(IMAGE_DECLARATION(lhs),
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
* @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- * @param[in] k Number of columns in LHS matrix and rows in RHS matrix not reshaped.
* @param[in] lhs_stride_z Stride of the LHS reshaped matrix in Z dimension (in bytes)
* @param[in] rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes)
* @param[in] bias_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] dst_cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
+ * @param[in] M Number of rows in LHS matrix not reshaped.
+ * @param[in] N Number of columns in RHS matrix not reshaped.
+ * @param[in] K Number of columns in LHS matrix and rows in RHS matrix not reshaped.
*/
__kernel void gemm_mm_reshaped_lhs_t_rhs_nt(IMAGE_DECLARATION(lhs),
IMAGE_DECLARATION(rhs),
@@ -3238,7 +2439,6 @@ __kernel void gemm_mm_reshaped_lhs_t_rhs_nt(IMAGE_DECLARATION(lhs),
IMAGE_DECLARATION(bias),
#endif // defined(BETA)
IMAGE_DECLARATION(dst),
- uint k,
uint lhs_stride_z,
uint rhs_stride_z,
#if defined(BETA)
@@ -3249,7 +2449,10 @@ __kernel void gemm_mm_reshaped_lhs_t_rhs_nt(IMAGE_DECLARATION(lhs),
,
uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
- )
+ ,
+ const int M,
+ const int N,
+ const int K)
{
// Block size
#define LHS_BLOCK_SIZE ((K0) * (M0))
@@ -3280,6 +2483,9 @@ __kernel void gemm_mm_reshaped_lhs_t_rhs_nt(IMAGE_DECLARATION(lhs),
const uint y = get_global_id(1);
const uint z = get_global_id(2);
+ const bool cond_y = ((get_global_id(1) + 1) * M0 >= M);
+ const bool cond_x = ((get_global_id(0) + 1) * N0 >= N);
+
#if defined(DUMMY_WORK_ITEMS)
if((x * N0 >= N) || (y * M0 >= M))
{
@@ -3308,7 +2514,7 @@ __kernel void gemm_mm_reshaped_lhs_t_rhs_nt(IMAGE_DECLARATION(lhs),
__global DATA_TYPE *lhs = (__global DATA_TYPE *)(lhs_addr);
__global DATA_TYPE *rhs = (__global DATA_TYPE *)(rhs_addr);
- for(int i = 0; i < k; i += K0)
+ for(int i = 0; i < K; i += K0)
{
VEC_DATA_TYPE(DATA_TYPE, M0)
a0;
@@ -3491,7 +2697,7 @@ __kernel void gemm_mm_reshaped_lhs_t_rhs_nt(IMAGE_DECLARATION(lhs),
#if defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE));
- LOAD_BLOCK(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
+ LOAD_BLOCK_BOUNDARY_AWARE(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero, 1, PARTIAL_STORE_N0, false, cond_x);
#ifndef UNIT_BETA
SCALE_BLOCK(1, DATA_TYPE, bias, BETA);
@@ -3509,7 +2715,7 @@ __kernel void gemm_mm_reshaped_lhs_t_rhs_nt(IMAGE_DECLARATION(lhs),
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE)) + (get_global_id(1) * (uint)M0 * bias_stride_y) + get_global_id(
2) * bias_stride_z;
- LOAD_BLOCK(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
+ LOAD_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#ifndef UNIT_BETA
SCALE_BLOCK(M0, DATA_TYPE, bias, BETA);
@@ -3527,15 +2733,12 @@ __kernel void gemm_mm_reshaped_lhs_t_rhs_nt(IMAGE_DECLARATION(lhs),
#if defined(ACTIVATION_TYPE)
#if defined(MIXED_PRECISION)
- ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE_ACCUMULATOR, VEC_SIZE, c, A_VAL, B_VAL);
+ ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE_ACCUMULATOR, N0, c, A_VAL, B_VAL);
#else // defined(MIXED_PRECISION)
- ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, c, A_VAL, B_VAL);
+ ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, N0, c, A_VAL, B_VAL);
#endif // defined(MIXED_PRECISION)
#endif // defined(ACTIVATION_TYPE)
- const bool cond_y = ((get_global_id(1) + 1) * M0 >= M);
- const bool cond_x = ((get_global_id(0) + 1) * N0 >= N);
-
// Store output block
#if defined(MIXED_PRECISION)
CONVERT_BLOCK(M0, N0, DATA_TYPE, c, c_lp);
@@ -3551,8 +2754,9 @@ __kernel void gemm_mm_reshaped_lhs_t_rhs_nt(IMAGE_DECLARATION(lhs),
#undef RHS_OFFSET_X
#undef RHS_STEP_X
}
+#endif // defined(GEMM_MM_RESHAPED_LHS_T_RHS_NT)
-#if defined(OPENCL_IMAGE_SUPPORT)
+#if defined(OPENCL_IMAGE_SUPPORT) && defined(GEMM_MM_RESHAPED_LHS_T_RHS_NT_TEXTURE)
/** This OpenCL kernel computes the matrix multiplication between 2 matrices. The RHS matrix is stored in OpenCL image object.
* The LHS matrix must be reshaped with @ref CLGEMMReshapeLHSMatrixKernel and the M0xK0 must be transposed
* The RHS matrix must be reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the K0xN0 must be NOT transposed
@@ -3560,7 +2764,7 @@ __kernel void gemm_mm_reshaped_lhs_t_rhs_nt(IMAGE_DECLARATION(lhs),
* @note -DOPENCL_IMAGE_SUPPORT must be passed at compile time in order to compile this OpenCL kernel
* @note LHS_TRANSPOSE should be passed at compile time in order to compile this OpenCL kernel (e.g. -DLHS_TRANSPOSE).
* @note If the first two dimensions of NDRange have been dispatched with "dummy_work_items" support, the option -DDUMMY_WORK_ITEMS must be passed at compile time.
- * @note The GEMM's dimensions M, N and K must be passed at compile time using -DM, -DN and -DK (e.g. -DM=52, -DN=90 and -DK=24).
+ * @note The GEMM's dimensions M, N and K must be passed at runtime.
* @note The height of the RHS matrix, defined before creating the OpenCL image object from the OpenCL buffer, should be passed at compile time using -DRHS_HEIGHT=<value> (e.g. -DRHS_HEIGHT=32)
* Since we cannot create a 3d image from a buffer, the third dimension could be collapsed with the second dimension so RHS_HEIGHT
* could be different from the value returned by get_image_height(rhs_img).
@@ -3605,12 +2809,14 @@ __kernel void gemm_mm_reshaped_lhs_t_rhs_nt(IMAGE_DECLARATION(lhs),
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
* @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- * @param[in] k Number of columns in LHS matrix and rows in RHS matrix not reshaped.
* @param[in] lhs_stride_z Stride of the LHS reshaped matrix in Z dimension (in bytes)
* @param[in] rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes)
* @param[in] bias_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] dst_cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
+ * @param[in] M Number of rows in LHS matrix not reshaped.
+ * @param[in] N Number of columns in RHS matrix not reshaped.
+ * @param[in] K Number of columns in LHS matrix and rows in RHS matrix not reshaped.
*/
__kernel void gemm_mm_reshaped_lhs_t_rhs_nt_texture(IMAGE_DECLARATION(lhs),
__read_only image2d_t rhs_img,
@@ -3618,7 +2824,6 @@ __kernel void gemm_mm_reshaped_lhs_t_rhs_nt_texture(IMAGE_DECLARATION(lhs),
IMAGE_DECLARATION(bias),
#endif // defined(BETA)
IMAGE_DECLARATION(dst),
- uint k,
uint lhs_stride_z,
uint rhs_stride_z,
#if defined(BETA)
@@ -3629,7 +2834,10 @@ __kernel void gemm_mm_reshaped_lhs_t_rhs_nt_texture(IMAGE_DECLARATION(lhs),
,
uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
- )
+ ,
+ const int M,
+ const int N,
+ const int K)
{
// Pixel unit
#define PIXEL_UNIT CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(N0)
@@ -3834,6 +3042,9 @@ __kernel void gemm_mm_reshaped_lhs_t_rhs_nt_texture(IMAGE_DECLARATION(lhs),
REPEAT_VAR_INIT_TO_CONST(M0, uint, zout, 0);
+ const bool cond_y = ((get_global_id(1) + 1) * M0 >= M);
+ const bool cond_x = ((get_global_id(0) + 1) * N0 >= N);
+
#if defined(REINTERPRET_OUTPUT_AS_3D)
// The plane (zin) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
@@ -3859,7 +3070,7 @@ __kernel void gemm_mm_reshaped_lhs_t_rhs_nt_texture(IMAGE_DECLARATION(lhs),
#if defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE));
- LOAD_BLOCK(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
+ LOAD_BLOCK_BOUNDARY_AWARE(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero, 1, PARTIAL_STORE_N0, false, cond_x);
#ifndef UNIT_BETA
SCALE_BLOCK(1, DATA_TYPE, bias, BETA);
@@ -3876,7 +3087,7 @@ __kernel void gemm_mm_reshaped_lhs_t_rhs_nt_texture(IMAGE_DECLARATION(lhs),
#else // defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (y * (uint)M0 * bias_stride_y) + z * bias_stride_z;
- LOAD_BLOCK(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
+ LOAD_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#ifndef UNIT_BETA
SCALE_BLOCK(M0, DATA_TYPE, bias, BETA);
@@ -3894,15 +3105,12 @@ __kernel void gemm_mm_reshaped_lhs_t_rhs_nt_texture(IMAGE_DECLARATION(lhs),
#if defined(ACTIVATION_TYPE)
#if defined(MIXED_PRECISION)
- ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE_ACCUMULATOR, VEC_SIZE, c, A_VAL, B_VAL);
+ ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE_ACCUMULATOR, N0, c, A_VAL, B_VAL);
#else // defined(MIXED_PRECISION)
- ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, c, A_VAL, B_VAL);
+ ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, N0, c, A_VAL, B_VAL);
#endif // defined(MIXED_PRECISION)
#endif // defined(ACTIVATION_TYPE)
- const bool cond_y = ((get_global_id(1) + 1) * M0 >= M);
- const bool cond_x = ((get_global_id(0) + 1) * N0 >= N);
-
// Store output block
#if defined(MIXED_PRECISION)
CONVERT_BLOCK(M0, N0, DATA_TYPE, c, c_lp);
@@ -3921,13 +3129,13 @@ __kernel void gemm_mm_reshaped_lhs_t_rhs_nt_texture(IMAGE_DECLARATION(lhs),
#undef LHS_STEP_LOOP
#undef RHS_STEP_LOOP
}
-#endif // defined(OPENCL_IMAGE_SUPPORT)
+#endif // defined(OPENCL_IMAGE_SUPPORT) && defined(GEMM_MM_RESHAPED_LHS_T_RHS_NT_TEXTURE)
#endif // defined(LHS_TRANSPOSE)
-#endif // defined(M0) && defined(N0) && defined(K0) && defined(V0) && defined(H0) && defined(K) && defined(DATA_TYPE)
+#endif // defined(M0) && defined(N0) && defined(K0) && defined(V0) && defined(H0) && defined(DATA_TYPE) && defined(DATA_TYPE_ACCUMULATOR)
-#if defined(M0) && defined(N0) && defined(K0) && defined(K) && defined(DATA_TYPE)
+#if defined(M0) && defined(N0) && defined(K0) && defined(DATA_TYPE)
#define VFMA(a, b, c) \
({ \
@@ -4006,13 +3214,13 @@ __kernel void gemm_mm_reshaped_lhs_t_rhs_nt_texture(IMAGE_DECLARATION(lhs),
#error "M0 not supported"
#endif // M0 not supported
+#if defined(GEMM_MM_NATIVE)
/** This OpenCL kernel computes the matrix multiplication between 2 matrices.
* The LHS matrix is NOT reshaped
* The RHS matrix is NOT reshaped
*
* @note If the first two dimensions of NDRange have been dispatched with "dummy_work_items" support, the option -DDUMMY_WORK_ITEMS must be passed at compile time.
- * @note The GEMM's dimensions (M,N and K) must be passed at compile time using -DM, -DN and and -DK (e.g. -DM=52, -DN=30 and -DK=90)
- * @note The number of columns of LHS matrix must be passed at compile time using -DK (e.g. -DK=64)
+ * @note The GEMM's dimensions (M, N and K) must be passed at runtime as kernel parameters.
* @note The number of M0 rows to process must be passed at compile time using -DM0 (e.g. -DM0=2)
* @note The number of K0 partial accumulations must be passed at compile time using -DK0 (e.g., -DK0=2)
* @note The number of N0 columns to process must be passed at compile time using -DN0 (e.g. -DN0=2)
@@ -4060,6 +3268,9 @@ __kernel void gemm_mm_reshaped_lhs_t_rhs_nt_texture(IMAGE_DECLARATION(lhs),
* @param[in] rhs_stride_z Stride of the RHS matrix in Z dimension (in bytes)
* @param[in] bias_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] M Number of rows in LHS matrix not reshaped.
+ * @param[in] N Number of columns in RHS matrix not reshaped.
+ * @param[in] K Number of columns in LHS matrix and rows in RHS matrix not reshaped.
* @param[in] lhs_cross_plane_pad (Optional) Bottom paddings for LHS matrix in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
* @param[in] dst_cross_plane_pad (Optional) Bottom paddings for the output matrix in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
*/
@@ -4074,7 +3285,10 @@ __kernel void gemm_mm_native(IMAGE_DECLARATION(lhs),
#if defined(BETA)
uint bias_stride_z,
#endif //defined(BETA)
- uint dst_stride_z
+ uint dst_stride_z,
+ const int M,
+ const int N,
+ const int K
#if defined(REINTERPRET_INPUT_AS_3D)
,
uint lhs_cross_plane_pad
@@ -4137,6 +3351,7 @@ __kernel void gemm_mm_native(IMAGE_DECLARATION(lhs),
REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE, N0), c, 0); //VEC_DATA_TYPE(DATA_TYPE, N0) c0=0,c1=0,c2=0,... c(M0-1)=0;
int i = 0;
+#if K0 > 1
for(; i <= (K - K0); i += K0)
{
// Supported cases (M0, K0):
@@ -4182,7 +3397,7 @@ __kernel void gemm_mm_native(IMAGE_DECLARATION(lhs),
lhs_offset += K0 * sizeof(DATA_TYPE);
rhs_offset += K0 * rhs_stride_y;
}
-
+#endif // K0 > 1
// Left-over accumulations
for(; i < K; ++i)
{
@@ -4280,7 +3495,7 @@ __kernel void gemm_mm_native(IMAGE_DECLARATION(lhs),
#endif // defined(BETA)
#if defined(ACTIVATION_TYPE)
- ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, c, A_VAL, B_VAL);
+ ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, N0, c, A_VAL, B_VAL);
#endif // defined(ACTIVATION_TYPE)
const bool cond_y = y == 0;
@@ -4288,12 +3503,9 @@ __kernel void gemm_mm_native(IMAGE_DECLARATION(lhs),
// Store output block
STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, c, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-
-#undef RHS_BLOCK_SIZE
-#undef RHS_OFFSET_X
-#undef RHS_STEP_X
}
-#endif // defined(M0) && defined(N0) && defined(K0) && defined(K) && defined(DATA_TYPE)
+#endif // defined(GEMM_MM_NATIVE)
+#endif // defined(M0) && defined(N0) && defined(K0) && defined(DATA_TYPE)
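gemm_mm_native splits the reduction over K into K0-wide partial accumulations followed by a scalar left-over loop, and the new #if K0 > 1 guard removes the blocked loop entirely when K0 == 1. A simplified, standalone C++ sketch of that control flow for a single output element (no vectorisation, no strides) is:

// Illustrative only: blocked accumulation over K with a scalar remainder,
// matching the loop structure of gemm_mm_native above.
void accumulate_row(const float *lhs_row, const float *rhs_col, int K, int K0, float &acc)
{
    int i = 0;
    if(K0 > 1)
    {
        for(; i <= K - K0; i += K0) // full K0-wide blocks
        {
            for(int k = 0; k < K0; ++k)
            {
                acc += lhs_row[i + k] * rhs_col[i + k];
            }
        }
    }
    for(; i < K; ++i) // left-over accumulations
    {
        acc += lhs_row[i] * rhs_col[i];
    }
}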
#if defined(BETA)
/** This OpenCL kernel performs the in-place matrix addition between 2 matrices taking into account that the second matrix might be weighted by a scalar value beta:
@@ -4379,4 +3591,4 @@ __kernel void gemm_ma_f16(TENSOR3D_DECLARATION(src),
vstore8(out, 0, (__global half *)dst.ptr);
}
#endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED)
-#endif // defined(BETA)
\ No newline at end of file
+#endif // defined(BETA)
diff --git a/src/core/CL/cl_kernels/common/gemm_reshaped_only_rhs_mmul.cl b/src/core/CL/cl_kernels/common/gemm_reshaped_only_rhs_mmul.cl
new file mode 100644
index 0000000000..09b8956b68
--- /dev/null
+++ b/src/core/CL/cl_kernels/common/gemm_reshaped_only_rhs_mmul.cl
@@ -0,0 +1,556 @@
+/*
+ * Copyright (c) 2022-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "activation_float_helpers.h"
+#include "helpers.h"
+#include "tile_helpers.h"
+
+#if defined(GEMM_MM_RESHAPED_ONLY_RHS_NT_MMUL)
+/** This OpenCL kernel computes the matrix multiplication between 2 matrices using the MMUL extension:
+ *
+ * The LHS matrix is NOT reshaped
+ * The RHS is reshaped with @ref ClGemmMatrixMultiplyReshapedOnlyRhsKernel and the block K0xN0 is NOT transposed
+ *
+ * @note The block's dimensions used for reshaping the RHS matrix (N0 and K0) must be passed at compile time using -DN0 and -DK0 (e.g. -DN0=8, -DK0=4).
+ * @note The number of M0 rows to process must be passed at compile time using -DM0 (e.g. -DM0=2)
+ * @note The number of output columns processed by the cooperative mmul extension must be passed at compile time using -DMMUL_N0 (e.g., -DMMUL_N0=2)
+ * @note The number of output rows processed by the cooperative mmul extension must be passed at compile time using -DMMUL_M0 (e.g., -DMMUL_M0=2)
+ * @note The number of lhs columns (or rhs rows) processed by the cooperative mmul extension must be passed at compile time using -DMMUL_K0 (e.g., -DMMUL_K0=2)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ * - M0 > 0
+ * - N0 = 1, 2, 3, 4, 8, 16
+ * - K0 = 1
+ *
+ * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions should also be passed at compile time using -DA_VAL= and -DB_VAL= respectively.
+ * The activation function is performed after the bias addition
+ *
+ * @param[in] lhs_ptr Pointer to the LHS tensor. Supported data types: F16/F32
+ * @param[in] lhs_stride_y Stride of the LHS tensor in Y dimension (in bytes)
+ * @param[in] lhs_stride_z Stride of the LHS tensor in Z dimension (in bytes)
+ * @param[in] lhs_w The size of the width dimension of the LHS tensor
+ * @param[in] lhs_h The size of the height dimension of the LHS tensor
+ * @param[in] lhs_n The size of the depth dimension of the LHS tensor
+ * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the LHS tensor
+ * @param[in] rhs_ptr Pointer to the RHS reshaped tensor. Supported data type: same as @p lhs_ptr
+ * @param[in] rhs_stride_y Stride of the RHS tensor in Y dimension (in bytes)
+ * @param[in] rhs_stride_z Stride of the RHS tensor in Z dimension (in bytes)
+ * @param[in] rhs_w The size of the width dimension of the RHS tensor
+ * @param[in] rhs_h The size of the height dimension of the RHS tensor
+ * @param[in] rhs_n The size of the depth dimension of the RHS tensor
+ * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the RHS tensor
+ * @param[in] bia_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr
+ * @param[in] bia_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes)
+ * @param[in] bia_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes)
+ * @param[in] bia_w (Optional) The size of the width dimension of the bias tensor
+ * @param[in] bia_h (Optional) The size of the height dimension of the bias tensor
+ * @param[in] bia_n (Optional) The size of the depth dimension of the bias tensor
+ * @param[in] bia_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data type: same as @p lhs_ptr
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_w The size of the width dimension of the destination tensor
+ * @param[in] dst_h The size of the height dimension of the destination tensor
+ * @param[in] dst_n The size of the depth dimension of the destination tensor
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] M Number of rows in LHS matrix not reshaped
+ * @param[in] N Number of columns in RHS matrix not reshaped
+ * @param[in] K Number of columns in LHS matrix and rows in RHS matrix not reshaped
+ */
+__kernel void gemm_mm_reshaped_only_rhs_nt_mmul(
+ TENSOR3D_T(lhs, BUFFER),
+ TENSOR3D_T(rhs, BUFFER),
+#if defined(BETA)
+ TENSOR3D_T(bia, BUFFER),
+#endif // defined(BETA)
+ TENSOR3D_T(dst, BUFFER),
+ const int M,
+ const int N,
+ const int K)
+{
+#define MMUL_BLOCK_SIZE (MMUL_N0 * MMUL_K0)
+
+ uint x0 = get_global_id(0); // (N / N0) * MMUL_K0
+ uint y0 = get_global_id(1); // (M / M0) / MMUL_M0
+ uint z = get_global_id(2); // Batch
+
+ // Get block ID and thread ID within the block
+ uint block_id = (x0 / MMUL_BLOCK_SIZE);
+ uint thread_id = (x0 % MMUL_BLOCK_SIZE);
+
+ // Coordinate within a block
+ uint block_x = thread_id % MMUL_N0;
+ uint block_y = (thread_id / MMUL_M0);
+
+ // Starting destination coordinates
+ uint dst_x = min(block_x * N0 + block_id * MMUL_N0 * N0, (uint)(N - 1));
+ uint dst_y = min(block_y * M0 + y0 * M0 * MMUL_M0, (uint)(M - M0));
+
+ // Note: We need to clamp dst_x and dst_y because we always need to execute a complete MMUL block! Only after the matrix multiplication
+ // part can we exit the kernel if it is out-of-bound. Remember, we have a cooperative matrix multiplication. Therefore, we need a full block to get the correct results
+
+ // Starting LHS coordinates
+ uint lhs_x = block_x;
+ uint lhs_y = dst_y;
+
+ // Starting RHS coordinates
+ uint rhs_x = block_y * N0 * MMUL_N0 + block_x * N0;
+ uint rhs_y = block_id;
+
+ // Compute LHS/RHS/DST matrix address
+#ifdef REINTERPRET_INPUT_AS_3D
+ lhs_offset_first_element_in_bytes += lhs_x * sizeof(DATA_TYPE) + (lhs_y + z * M) * lhs_stride_y;
+#else // REINTERPRET_INPUT_AS_3D
+ lhs_offset_first_element_in_bytes += lhs_x * sizeof(DATA_TYPE) + lhs_y * lhs_stride_y + z * lhs_stride_z;
+#endif // REINTERPRET_INPUT_AS_3D
+
+#ifdef BATCHED_RHS
+ rhs_offset_first_element_in_bytes += rhs_x * sizeof(DATA_TYPE) + rhs_y * rhs_stride_y + z * rhs_stride_z;
+#else // BATCHED_RHS
+ rhs_offset_first_element_in_bytes += rhs_x * sizeof(DATA_TYPE) + rhs_y * rhs_stride_y;
+#endif // BATCHED_RHS
+
+#ifdef REINTERPRET_OUTPUT_AS_3D
+ dst_offset_first_element_in_bytes += dst_x * sizeof(DATA_TYPE) + (dst_y + z * M) * dst_stride_y;
+#else // REINTERPRET_OUTPUT_AS_3D
+ dst_offset_first_element_in_bytes += dst_x * sizeof(DATA_TYPE) + dst_y * dst_stride_y + z * dst_stride_z;
+#endif // REINTERPRET_OUTPUT_AS_3D
+
+ // Note: If RHS derives from the weights of a convolution 2d layer, RHS will always be 2D and rhs_stride_z will always be
+ // equal to 0, so the tensor is not slid along Z
+
+ // Initialize the accumulators
+ // The MMUL extension accumulates the result in F32 for both F32 and F16
+ TILE(float, M0, N0, c_f32);
+
+#if !defined(HALF_PRECISION)
+#define c c_f32
+#endif // !defined(HALF_PRECISION)
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c_f32[i].v = 0;
+ })
+
+ for(int k = 0; k <= K - MMUL_K0; k += MMUL_K0)
+ {
+ TILE(DATA_TYPE, M0, 1, a);
+ TILE(DATA_TYPE, 1, N0, b);
+
+ // Load tile from the lhs/rhs tensors
+ T_LOAD(DATA_TYPE, M0, 1, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD(DATA_TYPE, 1, N0, BUFFER, rhs, 0, 0, 1, 0, b);
+
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ c_f32[m0].s[n0] = arm_matrix_multiply(a[m0].s[0], b[0].s[n0], c_f32[m0].s[n0]);
+ })
+ })
+
+ lhs_offset_first_element_in_bytes += MMUL_K0 * sizeof(DATA_TYPE);
+ rhs_offset_first_element_in_bytes += MMUL_K0 * MMUL_N0 * N0 * sizeof(DATA_TYPE);
+ }
+
+ if(block_x * N0 + block_id * MMUL_N0 * N0 >= N)
+ {
+ return;
+ }
+
+ if(block_y * M0 + y0 * M0 * MMUL_M0 >= M)
+ {
+ return;
+ }
+
+#if defined(HALF_PRECISION)
+ TILE(DATA_TYPE, M0, N0, c);
+
+ // Conversion required for the half precision
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ c[m0].s[n0] = c_f32[m0].s[n0];
+ })
+ })
+#endif // defined(HALF_PRECISION)
+
+ // Multiply by the weight of matrix-matrix product and store the result
+#if defined(ALPHA)
+ T_SCALE_CONSTANT(DATA_TYPE, M0, N0, c, (DATA_TYPE)ALPHA, c);
+#endif // defined(ALPHA)
+
+ // Add beta*bias
+#if defined(BETA)
+#if defined(BROADCAST_BIAS)
+ bia_offset_first_element_in_bytes += dst_x * sizeof(DATA_TYPE);
+
+ TILE(DATA_TYPE, 1, N0, bias0);
+
+ if(dst_x + N0 <= N || N0_LEFTOVER == 0)
+ {
+ bias0[0].v = VLOAD(N0)(0, (DATA_TYPE *)(bia_ptr + bia_offset_first_element_in_bytes));
+ }
+ else
+ {
+ VLOAD_PARTIAL(N0, N0_LEFTOVER)
+ (bias0[0].v, 0, (DATA_TYPE *)(bia_ptr + bia_offset_first_element_in_bytes));
+ }
+
+#ifndef UNIT_BETA
+ T_SCALE_CONSTANT(DATA_TYPE, 1, N0, bias0, (DATA_TYPE)BETA, bias0);
+#endif // UNIT_BETA
+
+ // c = c + bias[broadcasted]
+ T_ELTWISE_BROADCAST_X(V_ADD, DATA_TYPE, M0, N0, c, bias0, c);
+#else // defined(BROADCAST_BIAS)
+ TILE(DATA_TYPE, M0, N0, bias0);
+
+ bia_offset_first_element_in_bytes += dst_x * sizeof(DATA_TYPE) + dst_y * bia_stride_y + z * bia_stride_z;
+
+ if(dst_x + N0 <= N || N0_LEFTOVER == 0)
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ bias0[m0].v = VLOAD(N0)(0, (DATA_TYPE *)(bia_ptr + bia_offset_first_element_in_bytes + m0 * bia_stride_y));
+ }
+ })
+ }
+ else
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VLOAD_PARTIAL(N0, N0_LEFTOVER)
+ (bias0[m0].v, 0, (DATA_TYPE *)(bia_ptr + bia_offset_first_element_in_bytes + m0 * bia_stride_y));
+ }
+ })
+ }
+
+#ifndef UNIT_BETA
+ T_SCALE_CONSTANT(DATA_TYPE, M0, N0, bias0, (DATA_TYPE)BETA, bias0);
+#endif // UNIT_BETA
+
+ // c = c + bias
+ T_ADD(DATA_TYPE, M0, N0, c, bias0, c);
+#endif // defined(BROADCAST_BIAS)
+#endif // defined(BETA)
+
+ T_ACTIVATION(DATA_TYPE, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, c, c);
+
+ // Store
+ if(dst_x + N0 <= N || N0_LEFTOVER == 0)
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE(N0)
+ (c[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+ else
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE_PARTIAL(N0, N0_LEFTOVER)
+ (c[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+
+#undef RHS_BLOCK_SIZE
+#undef RHS_OFFSET_X
+#undef RHS_STEP_X
+}
+#endif // defined(GEMM_MM_RESHAPED_ONLY_RHS_NT_MMUL)
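Each MMUL block is executed by MMUL_N0 * MMUL_K0 cooperating work-items, and every work-item derives its destination tile from its position within that block. The standalone C++ sketch below simply replays the index arithmetic of the kernel above for a common 4x4x4 configuration with M0 = N0 = 4, so the mapping from global id to destination tile origin is visible; the clamping against M/N and the y0 term are deliberately left out, and the chosen sizes are just an example.

#include <cstdio>

// Illustrative only: host-side replay of the work-item to tile mapping.
int main()
{
    const unsigned MMUL_M0 = 4, MMUL_N0 = 4, MMUL_K0 = 4, M0 = 4, N0 = 4;
    const unsigned MMUL_BLOCK_SIZE = MMUL_N0 * MMUL_K0; // 16 cooperating work-items

    for(unsigned x0 = 0; x0 < 2 * MMUL_BLOCK_SIZE; ++x0) // two blocks along x
    {
        const unsigned block_id  = x0 / MMUL_BLOCK_SIZE;
        const unsigned thread_id = x0 % MMUL_BLOCK_SIZE;
        const unsigned block_x   = thread_id % MMUL_N0;
        const unsigned block_y   = thread_id / MMUL_M0;
        const unsigned dst_x     = block_x * N0 + block_id * MMUL_N0 * N0;
        const unsigned dst_y     = block_y * M0; // plus y0 * M0 * MMUL_M0 for y0 > 0
        printf("x0=%2u -> block %u, thread (%u,%u), dst tile origin (%u,%u)\n",
               x0, block_id, block_x, block_y, dst_x, dst_y);
    }
    return 0;
}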
+
+#if defined(GEMM_MM_RESHAPED_ONLY_RHS_NT_MMUL_TEXTURE)
+/** This OpenCL kernel computes the matrix multiplication between 2 matrices using the MMUL extension and the OpenCL image for RHS:
+ *
+ * The LHS matrix is NOT reshaped
+ * The RHS is reshaped with @ref ClGemmMatrixMultiplyReshapedOnlyRhsKernel and the block K0xN0 is NOT transposed
+ *
+ * @note The block's dimensions used for reshaping the RHS matrix (N0 and K0) must be passed at compile time using -DN0 and -DK0 (e.g. -DN0=8, -DK0=4).
+ * @note The number of M0 rows to process must be passed at compile time using -DM0 (e.g. -DM0=2)
+ * @note The number of output columns processed by the cooperative mmul extension must be passed at compile time using -DMMUL_N0 (e.g., -DMMUL_N0=2)
+ * @note The number of output rows processed by the cooperative mmul extension must be passed at compile time using -DMMUL_M0 (e.g., -DMMUL_M0=2)
+ * @note The number of lhs columns (or rhs rows) processed by the cooperative mmul extension must be passed at compile time using -DMMUL_K0 (e.g., -DMMUL_K0=2)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ * - M0 > 0
+ * - N0 = 1, 2, 3, 4, 8, 16
+ * - K0 = 1
+ *
+ * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions should also be passed at compile time using -DA_VAL= and -DB_VAL= respectively.
+ * The activation function is performed after the bias addition
+ *
+ * @param[in] lhs_ptr Pointer to the LHS tensor. Supported data types: F16/F32
+ * @param[in] lhs_stride_y Stride of the LHS tensor in Y dimension (in bytes)
+ * @param[in] lhs_stride_z Stride of the LHS tensor in Z dimension (in bytes)
+ * @param[in] lhs_w The size of the width dimension of the LHS tensor
+ * @param[in] lhs_h The size of the height dimension of the LHS tensor
+ * @param[in] lhs_n The size of the depth dimension of the LHS tensor
+ * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the LHS tensor
+ * @param[in] rhs_ptr Pointer to the RHS reshaped tensor. Supported data type: same as @p lhs_ptr
+ * @param[in] rhs_stride_y Stride of the RHS tensor in Y dimension (in bytes)
+ * @param[in] rhs_stride_z Stride of the RHS tensor in Z dimension (in bytes)
+ * @param[in] rhs_w The size of the width dimension of the RHS tensor
+ * @param[in] rhs_h The size of the height dimension of the RHS tensor
+ * @param[in] rhs_n The size of the depth dimension of the RHS tensor
+ * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the RHS tensor
+ * @param[in] bia_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr
+ * @param[in] bia_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes)
+ * @param[in] bia_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes)
+ * @param[in] bia_w (Optional) The size of the width dimension of the bias tensor
+ * @param[in] bia_h (Optional) The size of the height dimension of the bias tensor
+ * @param[in] bia_n (Optional) The size of the depth dimension of the bias tensor
+ * @param[in] bia_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data type: same as @p lhs_ptr
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_w The size of the width dimension of the destination tensor
+ * @param[in] dst_h The size of the height dimension of the destination tensor
+ * @param[in] dst_n The size of the depth dimension of the destination tensor
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] M Number of rows in LHS matrix not reshaped
+ * @param[in] N Number of columns in RHS matrix not reshaped
+ * @param[in] K Number of columns in LHS matrix and rows in RHS matrix not reshaped
+ */
+__kernel void gemm_mm_reshaped_only_rhs_nt_mmul_texture(
+ TENSOR3D_T(lhs, BUFFER),
+ TENSOR3D_T(rhs, IMAGE),
+#if defined(BETA)
+ TENSOR3D_T(bia, BUFFER),
+#endif // defined(BETA)
+ TENSOR3D_T(dst, BUFFER),
+ const int M,
+ const int N,
+ const int K)
+{
+#define MMUL_BLOCK_SIZE (MMUL_N0 * MMUL_K0)
+
+ uint x0 = get_global_id(0); // (N / N0) * MMUL_K0
+ uint y0 = get_global_id(1); // (M / M0) / MMUL_M0
+ uint z = get_global_id(2); // Batch
+
+ // Get block ID and thread ID within the block
+ uint block_id = (x0 / MMUL_BLOCK_SIZE);
+ uint thread_id = (x0 % MMUL_BLOCK_SIZE);
+
+ // Coordinate within a block
+ uint block_x = thread_id % MMUL_N0;
+ uint block_y = (thread_id / MMUL_M0);
+
+ // Starting destination coordinates
+ uint dst_x = min(block_x * N0 + block_id * MMUL_N0 * N0, (uint)(N - 1));
+ uint dst_y = min(block_y * M0 + y0 * M0 * MMUL_M0, (uint)(M - M0));
+
+ // Note: We need to clamp dst_x and dst_y because we always need to execute a complete MMUL block! Only after the matrix multiplication
+ // part can we exit the kernel if it is out-of-bound. Remember, we have a cooperative matrix multiplication. Therefore, we need a full block to get the correct results
+
+ // Starting LHS coordinates
+ uint lhs_x = block_x;
+ uint lhs_y = dst_y;
+
+ // Starting RHS coordinates
+ uint rhs_x = block_y * N0 * MMUL_N0 + block_x * N0;
+
+#ifdef BATCHED_RHS
+ uint rhs_y = block_id + z * rhs_h;
+#else // BATCHED_RHS
+ uint rhs_y = block_id;
+#endif // BATCHED_RHS
+
+ // Compute LHS/RHS/DST matrix address
+#ifdef REINTERPRET_INPUT_AS_3D
+ lhs_offset_first_element_in_bytes += lhs_x * sizeof(DATA_TYPE) + (lhs_y + z * M) * lhs_stride_y;
+#else // REINTERPRET_INPUT_AS_3D
+ lhs_offset_first_element_in_bytes += lhs_x * sizeof(DATA_TYPE) + lhs_y * lhs_stride_y + z * lhs_stride_z;
+#endif // REINTERPRET_INPUT_AS_3D
+
+#ifdef REINTERPRET_OUTPUT_AS_3D
+ dst_offset_first_element_in_bytes += dst_x * sizeof(DATA_TYPE) + (dst_y + z * M) * dst_stride_y;
+#else // REINTERPRET_OUTPUT_AS_3D
+ dst_offset_first_element_in_bytes += dst_x * sizeof(DATA_TYPE) + dst_y * dst_stride_y + z * dst_stride_z;
+#endif // REINTERPRET_OUTPUT_AS_3D
+
+ // Initialize the accumulators
+ // The MMUL extension accumulates the result in F32 for both F32 and F16
+ TILE(float, M0, N0, c_f32);
+
+#if !defined(HALF_PRECISION)
+#define c c_f32
+#endif // !defined(HALF_PRECISION)
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c_f32[i].v = 0;
+ })
+
+ for(int k = 0; k <= K - MMUL_K0; k += MMUL_K0)
+ {
+ TILE(DATA_TYPE, M0, 1, a);
+ TILE(DATA_TYPE, 1, N0, b);
+
+ // Load tile from the lhs/rhs tensors
+ T_LOAD(DATA_TYPE, M0, 1, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD(DATA_TYPE, 1, N0, IMAGE, rhs, rhs_x, rhs_y, 1, rhs_stride_y, b);
+
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ c_f32[m0].s[n0] = arm_matrix_multiply(a[m0].s[0], b[0].s[n0], c_f32[m0].s[n0]);
+ })
+ })
+
+ lhs_offset_first_element_in_bytes += MMUL_K0 * sizeof(DATA_TYPE);
+ rhs_x += MMUL_K0 * MMUL_N0 * N0;
+ }
+
+ if(block_x * N0 + block_id * MMUL_N0 * N0 >= N)
+ {
+ return;
+ }
+
+ if(block_y * M0 + y0 * M0 * MMUL_M0 >= M)
+ {
+ return;
+ }
+
+#if defined(HALF_PRECISION)
+ TILE(DATA_TYPE, M0, N0, c);
+
+ // Conversion required for the half precision
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ c[m0].s[n0] = c_f32[m0].s[n0];
+ })
+ })
+#endif // defined(HALF_PRECISION)
+
+ // Multiply by the weight of matrix-matrix product and store the result
+#if defined(ALPHA)
+ T_SCALE_CONSTANT(DATA_TYPE, M0, N0, c, (DATA_TYPE)ALPHA, c);
+#endif // defined(ALPHA)
+
+ // Add beta*bias
+#if defined(BETA)
+#if defined(BROADCAST_BIAS)
+ bia_offset_first_element_in_bytes += dst_x * sizeof(DATA_TYPE);
+
+ TILE(DATA_TYPE, 1, N0, bias0);
+
+ if(dst_x + N0 <= N || N0_LEFTOVER == 0)
+ {
+ bias0[0].v = VLOAD(N0)(0, (DATA_TYPE *)(bia_ptr + bia_offset_first_element_in_bytes));
+ }
+ else
+ {
+ VLOAD_PARTIAL(N0, N0_LEFTOVER)
+ (bias0[0].v, 0, (DATA_TYPE *)(bia_ptr + bia_offset_first_element_in_bytes));
+ }
+
+#ifndef UNIT_BETA
+ T_SCALE_CONSTANT(DATA_TYPE, 1, N0, bias0, (DATA_TYPE)BETA, bias0);
+#endif // UNIT_BETA
+
+ // c = c + bias[broadcasted]
+ T_ELTWISE_BROADCAST_X(V_ADD, DATA_TYPE, M0, N0, c, bias0, c);
+#else // defined(BROADCAST_BIAS)
+ TILE(DATA_TYPE, M0, N0, bias0);
+
+ bia_offset_first_element_in_bytes += dst_x * sizeof(DATA_TYPE) + dst_y * bia_stride_y + z * bia_stride_z;
+
+ if(dst_x + N0 <= N || N0_LEFTOVER == 0)
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ bias0[m0].v = VLOAD(N0)(0, (DATA_TYPE *)(bia_ptr + bia_offset_first_element_in_bytes + m0 * bia_stride_y));
+ }
+ })
+ }
+ else
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VLOAD_PARTIAL(N0, N0_LEFTOVER)
+ (bias0[m0].v, 0, (DATA_TYPE *)(bia_ptr + bia_offset_first_element_in_bytes + m0 * bia_stride_y));
+ }
+ })
+ }
+
+#ifndef UNIT_BETA
+ T_SCALE_CONSTANT(DATA_TYPE, M0, N0, bias0, (DATA_TYPE)BETA, bias0);
+#endif // UNIT_BETA
+
+ // c = c + bias
+ T_ADD(DATA_TYPE, M0, N0, c, bias0, c);
+#endif // defined(BROADCAST_BIAS)
+#endif // defined(BETA)
+
+ T_ACTIVATION(DATA_TYPE, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, c, c);
+
+ // Store
+ if(dst_x + N0 <= N || N0_LEFTOVER == 0)
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE(N0)
+ (c[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+ else
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE_PARTIAL(N0, N0_LEFTOVER)
+ (c[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+
+#undef RHS_BLOCK_SIZE
+#undef RHS_OFFSET_X
+#undef RHS_STEP_X
+}
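+// Illustrative summary (not part of the kernel) of the epilogue applied above before the store,
+// written per element; each term is present only when the corresponding build option is defined:
+//   dst[m][n] = ACTIVATION(ALPHA * c[m][n] + BETA * bias[...], A_VAL, B_VAL)
+// where bias[...] is bias[n] broadcast across rows when BROADCAST_BIAS is defined and the full
+// bias[m][n] tile otherwise.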
+#endif // defined(GEMM_MM_RESHAPED_ONLY_RHS_MMUL_TEXTURE)
diff --git a/src/core/CL/cl_kernels/common/gemm_utils.cl b/src/core/CL/cl_kernels/common/gemm_utils.cl
new file mode 100644
index 0000000000..be57d94ce6
--- /dev/null
+++ b/src/core/CL/cl_kernels/common/gemm_utils.cl
@@ -0,0 +1,458 @@
+/*
+ * Copyright (c) 2017-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "gemm_helpers.h"
+#include "helpers.h"
+#include "repeat.h"
+#include "tile_helpers.h"
+
+#if defined(RESHAPE_LHS_NT)
+/** This OpenCL kernel reshapes the lhs input matrix. The kernel splits the input matrix in blocks of size M0xK0 and stores each one (not transposed) in
+ * the output matrix unrolling the values.
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
+ * @note The width of the input tensor must be passed at compile time using -DSRC_WIDTH (e.g. -DSRC_WIDTH=16)
+ * @note The height of the input tensor must be passed at compile time using -DSRC_HEIGHT (e.g. -DSRC_HEIGHT=16)
+ * @note The block's dimensions (M0 and K0) must be passed at compile time using -DM0 and -DK0 (e.g. -DM0=2, -DK0=2).
+ * @note The size of the partial load block in y must be passed at compile time using -DPARTIAL_M0 (e.g. -DPARTIAL_M0=1)
+ * @note The size of the partial load block in x must be passed at compile time using -DPARTIAL_K0 (e.g. -DPARTIAL_K0=1)
+ * @note Only the following values for M0, K0 and V0 are supported:
+ * M0: 2,3,4,5,6,7,8
+ * K0: 2,3,4,8,16
+ * V0: greater than 0
+ * @note If the M0xK0 blocks have to be interleaved, the option -DINTERLEAVE must be passed at compile time.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: All
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_w The size of the width dimension of the source tensor
+ * @param[in] src_h The size of the height dimension of the source tensor
+ * @param[in] src_n The size of the depth dimension of the source tensor
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: All
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_w The size of the width dimension of the destination tensor
+ * @param[in] dst_h The size of the height dimension of the destination tensor
+ * @param[in] dst_n The size of the depth dimension of the destination tensor
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] M The size of the height dimension of the source tensor, affected by reinterpret_input_as_3d
+ * @param[in] V0 The number of blocks to place on the same row. It must be greater than 0.
+ */
+__kernel void gemm_reshape_lhs_matrix_nt(TENSOR3D_T(src, BUFFER),
+ TENSOR3D_T(dst, BUFFER),
+ const int M,
+ const int V0)
+{
+ // Block size
+#define BLOCK_SIZE ((M0) * (K0))
+
+ // Output offset X
+#if defined(INTERLEAVE)
+#define OUTPUT_OFFSET_X (K0)
+#else // defined(INTERLEAVE)
+#define OUTPUT_OFFSET_X (BLOCK_SIZE)
+#endif // defined(INTERLEAVE)
+
+ // Output step X
+#if defined(INTERLEAVE)
+#define OUTPUT_STEP_X (K0) * (V0)
+#else // Do not interleave
+#define OUTPUT_STEP_X (K0)
+#endif // defined(INTERLEAVE)
+
+ const int x = GET_SPATIAL_IDX(0, 1, 0); // K
+ const int y = GET_SPATIAL_IDX(1, 1, 0); // M
+ const int z = GET_SPATIAL_IDX(2, 1, 0); // Batch size
+
+ const int xi = x * K0;
+ const int yi = y * M0;
+
+ const int xo = x * BLOCK_SIZE * V0 + (y % V0) * OUTPUT_OFFSET_X;
+ const int yo = (y / V0);
+
+ // src_stride_z is expressed as M * src_stride_y, to handle case where reinterpret_input_as_3d=true
+ src_offset_first_element_in_bytes += yi * src_stride_y + z * M * src_stride_y;
+ dst_offset_first_element_in_bytes += yo * dst_stride_y + z * dst_stride_z;
+
+ TILE(DATA_TYPE, M0, K0, in);
+
+ // Initialize the input tile to zero
+ LOOP_UNROLLING(int, _i, 0, 1, M0,
+ {
+ in[_i].v = 0;
+ });
+
+ bool x_cond = (xi + K0 >= src_w) && (PARTIAL_K0 != 0);
+ bool y_cond = (yi + M0 >= M) && (PARTIAL_M0 != 0);
+ // Load input tile
+ TILE(uint, M0, 1, in_indirect_y);
+ LOOP_UNROLLING(int, _i, 0, 1, M0,
+ {
+ in_indirect_y[_i].v = _i;
+ });
+#if PARTIAL_M0 != 0
+ if(y_cond)
+ {
+ T_LOAD_INDIRECT_WIDTH_SELECT(DATA_TYPE, PARTIAL_M0, K0, PARTIAL_K0, BUFFER, src, xi, src_stride_y, x_cond, in, in_indirect_y);
+ }
+ else
+#endif // PARTIAL_M0 != 0
+ {
+ T_LOAD_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, K0, PARTIAL_K0, BUFFER, src, xi, src_stride_y, x_cond, in, in_indirect_y);
+ }
+
+ // Store output tile
+ TILE(uint, M0, 1, dst_indirect_y);
+ LOOP_UNROLLING(int, _i, 0, 1, M0,
+ {
+ dst_indirect_y[_i].v = _i;
+ });
+
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, K0, 0, BUFFER, dst, xo, (OUTPUT_STEP_X * sizeof(DATA_TYPE)), false, in, dst_indirect_y);
+#undef BLOCK_SIZE
+#undef OUTPUT_OFFSET_X
+#undef OUTPUT_STEP_X
+}
+#endif // defined(RESHAPE_LHS_NT)
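+// Worked example (illustrative only) of the destination mapping used by gemm_reshape_lhs_matrix_nt:
+// with M0 = 4, K0 = 4, V0 = 2 and no -DINTERLEAVE, BLOCK_SIZE = 16 and OUTPUT_OFFSET_X = 16, so the
+// block at grid position (x, y) = (1, 3) is written at
+//   xo = x * BLOCK_SIZE * V0 + (y % V0) * OUTPUT_OFFSET_X = 1 * 16 * 2 + 1 * 16 = 48 elements
+//   yo = y / V0 = 1
+// i.e. for a given x, the blocks taken from source rows 8-11 (y = 2) and 12-15 (y = 3) are stored
+// next to each other on output row 1.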
+
+#if defined(RESHAPE_LHS_T)
+/** This OpenCL kernel reshapes the lhs input matrix. The kernel splits the input matrix in blocks of size M0xK0 and stores each one (transposed) in
+ * the output matrix unrolling the values.
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
+ * @note The width of the input tensor must be passed at compile time using -DSRC_WIDTH (e.g. -DSRC_WIDTH=16)
+ * @note The height of the input tensor must be passed at compile time using -DSRC_HEIGHT (e.g. -DSRC_HEIGHT=16)
+ * @note The block's dimensions (M0 and K0) must be passed at compile time using -DM0 and -DK0 (e.g. -DM0=2, -DK0=2).
+ * @note The size of the partial load block in y must be passed at compile time using -DPARTIAL_M0 (e.g. -DPARTIAL_M0=1)
+ * @note The size of the partial load block in x must be passed at compile time using -DPARTIAL_K0 (e.g. -DPARTIAL_K0=1)
+ * @note Only the following values for M0, K0 and V0 are supported:
+ * M0: 2,3,4,8,16
+ * K0: 2,3,4,8,16
+ * V0: greater than 0
+ * @note If the M0xK0 blocks have to be interleaved, the option -DINTERLEAVE must be passed at compile time.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: All
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_w The size of the width dimension of the source tensor
+ * @param[in] src_h The size of the height dimension of the source tensor
+ * @param[in] src_n The size of the depth dimension of the source tensor
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: All
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_w The size of the width dimension of the destination tensor
+ * @param[in] dst_h The size of the height dimension of the destination tensor
+ * @param[in] dst_n The size of the depth dimension of the destination tensor
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] M The size of the height dimension of the source tensor, affected by reinterpret_input_as_3d
+ * @param[in] V0 The number of blocks to place on the same row. It must be greater than 0
+ */
+__kernel void gemm_reshape_lhs_matrix_t(TENSOR3D_T(src, BUFFER),
+ TENSOR3D_T(dst, BUFFER),
+ const int M,
+ const int V0)
+{
+ // Block size
+#define BLOCK_SIZE ((M0) * (K0))
+
+ // Output offset X
+#if defined(INTERLEAVE)
+#define OUTPUT_OFFSET_X (M0)
+#else // defined(INTERLEAVE)
+#define OUTPUT_OFFSET_X (BLOCK_SIZE)
+#endif // defined(INTERLEAVE)
+
+ // Output step X
+#if defined(INTERLEAVE)
+#define OUTPUT_STEP_X (M0) * (V0)
+#else // Do not interleave
+#define OUTPUT_STEP_X (M0)
+#endif // defined(INTERLEAVE)
+
+ const int x = GET_SPATIAL_IDX(0, 1, 0); // K
+ const int y = GET_SPATIAL_IDX(1, 1, 0); // M
+ const int z = GET_SPATIAL_IDX(2, 1, 0); // Batch size
+
+ const int xi = x * K0;
+ const int yi = y * M0;
+
+ const int xo = x * BLOCK_SIZE * V0 + ((y % V0) * OUTPUT_OFFSET_X);
+ const int yo = (y / V0);
+
+ // src_stride_z is expressed as M * src_stride_y, to handle case where reinterpret_input_as_3d=true
+ src_offset_first_element_in_bytes += yi * src_stride_y + z * M * src_stride_y;
+ dst_offset_first_element_in_bytes += yo * dst_stride_y + z * dst_stride_z;
+
+ TILE(DATA_TYPE, M0, K0, in);
+ TILE(DATA_TYPE, K0, M0, in_tr);
+
+ // Initialize the tile to zero
+ LOOP_UNROLLING(int, _i, 0, 1, M0,
+ {
+ in[_i].v = 0;
+ });
+
+ // Load input tile
+ bool x_cond = (xi + K0 >= src_w) && (PARTIAL_K0 != 0);
+ bool y_cond = (yi + M0 >= M) && (PARTIAL_M0 != 0);
+
+ TILE(uint, M0, 1, in_indirect_y);
+ LOOP_UNROLLING(int, _i, 0, 1, M0,
+ {
+ in_indirect_y[_i].v = _i;
+ });
+#if PARTIAL_M0 != 0
+ if(y_cond)
+ {
+ T_LOAD_INDIRECT_WIDTH_SELECT(DATA_TYPE, PARTIAL_M0, K0, PARTIAL_K0, BUFFER, src, xi, src_stride_y, x_cond, in, in_indirect_y);
+ }
+ else
+#endif // PARTIAL_M0 != 0
+ {
+ T_LOAD_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, K0, PARTIAL_K0, BUFFER, src, xi, src_stride_y, x_cond, in, in_indirect_y);
+ }
+ // Transpose input tile
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, k0, 0, 1, K0,
+ {
+ in_tr[k0].s[m0] = in[m0].s[k0];
+ })
+ });
+
+ TILE(uint, K0, 1, dst_indirect_y);
+ LOOP_UNROLLING(int, _i, 0, 1, K0,
+ {
+ dst_indirect_y[_i].v = _i;
+ });
+
+ // Store output tile
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, K0, M0, 0, BUFFER, dst, xo, (OUTPUT_STEP_X * sizeof(DATA_TYPE)), false, in_tr, dst_indirect_y);
+
+#undef BLOCK_SIZE
+#undef OUTPUT_OFFSET_X
+#undef OUTPUT_STEP_X
+}
+#endif // defined(RESHAPE_LHS_T)
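+// Illustrative note (not part of the kernel): gemm_reshape_lhs_matrix_t differs from the NT variant
+// only in that the stored block is the transpose of the loaded M0xK0 tile, i.e.
+//   out_block[k0][m0] = in_block[m0][k0]   for 0 <= m0 < M0, 0 <= k0 < K0
+// so each of the K0 rows written out holds M0 consecutive elements taken from one source column.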
+
+#if defined(RESHAPE_RHS_NT)
+/** This OpenCL kernel reshapes the rhs input matrix. The kernel splits the input matrix in blocks of size K0xN0 and stores each one (not transposed) in
+ * the output matrix unrolling the values.
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
+ * @note The block's dimensions (K0 and N0) must be passed at compile time using -DK0 and -DN0 (e.g. -DK0=2, -DN0=2).
+ * @note If the K0xN0 blocks have to be interleaved, the option -DINTERLEAVE must be passed at compile time.
+ * @note Only the following values for K0, N0 and H0 are supported:
+ * N0: 2,3,4,8,16
+ * K0: 1,2,3,4,8,16
+ * H0: greater than 0
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: All
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_w The size of the width dimension of the source tensor
+ * @param[in] src_h The size of the height dimension of the source tensor
+ * @param[in] src_n The size of the depth dimension of the source tensor
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: All
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_w The size of the width dimension of the destination tensor
+ * @param[in] dst_h The size of the height dimension of the destination tensor
+ * @param[in] dst_n The size of the depth dimension of the destination tensor
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] H0 The number of blocks to place on the same row. It must be greater than 0
+ */
+__kernel void gemm_reshape_rhs_matrix_nt(TENSOR3D_T(src, BUFFER),
+ TENSOR3D_T(dst, BUFFER),
+ const int H0)
+{
+ // Block size
+#define BLOCK_SIZE ((K0) * (N0))
+
+ // Output offset X
+#if defined(INTERLEAVE)
+#define OUTPUT_OFFSET_X (N0)
+#else // defined(INTERLEAVE)
+#define OUTPUT_OFFSET_X (BLOCK_SIZE)
+#endif // defined(INTERLEAVE)
+
+ // Output step X
+#if defined(INTERLEAVE)
+#define OUTPUT_STEP_X (N0) * (H0)
+#else // Do not interleave
+#define OUTPUT_STEP_X (N0)
+#endif // defined(INTERLEAVE)
+
+ const int x = GET_SPATIAL_IDX(0, 1, 0);
+ const int y = GET_SPATIAL_IDX(1, 1, 0);
+ const int z = GET_SPATIAL_IDX(2, 1, 0);
+
+ const int xi = x * N0;
+ const int yi = y * K0;
+
+ const int xo = y * BLOCK_SIZE * H0 + (x % H0) * OUTPUT_OFFSET_X;
+ const int yo = (x / H0);
+
+ src_offset_first_element_in_bytes += yi * src_stride_y + z * src_stride_z;
+ dst_offset_first_element_in_bytes += yo * dst_stride_y + z * dst_stride_z;
+
+ TILE(DATA_TYPE, K0, N0, in);
+
+ // Initialize the tile to zero
+ for(int i = 0; i < K0; ++i)
+ {
+ in[i].v = 0;
+ }
+
+ // Load input tile
+ for(int i = 0; i < K0; ++i)
+ {
+ if(yi + i < src_h)
+ {
+ in[i].v = V_LOAD(DATA_TYPE, N0, BUFFER, src, xi, i, src_stride_y);
+ }
+ }
+
+ TILE(uint, K0, 1, dst_indirect_y);
+ for(int i = 0; i < K0; ++i)
+ {
+ dst_indirect_y[i].v = i;
+ }
+
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, K0, N0, 0, BUFFER, dst, xo, (OUTPUT_STEP_X * sizeof(DATA_TYPE)), false, in, dst_indirect_y);
+
+#undef BLOCK_SIZE
+#undef OUTPUT_OFFSET_X
+#undef OUTPUT_STEP_X
+}
+#endif // defined(RESHAPE_RHS_NT)
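+// Illustrative note (not part of the kernel): for the RHS reshape the grid coordinates swap roles
+// with respect to the LHS kernels: x walks the N0-wide column blocks and y the K0-tall row blocks,
+// so with no -DINTERLEAVE the destination offsets reduce to
+//   xo = y * (K0 * N0) * H0 + (x % H0) * (K0 * N0),   yo = x / H0
+// e.g. with K0 = 4, N0 = 4 and H0 = 2, the block at (x, y) = (3, 1) is stored at xo = 48, yo = 1.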
+
+#if defined(RESHAPE_RHS_T)
+/** This OpenCL kernel reshapes the rhs input matrix. The kernel splits the input matrix in blocks of size K0xN0 and stores each one (transposed) in
+ * the output matrix unrolling the values.
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
+ * @note The block's dimensions (K0 and N0) must be passed at compile time using -DK0 and -DN0 (e.g. -DK0=2, -DN0=2).
+ * @note If the K0xN0 blocks have to be interleaved, the option -DINTERLEAVE must be passed at compile time.
+ * @note The option -DTRANSPOSE must be passed at compile time.
+ * @note Only the following values for K0, N0 and H0 are supported:
+ * N0: 2,3,4,8,16
+ * K0: 2,3,4,8,16
+ * H0: greater than 0
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: All
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_w The size of the width dimension of the source tensor
+ * @param[in] src_h The size of the height dimension of the source tensor
+ * @param[in] src_n The size of the depth dimension of the source tensor
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: All
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_w The size of the width dimension of the destination tensor
+ * @param[in] dst_h The size of the height dimension of the destination tensor
+ * @param[in] dst_n The size of the depth dimension of the destination tensor
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] H0 The number of blocks to place on the same row. It must be greater than 0.
+ */
+__kernel void gemm_reshape_rhs_matrix_t(TENSOR3D_T(src, BUFFER),
+ TENSOR3D_T(dst, BUFFER),
+ const int H0)
+{
+ // Block size
+#define BLOCK_SIZE ((K0) * (N0))
+
+ // Output offset X
+#if defined(INTERLEAVE)
+#define OUTPUT_OFFSET_X (K0)
+#else // defined(INTERLEAVE)
+#define OUTPUT_OFFSET_X (BLOCK_SIZE)
+#endif // defined(INTERLEAVE)
+
+ // Output step X
+#if defined(INTERLEAVE)
+#define OUTPUT_STEP_X (K0) * (H0)
+#else // Do not interleave
+#define OUTPUT_STEP_X (K0)
+#endif // defined(INTERLEAVE)
+
+ const int x = GET_SPATIAL_IDX(0, 1, 0);
+ const int y = GET_SPATIAL_IDX(1, 1, 0);
+ const int z = GET_SPATIAL_IDX(2, 1, 0);
+
+ const int xi = x * N0;
+ const int yi = y * K0;
+
+ const int xo = y * BLOCK_SIZE * H0 + (x % H0) * OUTPUT_OFFSET_X;
+ const int yo = (x / H0);
+
+ src_offset_first_element_in_bytes += yi * src_stride_y + z * src_stride_z;
+ dst_offset_first_element_in_bytes += yo * dst_stride_y + z * dst_stride_z;
+
+ TILE(DATA_TYPE, K0, N0, in);
+ TILE(DATA_TYPE, N0, K0, in_tr);
+
+ // Initialize the tile to zero
+ for(int i = 0; i < K0; ++i)
+ {
+ in[i].v = 0;
+ }
+
+ // Load input tile
+ for(int i = 0; i < K0; ++i)
+ {
+ if(yi + i < src_h)
+ {
+ in[i].v = V_LOAD(DATA_TYPE, N0, BUFFER, src, xi, i, src_stride_y);
+ }
+ }
+
+ // Transpose input tile
+ for(int k0 = 0; k0 < K0; ++k0)
+ {
+ for(int n0 = 0; n0 < N0; ++n0)
+ {
+ in_tr[n0].s[k0] = in[k0].s[n0];
+ }
+ }
+
+ TILE(uint, N0, 1, dst_indirect_y);
+ for(int i = 0; i < N0; ++i)
+ {
+ dst_indirect_y[i].v = i;
+ }
+
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, N0, K0, 0, BUFFER, dst, xo, (OUTPUT_STEP_X * sizeof(DATA_TYPE)), false, in_tr, dst_indirect_y);
+
+#undef BLOCK_SIZE
+#undef OUTPUT_OFFSET_X
+#undef OUTPUT_STEP_X
+}
+
+#endif // defined(RESHAPE_RHS_T)
\ No newline at end of file
diff --git a/src/core/CL/cl_kernels/gemmlowp.cl b/src/core/CL/cl_kernels/common/gemmlowp.cl
index ad92511c22..62c4cd31f5 100644
--- a/src/core/CL/cl_kernels/gemmlowp.cl
+++ b/src/core/CL/cl_kernels/common/gemmlowp.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,6 +24,7 @@
#include "gemm_helpers.h"
#include "helpers_asymm.h"
#include "repeat.h"
+#include "tile_helpers.h"
#if defined(DATA_TYPE) && defined(ACC_DATA_TYPE)
@@ -289,7 +290,7 @@
(VECTOR_ACC_TYPE, k0, a, b, c); \
})
-#if defined(M0) && defined(N0) && defined(K0) && defined(V0) && defined(H0) && defined(M) && defined(N) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
+#if defined(GEMMLOWP_MM_RESHAPED_LHS_NT_RHS_T)
/** This OpenCL kernel computes the matrix multiplication between 2 matrices with QASYMM8/QASYMM8_SIGNED data type.
* The LHS matrix must be reshaped with @ref CLGEMMReshapeLHSMatrixKernel and the M0xK0 must be NOT transposed
* The RHS matrix must be reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the K0xN0 must be transposed
@@ -392,10 +393,10 @@ __kernel void gemmlowp_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs),
#endif // defined(DUMMY_WORK_ITEMS)
// Compute LHS matrix address
- __global DATA_TYPE *lhs_addr = lhs_ptr + lhs_offset_first_element_in_bytes + (y % V0) * (uint)LHS_OFFSET_X + (y / V0) * (uint)lhs_stride_y + (z * lhs_stride_z);
+ __global DATA_TYPE *lhs_addr = (__global DATA_TYPE *)(lhs_ptr + lhs_offset_first_element_in_bytes + (y % V0) * (uint)LHS_OFFSET_X + (y / V0) * (uint)lhs_stride_y + (z * lhs_stride_z));
// Compute RHS matrix address
- __global DATA_TYPE *rhs_addr = rhs_ptr + rhs_offset_first_element_in_bytes + (x % H0) * (uint)RHS_OFFSET_X + (x / (uint)H0) * rhs_stride_y;
+ __global DATA_TYPE *rhs_addr = (__global DATA_TYPE *)(rhs_ptr + rhs_offset_first_element_in_bytes + (x % H0) * (uint)RHS_OFFSET_X + (x / (uint)H0) * rhs_stride_y);
#if defined(MATRIX_B_DEPTH)
// Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
@@ -460,194 +461,13 @@ __kernel void gemmlowp_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs),
#undef RHS_OFFSET_X
#undef RHS_STEP_X
}
-#endif // defined(M0) && defined(N0) && defined(K0) && defined(V0) && defined(H0) && defined(M) && defined(N) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
+#endif // defined(GEMMLOWP_MM_RESHAPED_LHS_NT_RHS_T)
-#if defined(M0) && defined(N0) && defined(K0) && defined(H0) && defined(K) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
+#if defined(GEMMLOWP_MM_RESHAPED_ONLY_RHS_T_FUSED_OUTPUT_STAGE_FIXEDPOINT) || defined(GEMMLOWP_MM_RESHAPED_ONLY_RHS_T)
+#if defined(RESULT_OFFSET) && defined(RESULT_MULTIPLIER) && defined(RESULT_SHIFT)
+#define FUSED_OUTPUT_STAGE_FIXED_POINT
+#endif // defined(RESULT_OFFSET) && defined(RESULT_MULTIPLIER) && defined(RESULT_SHIFT)
-/** This OpenCL kernel computes the matrix multiplication between 2 matrices.
- * The LHS matrix is NOT reshaped
- * The RHS matrix is reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the block K0xN0 is transposed
- *
- * @note The input data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=uchar)
- * @note The accumulator data type must be passed at compile time using -DACC_DATA_TYPE (i.e. -DACC_DATA_TYPE=uint)
- * @note The number of columns of LHS matrix must be passed at compile time using -DK (i.e. -DK=64)
- * @note The block's dimensions used for reshaping the RHS matrix (N0 and K0) must be passed at compile time using -DN0 and -DK0 (i.e. -DN0=8, -DK0=4).
- * @note The number of M0 rows to process must be passed at compile time using -DM0 (i.e. -DM0=2)
- * @note The number of K0xN0 horizontal blocks stored on the same output row of the reshaped RHS matrix must be passed at compile time using -DH0 (i.e. -DH0=2)
- * @note If the K0xN0 blocks in the reshaped RHS matrix have been interleaved, the option -DRHS_INTERLEAVE must passed at compile time.
- * @note Only the following configurations of M0, N0 and K0 are currently supported:
- * - M0 = 1, 2, 3, 4, 5, 6, 7, 8
- * - N0 = 2, 3, 4, 8, 16
- * - K0 = 2, 3, 4, 8, 16
- * - H0 >= 1
- *
- * @note In case the input or output have to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
- * -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
- * -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
- * -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
- * -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
- * (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns LHS matrix
- *
- * @param[in] lhs_ptr Pointer to the LHS reshaped matrix. Supported data type: QASYMM8/QASYMM8_SIGNED
- * @param[in] lhs_stride_x Stride of the LHS reshaped matrix in X dimension (in bytes)
- * @param[in] lhs_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] lhs_stride_y Stride of the LHS reshaped matrix in Y dimension (in bytes)
- * @param[in] lhs_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the LHS reshaped matrix
- * @param[in] rhs_ptr Pointer to the RHS reshaped matrix. Supported data type: same as @p lhs_ptr
- * @param[in] rhs_stride_x Stride of the RHS reshaped matrix in X dimension (in bytes)
- * @param[in] rhs_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] rhs_stride_y Stride of the RHS reshaped matrix in Y dimension (in bytes)
- * @param[in] rhs_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the RHS reshaped matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data type: S32
- * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- * @param[in] lhs_stride_z Stride of the LHS reshaped matrix in Z dimension (in bytes)
- * @param[in] rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] lhs_cross_plane_pad (Optional) Bottom paddings for LHS matrix in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
- * @param[in] dst_cross_plane_pad (Optional) Bottom paddings for the output matrix in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
- */
-__kernel void gemmlowp_mm_reshaped_only_rhs_t(IMAGE_DECLARATION(lhs),
- IMAGE_DECLARATION(rhs),
- IMAGE_DECLARATION(dst),
- uint lhs_stride_z,
- uint rhs_stride_z,
- uint dst_stride_z
-#if defined(REINTERPRET_INPUT_AS_3D)
- ,
- uint lhs_cross_plane_pad
-#endif // REINTERPRET_INPUT_AS_3D
-#if defined(REINTERPRET_OUTPUT_AS_3D)
- ,
- uint dst_cross_plane_pad
-#endif // REINTERPRET_OUTPUT_AS_3D
- )
-{
- // Block size
-#define RHS_BLOCK_SIZE ((K0) * (N0))
-
- // RHS offset and step X
-#if defined(RHS_INTERLEAVE)
-#define RHS_OFFSET_X (K0)
-#define RHS_STEP_X ((K0) * (H0))
-#define RHS_STEP_LOOP (1)
-#else // defined(RHS_INTERLEAVE)
-#define RHS_OFFSET_X (RHS_BLOCK_SIZE)
-#define RHS_STEP_X (K0)
-#define RHS_STEP_LOOP (H0)
-#endif // defined(RHS_INTERLEAVE)
-
- uint x = get_global_id(0);
- uint y = get_global_id(1);
- uint z = get_global_id(2);
-
-#if defined(DUMMY_WORK_ITEMS)
- if((x * N0 >= N) || (y * M0 >= M))
- {
- return;
- }
-#endif // defined(DUMMY_WORK_ITEMS)
-
- // Compute LHS matrix address
- uint lhs_offset = lhs_offset_first_element_in_bytes + COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * (uint)lhs_stride_y;
-
- // Compute RHS matrix address
- uint rhs_offset = rhs_offset_first_element_in_bytes + (x % H0) * (uint)RHS_OFFSET_X + (x / (uint)H0) * rhs_stride_y;
-
-#if defined(MATRIX_B_DEPTH)
- // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
- rhs_offset += (z % MATRIX_B_DEPTH) * rhs_stride_z;
-#else // defined(MATRIX_B_DEPTH)
- rhs_offset += z * rhs_stride_z;
-#endif // defined(MATRIX_B_DEPTH)
-
- REPEAT_VAR_INIT_TO_CONST(8, uint, zlhs, 0); //uint zout0=0,zout1=0,zout2=0,... zout7=0;
- REPEAT_VAR_INIT_TO_CONST(16, uint, zrhs, 0);
-
-#if defined(REINTERPRET_INPUT_AS_3D)
- // The plane (zlhs) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
- CALCULATE_Z_OFFSET(M0, uint, zlhs, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, lhs_cross_plane_pad, lhs_stride_y);
-
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply lhs_stride_z by DEPTH_GEMM3D
- lhs_offset += z * lhs_stride_z * DEPTH_GEMM3D;
-
-#else // defined(REINTERPRET_INPUT_AS_3D)
-
- // Add offset for batched GEMM
- lhs_offset += z * lhs_stride_z;
-
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
- // Initialize the accumulators
- REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(ACC_DATA_TYPE, N0), c, 0); //VEC_DATA_TYPE(ACC_DATA_TYPE, N0) c0=0,c1=0,c2=0,... c(N0-1)=0;
-
- int i = 0;
- for(; i <= (K - K0); i += K0)
- {
- // Load values from LHS matrix
- LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs);
-
- // Load values from RHS matrix
- LOAD_BLOCK(N0, K0, DATA_TYPE, b, rhs_ptr, rhs_offset, RHS_STEP_X, zrhs);
-
- // Partial matrix multiplication M0,N0,K0
- ARM_MM_K0XN0XM0(M0, N0, K0, a, b, c);
-
- lhs_offset += K0;
- rhs_offset += N0 * RHS_STEP_X * RHS_STEP_LOOP;
- }
- // Left-over accumulations
- for(; i < K; ++i)
- {
- // Load values from LHS matrix
- LOAD_BLOCK(M0, 1, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs);
-
- // Load values from RHS reshaped matrix
- LOAD_BLOCK(N0, 1, DATA_TYPE, b, rhs_ptr, rhs_offset, RHS_STEP_X, zrhs);
-
- ARM_MM_K0XN0XM0(M0, N0, 1, a, b, c);
- lhs_offset += 1;
- rhs_offset += 1;
- }
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(int)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * dst_stride_y);
-
- REPEAT_VAR_INIT_TO_CONST(8, uint, zout, 0); //uint zout0=0,zout1=0,zout2=0,... zout7=0;
-
-#if defined(REINTERPRET_OUTPUT_AS_3D)
- // The plane (zout) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
- CALCULATE_Z_OFFSET(M0, uint, zout, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y);
-
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply dst_stride_z by DEPTH_GEMM3D
- dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
-
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
-
- // Add offset for batched GEMM
- dst_addr += z * dst_stride_z;
-
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-
- // Convert and store output block
- const bool cond_y = y == 0;
- const bool cond_x = ((x + 1) * N0 >= N);
-
- // Store output block
- REPEAT_VAR_INIT_CONVERT_SAT(M0, VEC_DATA_TYPE(int, N0), c, c_lp);
- STORE_BLOCK_BOUNDARY_AWARE(M0, N0, int, c_lp, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-
-#undef RHS_BLOCK_SIZE
-#undef RHS_OFFSET_X
-#undef RHS_STEP_X
-}
-
-#if defined(RESULT_OFFSET) && defined(RESULT_SHIFT) && defined(RESULT_MULTIPLIER)
/** This OpenCL kernel computes the matrix multiplication between 2 matrices with fused output stage using fixed-point arithmetic.
* The LHS matrix is NOT reshaped
* The RHS matrix is reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the block K0xN0 is transposed
@@ -727,164 +547,162 @@ __kernel void gemmlowp_mm_reshaped_only_rhs_t(IMAGE_DECLARATION(lhs),
* @param[in] result_shifts_step_x (Optional) output_shifts_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] result_shifts_offset_first_element_in_bytes (Optional) The offset of the first element in the output shifts vector
*/
-__kernel void gemmlowp_mm_reshaped_only_rhs_t_fused_output_stage_fixedpoint(IMAGE_DECLARATION(lhs),
- IMAGE_DECLARATION(rhs),
- IMAGE_DECLARATION(dst),
- uint lhs_stride_z,
- uint rhs_stride_z,
- uint dst_stride_z
+#if defined(GEMMLOWP_MM_RESHAPED_ONLY_RHS_T_FUSED_OUTPUT_STAGE_FIXEDPOINT)
+__kernel void gemmlowp_mm_reshaped_only_rhs_t_fused_output_stage_fixedpoint
+#elif defined(GEMMLOWP_MM_RESHAPED_ONLY_RHS_T) // defined(GEMMLOWP_MM_RESHAPED_ONLY_RHS_T_FUSED_OUTPUT_STAGE_FIXEDPOINT)
+__kernel void gemmlowp_mm_reshaped_only_rhs_t
+#endif // defined(GEMMLOWP_MM_RESHAPED_ONLY_RHS_T)
+(IMAGE_DECLARATION(lhs),
+ IMAGE_DECLARATION(rhs),
+ IMAGE_DECLARATION(dst),
+ uint lhs_stride_z,
+ uint rhs_stride_z,
+ uint dst_stride_z
#if defined(REINTERPRET_INPUT_AS_3D)
- ,
- uint lhs_cross_plane_pad
+ ,
+ uint lhs_cross_plane_pad
#endif // REINTERPRET_INPUT_AS_3D
#if defined(REINTERPRET_OUTPUT_AS_3D)
- ,
- uint dst_cross_plane_pad
+ ,
+ uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
#if defined(A_OFFSET)
- ,
- IMAGE_DECLARATION(sum_col)
+ ,
+ IMAGE_DECLARATION(sum_col)
#endif // defined(A_OFFSET)
#if defined(B_OFFSET)
- ,
- IMAGE_DECLARATION(sum_row)
+ ,
+ IMAGE_DECLARATION(sum_row)
#endif // defined(B_OFFSET)
#if defined(ADD_BIAS)
- ,
- VECTOR_DECLARATION(biases)
+ ,
+ VECTOR_DECLARATION(biases)
#endif // defined(ADD_BIAS)
#if defined(PER_CHANNEL_QUANTIZATION)
- ,
- VECTOR_DECLARATION(result_multipliers),
- VECTOR_DECLARATION(result_shifts)
+ ,
+ VECTOR_DECLARATION(result_multipliers),
+ VECTOR_DECLARATION(result_shifts)
#endif // defined(PER_CHANNEL_QUANTIZATION)
- )
+)
{
- // Block size
-#define RHS_BLOCK_SIZE ((K0) * (N0))
+ // @note: replace with (DIMENSION + PAD) once we pass the relevant info at compile time
+#define FULL_LHS_HEIGHT (lhs_stride_z / lhs_stride_y)
+#define FULL_DST_HEIGHT (dst_stride_z / dst_stride_y)
// RHS offset and step X
#if defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (K0)
-#define RHS_STEP_X ((K0) * (H0))
-#define RHS_STEP_LOOP (1)
+#define RHS_STEP_X (K0 * H0)
#else // defined(RHS_INTERLEAVE)
-#define RHS_OFFSET_X (RHS_BLOCK_SIZE)
+#define RHS_OFFSET_X (K0 * N0)
#define RHS_STEP_X (K0)
-#define RHS_STEP_LOOP (H0)
#endif // defined(RHS_INTERLEAVE)
+#define RHS_STEP_LOOP (N0 * K0 * H0)
- uint x = get_global_id(0);
- uint y = get_global_id(1);
- uint z = get_global_id(2);
+ uint x = GET_SPATIAL_IDX(0, 1, 1);
+ uint y = GET_SPATIAL_IDX(1, M0, PARTIAL_STORE_M0);
+ uint z = GET_SPATIAL_IDX(2, 1, 1);
+ int xo = (x * N0);
#if defined(DUMMY_WORK_ITEMS)
- if((x * N0 >= N) || (y * M0 >= M))
+ if((xo >= N) || (y >= M))
{
return;
}
#endif // defined(DUMMY_WORK_ITEMS)
// Compute LHS matrix address
- uint lhs_offset = lhs_offset_first_element_in_bytes + COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * (uint)lhs_stride_y;
+ uint lhs_y = y + z * FULL_LHS_HEIGHT;
// Compute RHS matrix address
- uint rhs_offset = rhs_offset_first_element_in_bytes + (x % H0) * (uint)RHS_OFFSET_X + (x / (uint)H0) * rhs_stride_y;
+ uint rhs_offset_x = (x % H0) * RHS_OFFSET_X;
+ uint rhs_offset_y = (x / H0) * rhs_stride_y;
#if defined(MATRIX_B_DEPTH)
// Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
- rhs_offset += (z % MATRIX_B_DEPTH) * rhs_stride_z;
+ rhs_offset_y += (z % MATRIX_B_DEPTH) * rhs_stride_z;
#else // defined(MATRIX_B_DEPTH)
- rhs_offset += z * rhs_stride_z;
+ rhs_offset_y += z * rhs_stride_z;
#endif // defined(MATRIX_B_DEPTH)
- REPEAT_VAR_INIT_TO_CONST(8, uint, zlhs, 0); //uint zout0=0,zout1=0,zout2=0,... zout7=0;
- REPEAT_VAR_INIT_TO_CONST(16, uint, zrhs, 0);
-
-#if defined(REINTERPRET_INPUT_AS_3D)
- // The plane (zlhs) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
- CALCULATE_Z_OFFSET(M0, uint, zlhs, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, lhs_cross_plane_pad, lhs_stride_y);
-
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply lhs_stride_z by DEPTH_GEMM3D
- lhs_offset += z * lhs_stride_z * DEPTH_GEMM3D;
-
-#else // defined(REINTERPRET_INPUT_AS_3D)
-
- // Add offset for batched GEMM
- lhs_offset += z * lhs_stride_z;
-
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
// Initialize the accumulators
- REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(ACC_DATA_TYPE, N0), c, 0); //VEC_DATA_TYPE(ACC_DATA_TYPE, N0) c0=0,c1=0,c2=0,... c(N0-1)=0;
+ TILE(ACC_DATA_TYPE, M0, N0, c);
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c[i].v = 0;
+ })
int i = 0;
for(; i <= (K - K0); i += K0)
{
+ TILE(DATA_TYPE, M0, K0, a);
+ TILE(DATA_TYPE, N0, K0, b);
+
// Load values from LHS matrix
- LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs);
+ T_LOAD(DATA_TYPE, M0, K0, BUFFER, lhs, i, lhs_y, 1, lhs_stride_y, a);
- // Load values from RHS matrix
- LOAD_BLOCK(N0, K0, DATA_TYPE, b, rhs_ptr, rhs_offset, RHS_STEP_X, zrhs);
+ // Load values from RHS matrix
+ LOOP_UNROLLING(int, _i, 0, 1, N0,
+ {
+ b[_i].v = VLOAD(K0)(0, (__global DATA_TYPE *)(rhs_ptr + rhs_offset_first_element_in_bytes + rhs_offset_x + rhs_offset_y + _i * RHS_STEP_X));
+ })
// Partial matrix multiplication M0,N0,K0
- ARM_MM_K0XN0XM0(M0, N0, K0, a, b, c);
+ T_MMUL(DATA_TYPE, DATA_TYPE, ACC_DATA_TYPE, M0, N0, K0, NT, T, a, b, c);
- lhs_offset += K0;
- rhs_offset += N0 * RHS_STEP_X * RHS_STEP_LOOP;
+ rhs_offset_x += RHS_STEP_LOOP;
}
+
+#if((K % K0) != 0)
+
// Left-over accumulations
for(; i < K; ++i)
{
- // Load values from LHS matrix
- LOAD_BLOCK(M0, 1, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs);
-
- // Load values from RHS reshaped matrix
- LOAD_BLOCK(N0, 1, DATA_TYPE, b, rhs_ptr, rhs_offset, RHS_STEP_X, zrhs);
+ TILE(DATA_TYPE, M0, 1, a);
+ TILE(DATA_TYPE, N0, 1, b);
- ARM_MM_K0XN0XM0(M0, N0, 1, a, b, c);
- lhs_offset += 1;
- rhs_offset += 1;
- }
- // Result of MM is of type DATA_TYPE
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * dst_stride_y);
-
- REPEAT_VAR_INIT_TO_CONST(8, uint, zout, 0); //uint zout0=0,zout1=0,zout2=0,... zout7=0;
-
-#if defined(REINTERPRET_OUTPUT_AS_3D)
- // The plane (zout) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
- CALCULATE_Z_OFFSET(M0, uint, zout, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y);
+ // Load values from LHS matrix
+ T_LOAD(DATA_TYPE, M0, 1, BUFFER, lhs, i, lhs_y, 1, lhs_stride_y, a);
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply dst_stride_z by DEPTH_GEMM3D
- dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
+ LOOP_UNROLLING(int, _i, 0, 1, N0,
+ {
+ b[_i].v = *(__global DATA_TYPE *)(rhs_ptr + rhs_offset_first_element_in_bytes + rhs_offset_x + rhs_offset_y + _i * RHS_STEP_X);
+ })
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
+ T_MMUL(DATA_TYPE, DATA_TYPE, ACC_DATA_TYPE, M0, N0, 1, NT, T, a, b, c);
- // Add offset for batched GEMM
- dst_addr += z * dst_stride_z;
+ rhs_offset_x += 1;
+ }
+#endif // ((K % K0) != 0)
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
+#if defined(FUSED_OUTPUT_STAGE_FIXED_POINT)
- // Convert result of matrix multiplication to S32
- REPEAT_VAR_INIT_CONVERT_SAT(M0, VEC_DATA_TYPE(int, N0), c, c_int);
+ TILE(int, M0, N0, c_int);
+ TILE(int, M0, N0, offset_s32);
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ offset_s32[i].v = (VEC_DATA_TYPE(int, N0))K_OFFSET;
+ })
- // Offset contribution: c += (A_OFFSET * sum_col) + (B_OFFSET * sum_row) + K_OFFSET;
- REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(int, N0), offset_s32_, K_OFFSET);
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c_int[i].v = CONVERT_SAT(c[i].v, VEC_DATA_TYPE(int, N0));
+ })
#if defined(A_OFFSET)
- // Compute the offset contribution due to A_OFFSET
- __global uchar *sum_col_addr = sum_col_ptr + sum_col_offset_first_element_in_bytes + (x * (uint)N0) * sizeof(int);
#if defined(SUM_COL_HAS_BATCHES)
- sum_col_addr += z * sum_col_stride_y;
+ int sum_col_y = z;
+#else // defined(SUM_COL_HAS_BATCHES)
+ int sum_col_y = 0;
#endif // defined(SUM_COL_HAS_BATCHES)
- VEC_DATA_TYPE(int, N0)
- a_offset_s32 = VLOAD(N0)(0, (__global int *)sum_col_addr);
- a_offset_s32 *= (VEC_DATA_TYPE(int, N0))A_OFFSET;
+ TILE(int, 1, N0, a_offset_s32);
- REPEAT_ADD_VECTOR_TO_VAR(M0, offset_s32_, a_offset_s32);
+ T_LOAD(int, 1, N0, BUFFER, sum_col, xo, sum_col_y, 1, sum_col_stride_y, a_offset_s32);
+
+ a_offset_s32[0].v *= A_OFFSET;
+
+ T_ELTWISE_BROADCAST_ADD_X(int, M0, N0, offset_s32, a_offset_s32, offset_s32);
#endif // defined(A_OFFSET)
#if defined(B_OFFSET)
@@ -892,71 +710,96 @@ __kernel void gemmlowp_mm_reshaped_only_rhs_t_fused_output_stage_fixedpoint(IMAG
// Note: The sum_row tensor is generated through CLGEMMLowpMatrixAReductionKernel which
 // does not introduce paddings. For this reason it is safe to access the tensor in this manner
// without considering that the coordinate "y" could come from an input 3D tensor
- __global uchar *sum_row_addr = sum_row_ptr + sum_row_offset_first_element_in_bytes + (COMPUTE_M0_START_ROW(y, (uint)M0, PARTIAL_STORE_M0)) * sizeof(int) + z * sum_row_stride_y;
+ TILE(int, M0, N0, b_offset_s32);
- LOAD_SCALAR_AS_VECTOR(M0, N0, int, b_offset_s32_, sum_row_addr, 0, sum_row_stride_x);
+ T_LOAD(int, M0, 1, BUFFER, sum_row, y + z * (sum_row_stride_y / sizeof(int)), 0, 1, sum_row_stride_x, b_offset_s32);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ offset_s32[i].v += b_offset_s32[i].v * B_OFFSET;
+ })
- REPEAT_MLA_VAR_WITH_CONST_VEC(M0, offset_s32_, b_offset_s32_, (VEC_DATA_TYPE(int, N0))B_OFFSET);
#endif // defined(B_OFFSET)
#if defined(ADD_BIAS)
- // Add bias
- __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + (x * (uint)N0) * sizeof(int);
- VEC_DATA_TYPE(int, N0)
- bias_values = VLOAD(N0)(0, (__global int *)bias_addr);
- REPEAT_ADD_VECTOR_TO_VAR(M0, offset_s32_, bias_values);
+ TILE(int, 1, N0, bias);
+
+ T_LOAD(int, 1, N0, BUFFER, biases, xo, 0, 1, 0, bias);
+
+ T_ELTWISE_BROADCAST_ADD_X(int, M0, N0, offset_s32, bias, offset_s32);
#endif // defined(ADD_BIAS)
- REPEAT_ADD_TWO_VARS(M0, c_int, offset_s32_);
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c_int[i].v += offset_s32[i].v;
+ })
+
+ TILE(DATA_TYPE, M0, N0, c_lp);
// Multiply by result_mult_int and shift
#if defined(PER_CHANNEL_QUANTIZATION)
- __global uchar *result_multipliers_addr = result_multipliers_ptr + result_multipliers_offset_first_element_in_bytes + (x * (uint)N0) * sizeof(int);
- __global uchar *result_shifts_addr = result_shifts_ptr + result_shifts_offset_first_element_in_bytes + (x * (uint)N0) * sizeof(int);
+ TILE(int, 1, N0, res_mul);
+ TILE(int, 1, N0, res_shift);
- VEC_DATA_TYPE(int, N0)
- res_mul = VLOAD(N0)(0, (__global int *)result_multipliers_addr);
- VEC_DATA_TYPE(int, N0)
- res_shift = VLOAD(N0)(0, (__global int *)result_shifts_addr);
-
- REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_PER_CHANNEL(M0, N0, c_int, res_mul, res_shift);
-#else // defined(PER_CHANNEL_QUANTIZATION)
-
-#if RESULT_SHIFT < 0
- REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(M0, N0, c_int, RESULT_MULTIPLIER, RESULT_SHIFT);
-#else // RESULT_SHIFT >= 0
- REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(M0, N0, c_int, RESULT_MULTIPLIER, RESULT_SHIFT);
-#endif // RESULT_SHIFT < 0
+ T_LOAD(int, 1, N0, BUFFER, result_multipliers, xo, 0, 0, 0, res_mul);
+ T_LOAD(int, 1, N0, BUFFER, result_shifts, xo, 0, 0, 0, res_shift);
+ T_QUANTIZE8(int, DATA_TYPE, PER_CHANNEL, M0, N0, RESULT_OFFSET, RESULT_SHIFT, RESULT_MULTIPLIER, c_int, res_mul, res_shift, c_lp);
+#else // defined(PER_CHANNEL_QUANTIZATION)
+ T_QUANTIZE8(int, DATA_TYPE, PER_TENSOR, M0, N0, RESULT_OFFSET, RESULT_SHIFT, RESULT_MULTIPLIER, c_int, 0, 0, c_lp);
#endif // defined(PER_CHANNEL_QUANTIZATION)
- // Add the offset terms to GEMM's result
- REPEAT_ADD_CONST_TO_VAR(M0, VEC_DATA_TYPE(int, N0), c_int, RESULT_OFFSET);
-
#if defined(MIN_BOUND)
- REPEAT_MAX_CONST_VAR(M0, VEC_DATA_TYPE(int, N0), c_int, MIN_BOUND);
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c_lp[i].v = max(c_lp[i].v, (VEC_DATA_TYPE(DATA_TYPE, N0))MIN_BOUND);
+ })
#endif // defined(MIN_BOUND)
#if defined(MAX_BOUND)
- REPEAT_MIN_CONST_VAR(M0, VEC_DATA_TYPE(int, N0), c_int, MAX_BOUND);
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c_lp[i].v = min(c_lp[i].v, (VEC_DATA_TYPE(DATA_TYPE, N0))MAX_BOUND);
+ })
#endif // defined(MAX_BOUND)
- // Convert and store output block
- const bool cond_y = y == 0;
- const bool cond_x = ((x + 1) * N0 >= N);
+#else // defined(FUSED_OUTPUT_STAGE_FIXED_POINT)
+ TILE(int, M0, N0, c_lp);
- // Store output block
- REPEAT_VAR_INIT_CONVERT_SAT(M0, VEC_DATA_TYPE(DATA_TYPE, N0), c_int, c_lp);
- STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, c_lp, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c_lp[i].v = CONVERT_SAT(c[i].v, VEC_DATA_TYPE(int, N0));
+ })
+#endif // defined(FUSED_OUTPUT_STAGE_FIXED_POINT)
+
+ TILE(uint, M0, 1, dst_indirect_y);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+#if defined(REINTERPRET_OUTPUT_AS_3D)
+ dst_indirect_y[i].v = (uint)min((int)((y + i) % HEIGHT_GEMM3D), (int)HEIGHT_GEMM3D - 1);
+ dst_indirect_y[i].v += (uint)min((int)((y + i) / HEIGHT_GEMM3D), (int)DEPTH_GEMM3D - 1) * FULL_DST_HEIGHT;
+ dst_indirect_y[i].v += z * FULL_DST_HEIGHT * DEPTH_GEMM3D;
+#else // defined(REINTERPRET_OUTPUT_AS_3D)
+ dst_indirect_y[i].v = (uint)min((int)y + i, (int)M - 1) + z * FULL_DST_HEIGHT;
+#endif // defined(REINTERPRET_OUTPUT_AS_3D)
+ })
+
+ const bool cond_x = (xo > (N - N0)) & (PARTIAL_STORE_N0 != 0);
+
+#if defined(FUSED_OUTPUT_STAGE_FIXED_POINT)
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, xo, dst_stride_y, cond_x, c_lp, dst_indirect_y);
+#else // defined(FUSED_OUTPUT_STAGE_FIXED_POINT)
+ T_STORE_INDIRECT_WIDTH_SELECT(int, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, xo, dst_stride_y, cond_x, c_lp, dst_indirect_y);
+#endif // defined(FUSED_OUTPUT_STAGE_FIXED_POINT)
-#undef RHS_BLOCK_SIZE
#undef RHS_OFFSET_X
#undef RHS_STEP_X
+#undef RHS_STEP_LOOP
}
-#endif // defined(RESULT_OFFSET) && defined(RESULT_SHIFT) && defined(RESULT_MULTIPLIER)
-#endif // defined(M0) && defined(N0) && defined(K0) && defined(H0) && defined(K) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
+#endif // defined(GEMMLOWP_MM_RESHAPED_ONLY_RHS_T_FUSED_OUTPUT_STAGE_FIXEDPOINT) || defined(GEMMLOWP_MM_RESHAPED_ONLY_RHS_T)
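+// Illustrative summary (not part of the kernel) of the fused fixed-point output stage above, written
+// as per-element pseudo-code; multiply_by_quantized_multiplier() is a hypothetical name standing in
+// for the RESULT_MULTIPLIER/RESULT_SHIFT rescale, and each offset/bias term is only present when the
+// corresponding build option is defined:
+//   acc       = c[m][n] + K_OFFSET + A_OFFSET * sum_col[n] + B_OFFSET * sum_row[m] + bias[n];
+//   requant   = multiply_by_quantized_multiplier(acc, RESULT_MULTIPLIER, RESULT_SHIFT) + RESULT_OFFSET;
+//   dst[m][n] = clamp(requant, MIN_BOUND, MAX_BOUND); // then saturate-cast to the output data type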
-#if defined(M0) && defined(N0) && defined(K0) && defined(K) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
+#if defined(GEMMLOWP_MM_NATIVE)
/** This OpenCL kernel computes the matrix multiplication between 2 matrices.
* The LHS matrix is NOT reshaped
@@ -1139,9 +982,9 @@ __kernel void gemmlowp_mm_native(IMAGE_DECLARATION(lhs),
REPEAT_VAR_INIT_CONVERT(M0, VEC_DATA_TYPE(int, N0), c, res); // resN = CONVERT(cN, VEC_DATA_TYPE(int, N0));
STORE_BLOCK_BOUNDARY_AWARE(M0, N0, int, res, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
}
-#endif // defined(M0) && defined(N0) && defined(K0) && defined(K) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
+#endif // defined(GEMMLOWP_MM_NATIVE)
-#if defined(COLS_A)
+#if defined(GEMMLOWP_MATRIX_A_REDUCTION)
/** OpenCL kernel used to compute the row-vectors of sums of all the entries in each row of Matrix A.
* It is also possible to multiply each reduced row by a scalar value, if SCALAR is passed at compile time.
*
@@ -1205,8 +1048,9 @@ __kernel void gemmlowp_matrix_a_reduction(TENSOR3D_DECLARATION(src),
#endif // defined(SCALAR)
*((__global int *)dst.ptr) = (int)sum_row;
}
+#endif // defined(GEMMLOWP_MATRIX_A_REDUCTION)
-#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
+#if defined(GEMMLOWP_MATRIX_A_REDUCTION_DOT8)
/** OpenCL kernel used to compute the row-vectors of sums of all the entries in each row of Matrix A using the arm dot product instruction.
* It is also possible to multiply each reduced row by a scalar value, if SCALAR is passed at compile time.
*
@@ -1252,17 +1096,17 @@ __kernel void gemmlowp_matrix_a_reduction_dot8(TENSOR3D_DECLARATION(src),
VEC_DATA_TYPE(DATA_TYPE, 16)
a0 = vload16(0, matrix_a + i);
- sum_row += arm_dot(a0.s0123, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
- sum_row += arm_dot(a0.s4567, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
- sum_row += arm_dot(a0.s89AB, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
- sum_row += arm_dot(a0.sCDEF, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
+ DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.s0123, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row);
+ DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.s4567, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row);
+ DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.s89AB, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row);
+ DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.sCDEF, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row);
a0 = vload16(1, matrix_a + i);
- sum_row += arm_dot(a0.s0123, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
- sum_row += arm_dot(a0.s4567, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
- sum_row += arm_dot(a0.s89AB, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
- sum_row += arm_dot(a0.sCDEF, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
+ DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.s0123, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row);
+ DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.s4567, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row);
+ DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.s89AB, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row);
+ DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.sCDEF, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row);
}
// This for loop performs the leftover accumulations
@@ -1276,10 +1120,9 @@ __kernel void gemmlowp_matrix_a_reduction_dot8(TENSOR3D_DECLARATION(src),
#endif // defined(SCALAR)
*((__global int *)dst.ptr) = (int)sum_row;
}
-#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
-#endif // defined(COLS_A)
+#endif // defined(GEMMLOWP_MATRIX_A_REDUCTION_DOT8)
-#if defined(COLS_B) && defined(ROWS_B) && defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER)
+#if defined(GEMMLOWP_MATRIX_B_REDUCTION)
/** OpenCL kernel used to compute the row-vectors of sums of all the entries in each column of Matrix B.
* It is also possible to multiply each reduced column by a scalar value, if SCALAR is passed at compile time.
*
@@ -1359,7 +1202,7 @@ __kernel void gemmlowp_matrix_b_reduction(TENSOR3D_DECLARATION(src),
STORE_VECTOR_SELECT(res, int, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}
-#endif // defined(COLS_B) && defined(ROWS_B) && defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER)
+#endif // defined(GEMMLOWP_MATRIX_B_REDUCTION)
#endif // defined(DATA_TYPE) && defined(ACC_DATA_TYPE)
@@ -1463,6 +1306,7 @@ inline VEC_INT offset_contribution(
return (VEC_INT)K_OFFSET + a_offset_s32 + b_offset_s32;
}
+#if defined(GEMMLOWP_OFFSET_CONTRIBUTION)
/* OpenCL kernel used to add the offset contribution after matrix multiplication. The computation is performed in-place
*
* This kernel takes a final int32 accumulator value (the output of matrix multiplication),
@@ -1566,8 +1410,9 @@ __kernel void gemmlowp_offset_contribution(TENSOR3D_DECLARATION(mm_result)
// Store the result with the offset contribution
STORE_VECTOR_SELECT(in_s32_, int, mm_result_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}
+#endif // defined(GEMMLOWP_OFFSET_CONTRIBUTION)
-#if defined(RESULT_OFFSET) && defined(RESULT_MULTIPLIER) && defined(RESULT_SHIFT) && defined(OUTPUT_DATA_TYPE)
+#if defined(GEMMLOWP_OFFSET_CONTRIBUTION_QUANTIZE_DOWN)
/* OpenCL kernel used to add the offset contribution after @ref CLGEMMLowpMatrixMultiplyKernel and it quantizes down to uint8.
*
* This kernel takes a final int32 accumulator value (the output of @CLGEMMLowpMatrixMultiplyKernel), adds to it the offset contribution of matrix A and matrix B and quantizes to uint8 through the output stage.
@@ -1743,7 +1588,9 @@ __kernel void gemmlowp_offset_contribution_quantize_down(TENSOR3D_DECLARATION(mm
// Store the result
STORE_VECTOR_SELECT(res, OUTPUT_DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}
+#endif // defined(GEMMLOWP_OFFSET_CONTRIBUTION_QUANTIZE_DOWN)
+#if defined(GEMMLOWP_OFFSET_CONTRIBUTION_QUANTIZE_DOWN_FIXEDPOINT)
/* OpenCL kernel used to add the offset contribution after matrix multiplication and it quantizes down to uint8.
*
* This kernel takes a final int32 accumulator value (the output of matrix multiplication), adds to it the offset contribution of matrix A and matrix B and quantizes to uint8 through the output stage.
@@ -1924,13 +1771,13 @@ __kernel void gemmlowp_offset_contribution_quantize_down_fixedpoint(TENSOR3D_DEC
// Store the result
STORE_VECTOR_SELECT(res, OUTPUT_DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}
-#endif // defined(RESULT_OFFSET) && defined(RESULT_MULTIPLIER) && defined(RESULT_SHIFT) && defined(OUTPUT_DATA_TYPE)
+#endif // defined(GEMMLOWP_OFFSET_CONTRIBUTION_QUANTIZE_DOWN_FIXEDPOINT)
#undef VEC_INT
#endif // defined(K_OFFSET) && defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER)
-#if defined(RESULT_OFFSET) && defined(RESULT_MULT_INT) && defined(RESULT_SHIFT)
+#if defined(GEMMLOWP_OUTPUT_STAGE_QUANTIZE_DOWN)
/** This OpenCL kernel is used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED
*
* This kernel takes a final int32 accumulator value and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value.
@@ -2026,9 +1873,9 @@ __kernel void gemmlowp_output_stage_quantize_down(TENSOR3D_DECLARATION(src),
// Store the result
STORE_VECTOR_SELECT(res, OUTPUT_DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}
-#endif // defined(RESULT_OFFSET) && defined(RESULT_MULT_INT) && defined(RESULT_SHIFT)
+#endif // defined(GEMMLOWP_OUTPUT_STAGE_QUANTIZE_DOWN)
-#if defined(RESULT_OFFSET_AFTER_SHIFT) && defined(RESULT_FIXEDPOINT_MULTIPLIER) && defined(RESULT_SHIFT)
+#if defined(GEMMLOWP_OUTPUT_STAGE_QUANTIZE_DOWN_FIXEDPOINT)
/** This OpenCL kernel is used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED
*
* This kernel takes a final int32 accumulator value (the output of matrix multiplication), and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value.
@@ -2123,10 +1970,9 @@ __kernel void gemmlowp_output_stage_quantize_down_fixedpoint(TENSOR3D_DECLARATIO
// Store the result
STORE_VECTOR_SELECT(res, OUTPUT_DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}
-#endif // defined(RESULT_OFFSET_AFTER_SHIFT) && defined(RESULT_FIXEDPOINT_MULTIPLIER) && defined(RESULT_SHIFT)
-
-#if defined(RESULT_FIXEDPOINT_MULTIPLIER) && defined(RESULT_SHIFT)
+#endif // defined(GEMMLOWP_OUTPUT_STAGE_QUANTIZE_DOWN_FIXEDPOINT)
+#if defined(GEMMLOWP_OUTPUT_STAGE_QUANTIZE_DOWN_FIXEDPOINT_QSYMM16)
/** This OpenCL kernel is used to quantize down the int32 accumulator values of GEMMLowp to QSYMM16
*
* This kernel takes a final int32 accumulator value (the output of matrix multiplication), and processes it to obtain the final QSYMM16 value.
@@ -2215,9 +2061,9 @@ __kernel void gemmlowp_output_stage_quantize_down_fixedpoint_qsymm16(TENSOR3D_DE
// Store the result
STORE_VECTOR_SELECT(res, short, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}
-#endif // defined(RESULT_FIXEDPOINT_MULTIPLIER) && defined(RESULT_SHIFT)
+#endif // defined(GEMMLOWP_OUTPUT_STAGE_QUANTIZE_DOWN_FIXEDPOINT_QSYMM16)
-#if defined(REAL_MULTIPLIER) && defined(OUTPUT_OFFSET)
+#if defined(GEMMLOWP_OUTPUT_STAGE_QUANTIZE_DOWN_FLOAT)
/** This OpenCL kernel is used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED
*
* This kernel takes a final int32 accumulator value (the output of matrix multiplication), and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value.
@@ -2292,12 +2138,13 @@ __kernel void gemmlowp_output_stage_quantize_down_float(TENSOR3D_DECLARATION(src
VEC_DATA_TYPE(int, VEC_SIZE)
biases_values = VLOAD(VEC_SIZE)(0, (__global int *)bias_addr);
- input_values += (int4)biases_values;
+ input_values += (VEC_DATA_TYPE(int, VEC_SIZE))biases_values;
#endif // defined(ADD_BIAS)
// Convert to float
- float4 input_values_f = convert_float4(input_values);
- input_values_f = round(input_values_f * (float)REAL_MULTIPLIER + (float)OUTPUT_OFFSET);
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ input_values_f = CONVERT(input_values, VEC_DATA_TYPE(float, VEC_SIZE));
+ input_values_f = round(input_values_f * (float)REAL_MULTIPLIER + (float)OUTPUT_OFFSET);
VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE)
res0 = CONVERT_SAT(input_values_f, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE));
@@ -2312,4 +2159,4 @@ __kernel void gemmlowp_output_stage_quantize_down_float(TENSOR3D_DECLARATION(src
// Store the result
STORE_VECTOR_SELECT(res, OUTPUT_DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}
-#endif // defined(REAL_MULTIPLIER) && defined(OUTPUT_OFFSET)
+#endif // defined(GEMMLOWP_OUTPUT_STAGE_QUANTIZE_DOWN_FLOAT)
diff --git a/src/core/CL/cl_kernels/common/gemmlowp_reshaped_only_rhs_mmul.cl b/src/core/CL/cl_kernels/common/gemmlowp_reshaped_only_rhs_mmul.cl
new file mode 100644
index 0000000000..72fe3d3b89
--- /dev/null
+++ b/src/core/CL/cl_kernels/common/gemmlowp_reshaped_only_rhs_mmul.cl
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "activation_float_helpers.h"
+#include "helpers.h"
+#include "tile_helpers.h"
+#if defined(GEMMLOWP_MM_RESHAPED_ONLY_RHS_MMUL)
+/** This OpenCL kernel computes the matrix multiplication between 2 matrices using the MMUL extension:
+ *
+ * The LHS matrix is NOT reshaped
+ * The RHS is reshaped with @ref ClGemmMatrixMultiplyReshapedOnlyRhsKernel and the block K0xN0 is transposed
+ *
+ * @note The block's dimensions used for reshaping the RHS matrix (N0 and K0) must be passed at compile time using -DN0 and -DK0 (e.g. -DN0=1, -DK0=1).
+ * @note The number of M0 rows to process must be passed at compile time using -DM0 (e.g. -DM0=1)
+ * @note The number of output columns processed by the cooperative mmul extension must be passed at compile time using -DMMUL_N0 (e.g., -DMMUL_N0=4)
+ * @note The number of output rows processed by the cooperative mmul extension must be passed at compile time using -DMMUL_M0 (e.g., -DMMUL_M0=4)
+ * @note The number of lhs columns (or rhs rows) processed by the cooperative mmul extension must be passed at compile time using -DMMUL_K0 (e.g., -DMMUL_K0=16)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ * - M0 = 1, 2, 4
+ * - N0 = 1, 4, 8
+ * - K0 = 4
+ *
+ * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions should also be passed at compile time using -DA_VAL= and -DB_VAL= respectively.
+ * The activation function is performed after the bias addition
+ *
+ * @param[in] lhs_ptr Pointer to the LHS tensor. Supported data types: QASYMM8/QASYMM8_SIGNED
+ * @param[in] lhs_stride_y Stride of the LHS tensor in Y dimension (in bytes)
+ * @param[in] lhs_stride_z Stride of the LHS tensor in Z dimension (in bytes)
+ * @param[in] lhs_w The size of the width dimension of the LHS tensor
+ * @param[in] lhs_h The size of the height dimension of the LHS tensor
+ * @param[in] lhs_n The size of the depth dimension of the LHS tensor
+ * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the LHS tensor
+ * @param[in] rhs_ptr Pointer to the RHS reshaped tensor. Supported data type: same as @p lhs_ptr
+ * @param[in] rhs_stride_y Stride of the RHS tensor in Y dimension (in bytes)
+ * @param[in] rhs_stride_z Stride of the RHS tensor in Z dimension (in bytes)
+ * @param[in] rhs_w The size of the width dimension of the RHS tensor
+ * @param[in] rhs_h The size of the height dimension of the RHS tensor
+ * @param[in] rhs_n The size of the depth dimension of the RHS tensor
+ * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the RHS tensor
+ * @param[in] bia_ptr (Optional) Pointer to the bias tensor. Supported data type: S32
+ * @param[in] bia_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes)
+ * @param[in] bia_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes)
+ * @param[in] bia_w (Optional) The size of the width dimension of the bias tensor
+ * @param[in] bia_h (Optional) The size of the height dimension of the bias tensor
+ * @param[in] bia_n (Optional) The size of the depth dimension of the bias tensor
+ * @param[in] bia_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data type: same as @p lhs_ptr or S32
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_w The size of the width dimension of the destination tensor
+ * @param[in] dst_h The size of the height dimension of the destination tensor
+ * @param[in] dst_n The size of the depth dimension of the destination tensor
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] M Number of rows in LHS matrix not reshaped
+ * @param[in] N Number of columns in RHS matrix not reshaped
+ * @param[in] K Number of columns in LHS matrix and rows in RHS matrix not reshaped
+ * @param[in] sum_col_ptr (Optional) Pointer to the source tensor. Supported data type: S32
+ * @param[in] sum_col_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
+ * @param[in] sum_col_step_x (Optional) sum_col_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] sum_col_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] sum_col_step_y (Optional) sum_col_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] sum_col_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
+ * @param[in] sum_row_ptr (Optional) Pointer to the source tensor. Supported data type: S32
+ * @param[in] sum_row_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
+ * @param[in] sum_row_step_x (Optional) sum_row_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] sum_row_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] sum_row_step_y (Optional) sum_row_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] sum_row_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
+ */
+__kernel void gemmlowp_mm_reshaped_only_rhs_mmul(
+ TENSOR3D_T(lhs, BUFFER),
+ TENSOR3D_T(rhs, BUFFER),
+#if defined(ADD_BIAS)
+ TENSOR3D_T(bia, BUFFER),
+#endif // defined(ADD_BIAS)
+ TENSOR3D_T(dst, BUFFER),
+ const int M,
+ const int N,
+ const int K
+#if defined(A_OFFSET)
+ ,
+ TENSOR3D_T(sum_col, BUFFER)
+#endif // defined(A_OFFSET)
+#if defined(B_OFFSET)
+ ,
+ TENSOR3D_T(sum_row, BUFFER)
+#endif // defined(B_OFFSET)
+)
+{
+#define MMUL_BLOCK_SIZE (MMUL_N0 * MMUL_M0)
+#define VEC_SIZE 4 // For int8 types input to mmul instruction is a length 4 vector
+
+ uint x0 = get_global_id(0);
+ uint y0 = get_global_id(1);
+ uint z = get_global_id(2);
+
+ // Get block ID and thread ID within the block
+ uint block_id = (x0 / MMUL_BLOCK_SIZE);
+ uint thread_id = (x0 % MMUL_BLOCK_SIZE);
+
+ // Coordinate within a block
+ uint block_x = thread_id % MMUL_N0;
+ uint block_y = (thread_id / MMUL_M0);
+
+ // Starting destination coordinates
+ uint dst_x = min(block_x * N0 + block_id * MMUL_N0 * N0, (uint)(N - 1));
+ uint dst_y = min(block_y * M0 + y0 * M0 * MMUL_M0, (uint)(M - M0));
+
+ uint lhs_x = VEC_SIZE * block_x;
+ uint lhs_y = dst_y;
+
+ uint rhs_x = VEC_SIZE * N0 * block_y;
+ uint rhs_y = 4 * block_id + block_x;
+
+ // Compute LHS/RHS/DST matrix address
+ lhs_offset_first_element_in_bytes += lhs_x * sizeof(DATA_TYPE) + lhs_y * lhs_stride_y + z * lhs_stride_z;
+ rhs_offset_first_element_in_bytes += rhs_x * sizeof(DATA_TYPE) + rhs_y * rhs_stride_y + z * rhs_stride_z;
+ dst_offset_first_element_in_bytes += dst_x * sizeof(OUT_DATA_TYPE) + dst_y * dst_stride_y + z * dst_stride_z;
+
+ TILE(ACC_DATA_TYPE, M0, N0, c);
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c[i].v = 0;
+ })
+
+ for(int k = 0; k <= K - MMUL_K0; k += MMUL_K0)
+ {
+ TILE(DATA_TYPE, M0, VEC_SIZE, a);
+ T_LOAD(DATA_TYPE, M0, VEC_SIZE, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+
+ TILE(DATA_TYPE, N0, VEC_SIZE, b);
+ T_LOAD(DATA_TYPE, N0, VEC_SIZE, BUFFER, rhs, 0, 0, 1, VEC_SIZE, b);
+
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ VEC_TYPE vec_a = (VEC_TYPE)(a[m0].s[0], a[m0].s[1], a[m0].s[2], a[m0].s[3]);
+ VEC_TYPE vec_b = (VEC_TYPE)(b[n0].s[0], b[n0].s[1], b[n0].s[2], b[n0].s[3]);
+ c[m0].s[n0] = arm_matrix_multiply(vec_a, vec_b, c[m0].s[n0]);
+ })
+ })
+
+ lhs_offset_first_element_in_bytes += MMUL_K0 * sizeof(DATA_TYPE);
+ rhs_offset_first_element_in_bytes += MMUL_K0 * N0 * sizeof(DATA_TYPE);
+ }
+
+ if(block_x * N0 + block_id * MMUL_N0 * N0 >= N)
+ {
+ return;
+ }
+
+ if(block_y * M0 + y0 * M0 * MMUL_M0 >= M)
+ {
+ return;
+ }
+
+#if defined(FUSED_OUTPUT_STAGE_FIXED_POINT)
+
+ TILE(int, M0, N0, offset_s32);
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ offset_s32[i].v = (VEC_DATA_TYPE(int, N0))K_OFFSET;
+ })
+
+#if defined(A_OFFSET)
+
+ TILE(int, 1, N0, a_offset_s32);
+
+ T_LOAD(int, 1, N0, BUFFER, sum_col, dst_x, z, 1, sum_col_stride_z, a_offset_s32);
+
+ a_offset_s32[0].v *= A_OFFSET;
+
+ T_ELTWISE_BROADCAST_ADD_X(int, M0, N0, offset_s32, a_offset_s32, offset_s32);
+#endif // defined(A_OFFSET)
+
+#if defined(B_OFFSET)
+
+ TILE(int, M0, 1, b_offset_s32);
+
+ T_LOAD(int, M0, 1, BUFFER, sum_row, dst_y, z * M, 1, 4, b_offset_s32);
+
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ offset_s32[m0].v += b_offset_s32[m0].v *B_OFFSET;
+ })
+
+#endif // defined(B_OFFSET)
+
+#if defined(ADD_BIAS)
+#if defined(BROADCAST_BIAS)
+ bia_offset_first_element_in_bytes += dst_x * sizeof(ACC_DATA_TYPE) + z * bia_stride_y;
+
+ TILE(int, M0, N0, bias);
+
+ T_LOAD(int, M0, N0, BUFFER, bia, dst_x, dst_y, 1, 1, bias);
+
+ T_ADD(ACC_DATA_TYPE, M0, N0, offset_s32, bias, offset_s32);
+
+#else // defined(BROADCAST_BIAS)
+ bia_offset_first_element_in_bytes += dst_x * sizeof(ACC_DATA_TYPE);
+
+ TILE(int, 1, N0, bias);
+
+ if(dst_x + N0 <= N || N0_LEFTOVER == 0)
+ {
+ bias[0].v = VLOAD(N0)(0, (ACC_DATA_TYPE *)(bia_ptr + bia_offset_first_element_in_bytes));
+ }
+ else
+ {
+ VLOAD_PARTIAL(N0, N0_LEFTOVER)
+ (bias[0].v, 0, (ACC_DATA_TYPE *)(bia_ptr + bia_offset_first_element_in_bytes));
+ }
+
+ T_ELTWISE_BROADCAST_ADD_X(int, M0, N0, offset_s32, bias, offset_s32);
+
+#endif // defined(BROADCAST_BIAS)
+#endif // defined(ADD_BIAS)
+
+ T_ADD(ACC_DATA_TYPE, M0, N0, c, offset_s32, c);
+ TILE(OUT_DATA_TYPE, M0, N0, c_lp);
+ T_QUANTIZE8(ACC_DATA_TYPE, OUT_DATA_TYPE, PER_TENSOR, M0, N0, RESULT_OFFSET, RESULT_SHIFT, RESULT_MULTIPLIER, c, 0, 0, c_lp);
+
+#if defined(MIN_BOUND)
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c_lp[i].v = max(c_lp[i].v, (VEC_DATA_TYPE(OUT_DATA_TYPE, N0))MIN_BOUND);
+ })
+#endif // defined(MIN_BOUND)
+#if defined(MAX_BOUND)
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c_lp[i].v = min(c_lp[i].v, (VEC_DATA_TYPE(OUT_DATA_TYPE, N0))MAX_BOUND);
+ })
+#endif // defined(MAX_BOUND)
+
+ T_ACTIVATION(DATA_TYPE, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, c, c);
+
+ if(dst_x + N0 <= N || N0_LEFTOVER == 0)
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE(N0)
+ (c_lp[m0].v, 0, (__global OUT_DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+ else
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE_PARTIAL(N0, N0_LEFTOVER)
+ (c_lp[m0].v, 0, (__global OUT_DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+
+#else // FUSED_OUTPUT_STAGE_FIXED_POINT
+ // Store
+ if(dst_x + N0 <= N || N0_LEFTOVER == 0)
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE(N0)
+ (c[m0].v, 0, (__global OUT_DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+ else
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE_PARTIAL(N0, N0_LEFTOVER)
+ (c[m0].v, 0, (__global OUT_DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+#endif // FUSED_OUTPUT_STAGE_FIXED_POINT
+}
+
+#endif // defined(GEMMLOWP_MM_RESHAPED_ONLY_RHS_MMUL)
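The work-item decomposition at the top of gemmlowp_mm_reshaped_only_rhs_mmul can be hard to follow from the macros alone. Below is a small C sketch that reproduces the block/lane arithmetic for illustrative M0/N0/MMUL_M0/MMUL_N0 values, ignoring the clamping against M and N; it only shows how global id 0 maps onto MMUL blocks.

#include <stdio.h>

/* Illustrative tile sizes; the kernel receives these as -D compile-time defines. */
enum { M0 = 2, N0 = 4, MMUL_M0 = 4, MMUL_N0 = 4 };
enum { MMUL_BLOCK_SIZE = MMUL_N0 * MMUL_M0 };

int main(void)
{
    for (unsigned x0 = 0; x0 < 2 * MMUL_BLOCK_SIZE; ++x0)
    {
        unsigned block_id  = x0 / MMUL_BLOCK_SIZE; /* which MMUL block */
        unsigned thread_id = x0 % MMUL_BLOCK_SIZE; /* lane within the block */
        unsigned block_x   = thread_id % MMUL_N0;  /* column inside the block */
        unsigned block_y   = thread_id / MMUL_M0;  /* row inside the block */
        unsigned dst_x     = block_x * N0 + block_id * MMUL_N0 * N0; /* unclamped dst column */
        printf("x0=%u -> block=%u lane=%u (bx=%u, by=%u) dst_x=%u\n",
               x0, block_id, thread_id, block_x, block_y, dst_x);
    }
    return 0;
}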
diff --git a/src/core/CL/cl_kernels/gemv.cl b/src/core/CL/cl_kernels/common/gemv.cl
index aaa83975f8..71a372eb29 100644
--- a/src/core/CL/cl_kernels/gemv.cl
+++ b/src/core/CL/cl_kernels/common/gemv.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/src/core/CL/cl_kernels/generate_proposals.cl b/src/core/CL/cl_kernels/common/generate_proposals.cl
index e8306c55a8..bfe1922ac2 100644
--- a/src/core/CL/cl_kernels/generate_proposals.cl
+++ b/src/core/CL/cl_kernels/common/generate_proposals.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -59,18 +59,16 @@ __kernel void generate_proposals_compute_all_anchors(
Vector anchors = CONVERT_TO_VECTOR_STRUCT_NO_STEP(anchors);
Vector rois = CONVERT_TO_VECTOR_STRUCT(rois);
- const size_t idx = get_global_id(0);
+ const unsigned int idx = get_global_id(0);
// Find the index of the anchor
- const size_t anchor_idx = idx % NUM_ANCHORS;
+ const unsigned int anchor_idx = idx % NUM_ANCHORS;
// Find which shift is this thread using
- const size_t shift_idx = idx / NUM_ANCHORS;
+ const unsigned int shift_idx = idx / NUM_ANCHORS;
// Compute the shift on the X and Y direction (the shift depends exclusively by the index thread id)
- const DATA_TYPE
- shift_x = (DATA_TYPE)(shift_idx % WIDTH) * STRIDE;
- const DATA_TYPE
- shift_y = (DATA_TYPE)(shift_idx / WIDTH) * STRIDE;
+ const float shift_x = (float)(shift_idx % WIDTH) * STRIDE;
+ const float shift_y = (float)(shift_idx / WIDTH) * STRIDE;
const VEC_DATA_TYPE(DATA_TYPE, NUM_ROI_FIELDS)
shift = (VEC_DATA_TYPE(DATA_TYPE, NUM_ROI_FIELDS))(shift_x, shift_y, shift_x, shift_y);
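The index decomposition used by generate_proposals_compute_all_anchors (one work-item per ROI) splits the flat global id into an anchor index and a spatial shift. A short C sketch with illustrative NUM_ANCHORS, WIDTH and STRIDE values:

#include <stdio.h>

enum { NUM_ANCHORS = 3, WIDTH = 4 }; /* illustrative placeholders */
static const float STRIDE = 16.f;

int main(void)
{
    for (unsigned idx = 0; idx < 2 * NUM_ANCHORS * WIDTH; ++idx)
    {
        unsigned anchor_idx = idx % NUM_ANCHORS; /* which anchor */
        unsigned shift_idx  = idx / NUM_ANCHORS; /* which spatial location */
        float shift_x = (float)(shift_idx % WIDTH) * STRIDE;
        float shift_y = (float)(shift_idx / WIDTH) * STRIDE;
        printf("idx=%u anchor=%u shift=(%g, %g)\n", idx, anchor_idx, shift_x, shift_y);
    }
    return 0;
}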
diff --git a/src/core/CL/cl_kernels/generate_proposals_quantized.cl b/src/core/CL/cl_kernels/common/generate_proposals_quantized.cl
index 04264197f4..70f861c4b7 100644
--- a/src/core/CL/cl_kernels/generate_proposals_quantized.cl
+++ b/src/core/CL/cl_kernels/common/generate_proposals_quantized.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/src/core/CL/cl_kernels/instance_normalization.cl b/src/core/CL/cl_kernels/common/instance_normalization.cl
index 480d9cd20c..f9b3cd3620 100644
--- a/src/core/CL/cl_kernels/instance_normalization.cl
+++ b/src/core/CL/cl_kernels/common/instance_normalization.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,14 +23,11 @@
*/
#include "helpers.h"
-#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(INTERNAL_DATA_TYPE) && defined(GAMMA) && defined(BETA) && defined(EPSILON) && defined(DIM_X) && defined(DIM_Y) && defined(DIM_Z)
-/** This function normalizes the input 2D tensor across the first dimension with respect to mean and standard deviation of the same dimension.
+#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(INTERNAL_DATA_TYPE) && defined(DIM_X) && defined(DIM_Y) && defined(DIM_Z)
+/** This function computes the mean and variance of each plane of the input tensor and provides it as output.
*
* @attention Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
* @attention Data type should be passed using the -DDATA_TYPE=data_type compile flag, e.g. -DDATA_TYPE=float
- * @attention The scale scalar value applied to the normalized tensor should be passed using the -DGAMMA=value compile flag, e.g. -DGAMMA=1.3
- * @attention The offset scalar value applied to the normalized tensor should be passed using the -DBETA=value compile flag, e.g. -DBETA=2.4
- * @attention Normalization epsilon parameter should be given as a preprocessor argument with -DEPSILON=value. e.g. -DEPSILON=0.001f
* @attention Dimensions X, Y, and Z should be given as a preprocessor argument with -DDIM_X=value, -DDIM_Y=value, -DDIM_Z=value. e.g. -DDIM_X=6, -DDIM_Y=2, -DDIM_Z=7
*
* @param[in] input_ptr Pointer to the first source tensor. Supported data types: F16/F32
@@ -40,6 +37,8 @@
* @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] input_stride_z Stride of the first source tensor in Z dimension (in bytes)
* @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] input_step_w input_stride_w * number of elements along W processed per workitem(in bytes)
* @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
* @param[out] output_ptr (Optional) Pointer to the destination tensor. Supported data types: same as @p input_ptr
* @param[in] output_stride_x (Optional) Stride of the destination tensor in X dimension (in bytes)
@@ -50,38 +49,37 @@
* @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination tensor
*/
-__kernel void instance_normalization(
- TENSOR4D_DECLARATION(input)
-#ifndef IN_PLACE
- ,
- TENSOR4D_DECLARATION(output)
-#endif /* IN_PLACE */
-)
+__kernel void compute_mean_var(
+ TENSOR4D_DECLARATION(input),
+ TENSOR3D_DECLARATION(output))
{
- Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, 0);
-#ifndef IN_PLACE
- Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output, 0);
-#endif /* IN_PLACE */
-
- INTERNAL_DATA_TYPE sum = 0.f;
- INTERNAL_DATA_TYPE sum_sq = 0.f;
+ Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input);
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output);
#if defined(NHWC)
-
- const int ch = get_global_id(0); // Current channel
- const int batch = get_global_id(2); // Current batch
- const int elements_plane = DIM_Y * DIM_Z;
+ const int ch = get_global_id(0); // Current channel
+ const int batch = get_global_id(1); // Current batch
+ const int elements_plane = DIM_Y * DIM_Z;
+ INTERNAL_DATA_TYPE part_sum = 0.f;
+ INTERNAL_DATA_TYPE part_sum_sq = 0.f;
+ const int in_offset = input_offset_first_element_in_bytes + batch * input_stride_w + ch * sizeof(DATA_TYPE);
for(int i_w = 0; i_w < DIM_Y; ++i_w)
{
for(int i_h = 0; i_h < DIM_Z; ++i_h)
{
INTERNAL_DATA_TYPE data = (INTERNAL_DATA_TYPE) * ((__global DATA_TYPE *)tensor4D_offset(&in, ch, i_w, i_h, batch));
- sum += data;
- sum_sq += data * data;
+ part_sum += data;
+ part_sum_sq += data * data;
}
}
+ INTERNAL_DATA_TYPE mean = (part_sum / elements_plane);
+ INTERNAL_DATA_TYPE var = (part_sum_sq / elements_plane) - (mean * mean);
+ __global INTERNAL_DATA_TYPE *output_address0 = (__global INTERNAL_DATA_TYPE *)tensor3D_offset(&out, ch, 0, batch);
+ *output_address0 = mean;
+ __global INTERNAL_DATA_TYPE *output_address1 = (__global INTERNAL_DATA_TYPE *)tensor3D_offset(&out, ch, 1, batch);
+ *output_address1 = var;
#else // !defined(NHWC)
const int ch = get_global_id(2) % DIM_Z; // Current channel
const int batch = get_global_id(2) / DIM_Z; // Current batch
@@ -127,16 +125,83 @@ __kernel void instance_normalization(
part_sum.s0 += part_sum.s1;
part_sum_sq.s0 += part_sum_sq.s1;
- sum = (INTERNAL_DATA_TYPE)part_sum.s0;
- sum_sq = (INTERNAL_DATA_TYPE)part_sum_sq.s0;
+ INTERNAL_DATA_TYPE sum = (INTERNAL_DATA_TYPE)part_sum.s0;
+ INTERNAL_DATA_TYPE sum_sq = (INTERNAL_DATA_TYPE)part_sum_sq.s0;
+
+ const INTERNAL_DATA_TYPE mean = (sum / elements_plane);
+ const INTERNAL_DATA_TYPE var = (sum_sq / elements_plane) - (mean * mean);
+
+ __global INTERNAL_DATA_TYPE *output_address0 = (__global INTERNAL_DATA_TYPE *)tensor3D_offset(&out, ch, 0, batch);
+ *output_address0 = mean;
+ __global INTERNAL_DATA_TYPE *output_address1 = (__global INTERNAL_DATA_TYPE *)tensor3D_offset(&out, ch, 1, batch);
+ *output_address1 = var;
#endif // defined(NHWC)
+}
+#endif /* defined(VEC_SIZE) && defined(DATA_TYPE) && defined(INTERNAL_DATA_TYPE) && defined(DIM_X) && defined(DIM_Y) && defined(DIM_Z) */
+
+#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(INTERNAL_DATA_TYPE) && defined(GAMMA) && defined(BETA) && defined(EPSILON) && defined(DIM_X) && defined(DIM_Y) && defined(DIM_Z)
+/** This function normalizes the input 2D tensor across the first dimension with respect to mean and standard deviation of the same dimension.
+ *
+ * @attention Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @attention Data type should be passed using the -DDATA_TYPE=data_type compile flag, e.g. -DDATA_TYPE=float
+ * @attention The scale scalar value applied to the normalized tensor should be passed using the -DGAMMA=value compile flag, e.g. -DGAMMA=1.3
+ * @attention The offset scalar value applied to the normalized tensor should be passed using the -DBETA=value compile flag, e.g. -DBETA=2.4
+ * @attention Normalization epsilon parameter should be given as a preprocessor argument with -DEPSILON=value. e.g. -DEPSILON=0.001f
+ * @attention Dimensions X, Y, and Z should be given as a preprocessor argument with -DDIM_X=value, -DDIM_Y=value, -DDIM_Z=value. e.g. -DDIM_X=6, -DDIM_Y=2, -DDIM_Z=7
+ *
+ * @param[in] input_ptr Pointer to the first source tensor. Supported data types: F16/F32
+ * @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the first source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the first source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
+ * @param[out] output_ptr (Optional) Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination tensor
+ */
+__kernel void instance_normalization(
+ TENSOR4D_DECLARATION(input),
+ TENSOR3D_DECLARATION(mean_var)
+#ifndef IN_PLACE
+ ,
+ TENSOR4D_DECLARATION(output)
+#endif /* IN_PLACE */
+)
+{
+ Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input);
+ Tensor3D mean_var = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(mean_var);
+#ifndef IN_PLACE
+ Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output);
+#endif /* IN_PLACE */
+
+#if defined(NHWC)
+ const int ch = get_global_id(0); // Current channel
+ const int batch = get_global_id(2); // Current batch
+#else /* defined(NHWC) */
+ const int ch = get_global_id(2) % DIM_Z; // Current channel
+ const int batch = get_global_id(2) / DIM_Z; // Current batch
+#endif /* defined(NHWC) */
- const INTERNAL_DATA_TYPE mean = (sum / elements_plane);
- const INTERNAL_DATA_TYPE var = (sum_sq / elements_plane) - (mean * mean);
- const INTERNAL_DATA_TYPE multip = GAMMA / sqrt(var + EPSILON);
+ const __global INTERNAL_DATA_TYPE *mean_ptr = (__global INTERNAL_DATA_TYPE *)tensor3D_offset(&mean_var, ch, 0, batch);
+ const __global INTERNAL_DATA_TYPE *var_ptr = (__global INTERNAL_DATA_TYPE *)tensor3D_offset(&mean_var, ch, 1, batch);
+ const INTERNAL_DATA_TYPE mean = (INTERNAL_DATA_TYPE) * mean_ptr;
+ const INTERNAL_DATA_TYPE var = (INTERNAL_DATA_TYPE) * var_ptr;
+ const INTERNAL_DATA_TYPE multip = GAMMA / sqrt(var + EPSILON);
+ const INTERNAL_DATA_TYPE beta = (INTERNAL_DATA_TYPE)BETA;
#if defined(NHWC)
+ const int in_offset = input_offset_first_element_in_bytes + batch * input_stride_w + ch * sizeof(DATA_TYPE);
+#ifndef IN_PLACE
+ const int out_offset = output_offset_first_element_in_bytes + batch * input_stride_w + ch * sizeof(DATA_TYPE);
+#endif /* IN_PLACE */
for(int i_w = 0; i_w < DIM_Y; ++i_w)
{
@@ -151,7 +216,6 @@ __kernel void instance_normalization(
*(output_address) = (*(input_address) - mean) * multip + (INTERNAL_DATA_TYPE)BETA;
}
}
-
#else // !defined(NHWC)
for(int y = 0; y < DIM_Y; ++y)
{
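The split into compute_mean_var and instance_normalization is the usual two-pass scheme: first accumulate the per-plane sum and sum of squares, then normalize with gamma / sqrt(var + epsilon) and add beta. A compact C reference over a single plane, with gamma, beta and epsilon as plain parameters rather than compile-time defines:

#include <math.h>
#include <stdio.h>

/* Two-pass instance normalization over one plane (reference only). */
static void instance_norm_plane(const float *in, float *out, int n,
                                float gamma, float beta, float epsilon)
{
    float sum = 0.f, sum_sq = 0.f;
    for (int i = 0; i < n; ++i) /* pass 1: mean and variance */
    {
        sum    += in[i];
        sum_sq += in[i] * in[i];
    }
    const float mean   = sum / n;
    const float var    = sum_sq / n - mean * mean;
    const float multip = gamma / sqrtf(var + epsilon);
    for (int i = 0; i < n; ++i) /* pass 2: normalize */
        out[i] = (in[i] - mean) * multip + beta;
}

int main(void)
{
    float x[4] = { 1.f, 2.f, 3.f, 4.f }, y[4];
    instance_norm_plane(x, y, 4, 1.f, 0.f, 1e-12f);
    for (int i = 0; i < 4; ++i) printf("%f\n", y[i]);
    return 0;
}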
diff --git a/src/core/CL/cl_kernels/common/l2_normalize.cl b/src/core/CL/cl_kernels/common/l2_normalize.cl
new file mode 100644
index 0000000000..fbe3406239
--- /dev/null
+++ b/src/core/CL/cl_kernels/common/l2_normalize.cl
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2016-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(VEC_SIZE_X) && defined(VEC_SIZE_LEFTOVER_X)
+/** This kernel performs l2 normalization on x-axis
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE_X=size. e.g. -DVEC_SIZE_X=16
+ * @note The leftover size in the X dimension should be given as a preprocessor argument using -DVEC_SIZE_LEFTOVER_X, i.e. x_dimension % VEC_SIZE_X. e.g. -DVEC_SIZE_LEFTOVER_X=1
+ *
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: F16/F32
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] sum_ptr Pointer to the source tensor. Supported data types: F16/F32
+ * @param[in] sum_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] sum_step_x sum_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] sum_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] sum_step_y sum_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] sum_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] epsilon Epsilon value
+ */
+__kernel void l2_normalize_x(
+ IMAGE_DECLARATION(input),
+ IMAGE_DECLARATION(sum),
+ IMAGE_DECLARATION(output),
+ DATA_TYPE epsilon)
+{
+ // Offset computation
+ const uint x_offs = max((int)(get_global_id(0) * VEC_SIZE_X - (VEC_SIZE_X - VEC_SIZE_LEFTOVER_X) % VEC_SIZE_X), 0);
+
+ // Address computation
+ __global uchar *input_addr = input_ptr + input_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) + get_global_id(1) * input_stride_y;
+ __global uchar *sum_addr = sum_ptr + sum_offset_first_element_in_bytes + get_global_id(1) * sum_stride_y;
+ __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) + get_global_id(1) * output_stride_y;
+
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X)
+ in = VLOAD(VEC_SIZE_X)(0, (__global DATA_TYPE *)input_addr);
+
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X)
+ normalize_value = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X))rsqrt(fmax(*((__global DATA_TYPE *)sum_addr), epsilon));
+
+ const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X)
+ data0 = in * normalize_value;
+
+ STORE_VECTOR_SELECT(data, DATA_TYPE, output_addr, VEC_SIZE_X, VEC_SIZE_LEFTOVER_X, VEC_SIZE_LEFTOVER_X != 0 && get_global_id(0) == 0);
+}
+
+/** This kernel performs l2 normalization on y-axis.
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE_X=size. e.g. -DVEC_SIZE_X=16
+ * @note The leftover size in the X dimension should be given as a preprocessor argument using -DVEC_SIZE_LEFTOVER_X, i.e. x_dimension % VEC_SIZE_X. e.g. -DVEC_SIZE_LEFTOVER_X=1
+ *
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: F16/F32
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] sum_ptr Pointer to the source tensor. Supported data types: F16/F32
+ * @param[in] sum_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] sum_step_x sum_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] sum_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] sum_step_y sum_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] sum_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] epsilon Epsilon value
+ */
+__kernel void l2_normalize_y(
+ IMAGE_DECLARATION(input),
+ IMAGE_DECLARATION(sum),
+ IMAGE_DECLARATION(output),
+ DATA_TYPE epsilon)
+{
+ // Offset computation
+ const uint x_offs = max((int)(get_global_id(0) * VEC_SIZE_X - (VEC_SIZE_X - VEC_SIZE_LEFTOVER_X) % VEC_SIZE_X), 0);
+
+ // Address computation
+ __global uchar *input_addr = input_ptr + input_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) + get_global_id(1) * input_stride_y;
+ __global uchar *sum_addr = sum_ptr + sum_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE);
+ __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) + get_global_id(1) * output_stride_y;
+
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X)
+ in = VLOAD(VEC_SIZE_X)(0, (__global DATA_TYPE *)input_addr);
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X)
+ sums = VLOAD(VEC_SIZE_X)(0, (__global DATA_TYPE *)sum_addr);
+
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X)
+ normalize_value = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X))rsqrt(fmax(sums, epsilon));
+
+ const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X)
+ data0 = in * normalize_value;
+
+ STORE_VECTOR_SELECT(data, DATA_TYPE, output_addr, VEC_SIZE_X, VEC_SIZE_LEFTOVER_X, VEC_SIZE_LEFTOVER_X != 0 && get_global_id(0) == 0);
+}
+
+/** This kernel performs l2 normalization on z-axis.
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE_X=size. e.g. -DVEC_SIZE_X=16
+ * @note The leftover size in the X dimension should be given as a preprocessor argument using -DVEC_SIZE_LEFTOVER_X, i.e. x_dimension % VEC_SIZE_X. e.g. -DVEC_SIZE_LEFTOVER_X=1
+ *
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: F16/F32
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] sum_ptr Pointer to the source tensor. Supported data types: F16/F32
+ * @param[in] sum_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] sum_step_x sum_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] sum_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] sum_step_y sum_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] sum_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] sum_step_z sum_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] sum_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] epsilon Epsilon value
+ */
+__kernel void l2_normalize_z(
+ TENSOR3D_DECLARATION(input),
+ TENSOR3D_DECLARATION(sum),
+ TENSOR3D_DECLARATION(output),
+ DATA_TYPE epsilon)
+{
+ // Offset computation
+ const uint x_offs = max((int)(get_global_id(0) * VEC_SIZE_X - (VEC_SIZE_X - VEC_SIZE_LEFTOVER_X) % VEC_SIZE_X), 0);
+
+ // Address computation
+ __global uchar *input_addr = input_ptr + input_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) + get_global_id(1) * input_stride_y + get_global_id(2) * input_stride_z;
+ __global uchar *sum_addr = sum_ptr + sum_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) + get_global_id(1) * sum_stride_y;
+ __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) + get_global_id(1) * output_stride_y + get_global_id(2) * output_stride_z;
+
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X)
+ in = VLOAD(VEC_SIZE_X)(0, (__global DATA_TYPE *)input_addr);
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X)
+ sums = VLOAD(VEC_SIZE_X)(0, (__global DATA_TYPE *)sum_addr);
+
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X)
+ data0 = in * ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X))(rsqrt(fmax(sums, epsilon))));
+
+ STORE_VECTOR_SELECT(data, DATA_TYPE, output_addr, VEC_SIZE_X, VEC_SIZE_LEFTOVER_X, VEC_SIZE_LEFTOVER_X != 0 && get_global_id(0) == 0);
+}
+#endif // defined(VEC_SIZE_X) && defined(VEC_SIZE_LEFTOVER_X)
\ No newline at end of file
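All three l2_normalize_* variants reduce to scaling each element by rsqrt(max(sum_of_squares, epsilon)), with the sum supplied by a separate reduction kernel. A minimal C sketch of the per-row computation (the sum is assumed to be precomputed, as in the kernels):

#include <math.h>
#include <stdio.h>

/* l2_normalize_x reference: out[i] = in[i] * rsqrt(max(sum, epsilon)) */
static void l2_normalize_row(const float *in, float sum, float *out, int n, float epsilon)
{
    const float norm = 1.f / sqrtf(fmaxf(sum, epsilon));
    for (int i = 0; i < n; ++i)
        out[i] = in[i] * norm;
}

int main(void)
{
    float in[3] = { 3.f, 0.f, 4.f }, out[3];
    float sum = 3.f * 3.f + 4.f * 4.f; /* sum of squares, normally another kernel's output */
    l2_normalize_row(in, sum, out, 3, 1e-12f);
    for (int i = 0; i < 3; ++i) printf("%f\n", out[i]); /* 0.6 0.0 0.8 */
    return 0;
}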
diff --git a/src/core/CL/cl_kernels/common/mat_mul.cl b/src/core/CL/cl_kernels/common/mat_mul.cl
new file mode 100644
index 0000000000..c7ef8ae52b
--- /dev/null
+++ b/src/core/CL/cl_kernels/common/mat_mul.cl
@@ -0,0 +1,708 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "activation_float_helpers.h"
+#include "helpers.h"
+#include "tile_helpers.h"
+
+#ifdef BIAS
+// This function performs in-place bias addition for float/half datatype when bias is enabled.
+// Note: The tile's dimensions used for the LHS and RHS matrices (M0 and N0) must be passed at compile time using -DN0 and -DM0 (e.g. -DN0=8, -DM0=4).
+inline void perform_bias_addition(uchar *bias_ptr, uint bias_offset_first_element_in_bytes, TILE(DATA_TYPE, M0, N0, acc), uint x)
+{
+ TILE(DATA_TYPE, 1, N0, bias_tile);
+
+ // below expands to use bias_ptr and bias_offset_first_element_in_bytes
+ T_LOAD(DATA_TYPE, 1, N0, BUFFER, bias, x, 0, 1, 0, bias_tile);
+
+ // c = c + bias[broadcasted]
+ T_ELTWISE_BROADCAST_ADD_X(DATA_TYPE, M0, N0, acc, bias_tile, acc);
+}
+#endif // defined(BIAS)
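perform_bias_addition broadcasts a 1 x N0 bias row across all M0 rows of the accumulator tile. A scalar C equivalent for a small tile, with illustrative sizes (the kernel does the same through T_LOAD and T_ELTWISE_BROADCAST_ADD_X):

#include <stdio.h>

enum { M0 = 2, N0 = 4 }; /* illustrative tile sizes */

/* Broadcast-add a 1 x N0 bias row onto an M0 x N0 accumulator tile. */
static void add_bias_broadcast(float acc[M0][N0], const float bias[N0])
{
    for (int m = 0; m < M0; ++m)
        for (int n = 0; n < N0; ++n)
            acc[m][n] += bias[n];
}

int main(void)
{
    float acc[M0][N0] = { { 1, 2, 3, 4 }, { 5, 6, 7, 8 } };
    float bias[N0]    = { 10, 20, 30, 40 };
    add_bias_broadcast(acc, bias);
    printf("%g %g\n", acc[0][0], acc[1][3]); /* 11 48 */
    return 0;
}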
+
+#if defined(MAT_MUL_NATIVE_NT_NT)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS non-transposed, RHS non-transposed - buffer only
+ *
+ * @note the "batch" here expresses the number of matrix multiplications to run in parallel. However, it
+ * should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
+ * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4).
+ * @note The fused activation function used should be passed with -DACTIVATION_TYPE; -DA_VAL and -DB_VAL are used for the min and max bounds of bounded activation functions.
+ * @note The number of leftover outputs rows/columns must be passed using -DPARTIAL_STORE_N0 and -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_N0=2, -DPARTIAL_STORE_M0=3)
+ * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the rhs tensor must be passed at compile time using -DRHS_TENSOR_TYPE (e.g. -DRHS_TENSOR_TYPE=BUFFER)
+ * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_NT_NT)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ * - M0 > 0
+ * - N0 = 1, 2, 3, 4, 8, 16 (only 4, 8, 16 if RHS_TENSOR_TYPE=IMAGE)
+ * - K0 = 1, 2, 3, 4, 8, 16
+ * @note Values > 8 for M0 are not expected to be efficient
+ *
+ * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: F32/F16
+ * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] lhs_w The width of the lhs tensor
+ * @param[in] lhs_h The height of the lhs tensor
+ * @param[in] lhs_n Number of the matrices (buffers) in the batch
+ * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix
+ * @param[in] rhs_img (Optional) Read only cl_image object for the rhs tensor. Included when RHS_TENSOR_TYPE=IMAGE
+ * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] rhs_w The width of the rhs tensor
+ * @param[in] rhs_h The height of the rhs tensor
+ * @param[in] rhs_n Number of the matrices (buffers) in the batch
+ * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix
+ * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr
+ * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes)
+ * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes)
+ * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor
+ * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor
+ * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor
+ * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
+ * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes)
+ * @param[in] dst_w The width of the dst tensor
+ * @param[in] dst_h The height of the dst tensor
+ * @param[in] dst_n Number of the matrices (buffers) in the batch
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix
+ */
+__kernel void mat_mul_native_nt_nt(
+ TENSOR3D_T(lhs, BUFFER),
+ TENSOR3D_T(rhs, RHS_TENSOR_TYPE),
+#ifdef BIAS
+ TENSOR3D_T(bias, BUFFER),
+#endif // defined(BIAS)
+ TENSOR3D_T(dst, BUFFER))
+{
+ const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0);
+ const uint y = GET_SPATIAL_IDX(1, M0, PARTIAL_STORE_M0);
+ const uint z = GET_SPATIAL_IDX(2, 1, 0);
+
+ // Compute LHS/RHS/DST matrix address
+ lhs_offset_first_element_in_bytes += y * lhs_stride_y + z * lhs_stride_z;
+ dst_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + y * dst_stride_y + z * dst_stride_z;
+
+ // Initialize the accumulators
+ TILE(DATA_TYPE, M0, N0, acc);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ acc[i].v = 0.f;
+ })
+
+ const int rhs_z = z * rhs_h;
+ int k;
+ for(k = 0; k <= K - K0; k += K0)
+ {
+ TILE(DATA_TYPE, M0, K0, a);
+ TILE(DATA_TYPE, K0, N0, b);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = 0.f;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, K0,
+ {
+ b[i].v = 0.f;
+ })
+
+ // Load tile from the lhs/rhs tensors
+ T_LOAD(DATA_TYPE, M0, K0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD(DATA_TYPE, K0, N0, RHS_TENSOR_TYPE, rhs, x, k + rhs_z, 1, rhs_stride_y, b);
+
+ T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, K0, NT, NT, a, b, acc);
+
+ lhs_offset_first_element_in_bytes += K0 * sizeof(DATA_TYPE);
+ }
+
+#if K % K0 != 0
+ /* Leftover Loop */
+ for(; k < K; ++k)
+ {
+ TILE(DATA_TYPE, M0, 1, a);
+ TILE(DATA_TYPE, 1, N0, b);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = 0.f;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, 1,
+ {
+ b[i].v = 0.f;
+ })
+
+ // Load tile from the lhs/rhs tensors
+ T_LOAD(DATA_TYPE, M0, 1, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD(DATA_TYPE, 1, N0, BUFFER, rhs, x, k + rhs_z, 1, rhs_stride_y, b);
+
+ T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, 1, NT, NT, a, b, acc);
+
+ lhs_offset_first_element_in_bytes += 1 * sizeof(DATA_TYPE);
+ }
+#endif // K % K0 != 0
+
+ const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0;
+ const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0;
+
+ TILE(int, M0, 1, indirect_buffer);
+ LOOP_UNROLLING(int, _i, 0, 1, M0,
+ {
+ indirect_buffer[_i].v = min(_i, select(M0 - 1, PARTIAL_STORE_M0 - 1, y_cond));
+ });
+
+#ifdef BIAS
+ perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, acc, x);
+#endif // defined(BIAS)
+
+ T_ACTIVATION(DATA_TYPE, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, acc, acc);
+
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, 0, dst_stride_y, x_cond, acc, indirect_buffer);
+}
+#endif // defined(MAT_MUL_NATIVE_NT_NT)
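The main loop plus leftover loop in mat_mul_native_nt_nt is the standard pattern for a K that is not a multiple of K0: full K0 steps first, then a scalar tail. A stripped-down C sketch of that control flow only (no tiles, no vectorization; K0 is illustrative):

#include <stdio.h>

enum { K0 = 4 }; /* illustrative block size */

/* acc += dot(a, b) over K elements, processed K0 at a time with a leftover loop. */
static float dot_with_leftover(const float *a, const float *b, int K)
{
    float acc = 0.f;
    int k = 0;
    for (; k <= K - K0; k += K0) /* main loop: full K0 blocks */
        for (int i = 0; i < K0; ++i)
            acc += a[k + i] * b[k + i];
    for (; k < K; ++k)           /* leftover loop: remaining K % K0 elements */
        acc += a[k] * b[k];
    return acc;
}

int main(void)
{
    float a[6] = { 1, 2, 3, 4, 5, 6 }, b[6] = { 1, 1, 1, 1, 1, 1 };
    printf("%g\n", dot_with_leftover(a, b, 6)); /* 21 */
    return 0;
}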
+
+#if defined(MAT_MUL_NATIVE_NT_T)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS non-transposed, RHS transposed - buffer only
+ *
+ * @note the "batch" here expresses the number of matrix multiplications to run in parallel. However, it
+ * should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
+ * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4).
+ * @note The fused activation function used should be passed with -DACTIVATION_TYPE; -DA_VAL and -DB_VAL are used for the min and max bounds of bounded activation functions.
+ * @note The number of leftover outputs rows/columns must be passed using -DPARTIAL_STORE_N0 and -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_N0=2, -DPARTIAL_STORE_M0=3)
+ * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the rhs tensor must be passed at compile time using -DRHS_TENSOR_TYPE (e.g. -DRHS_TENSOR_TYPE=BUFFER)
+ * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_NT_T)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ * - M0 > 0
+ * - N0 = 1, 2, 3, 4, 8, 16
+ * - K0 = 1, 2, 3, 4, 8, 16 (only 4, 8, 16 if RHS_TENSOR_TYPE=IMAGE)
+ * @note Values > 8 for M0, N0 and K0 are not expected to be efficient
+ *
+ * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: F32/F16
+ * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] lhs_w The width of the lhs tensor
+ * @param[in] lhs_h The height of the lhs tensor
+ * @param[in] lhs_n Number of the matrices (buffers) in the batch
+ * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix
+ * @param[in] rhs_img (Optional) Read only cl_image object for the rhs tensor. Included when RHS_TENSOR_TYPE=IMAGE
+ * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] rhs_w The width of the rhs tensor
+ * @param[in] rhs_h The height of the rhs tensor
+ * @param[in] rhs_n Number of the matrices (buffers) in the batch
+ * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix
+ * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr
+ * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes)
+ * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes)
+ * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor
+ * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor
+ * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor
+ * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
+ * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes)
+ * @param[in] dst_w The width of the dst tensor
+ * @param[in] dst_h The height of the dst tensor
+ * @param[in] dst_n Number of the matrices (buffers) in the batch
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix
+ */
+__kernel void mat_mul_native_nt_t(TENSOR3D_T(lhs, BUFFER),
+ TENSOR3D_T(rhs, RHS_TENSOR_TYPE),
+#ifdef BIAS
+ TENSOR3D_T(bias, BUFFER),
+#endif // defined(BIAS)
+ TENSOR3D_T(dst, BUFFER))
+
+{
+ const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0);
+ const uint y = GET_SPATIAL_IDX(1, M0, PARTIAL_STORE_M0);
+ const uint z = GET_SPATIAL_IDX(2, 1, 0);
+
+ // Compute LHS/RHS/DST matrix address
+ lhs_offset_first_element_in_bytes += y * lhs_stride_y + z * lhs_stride_z;
+ dst_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + y * dst_stride_y + z * dst_stride_z;
+
+ // Initialize the accumulators
+ TILE(DATA_TYPE, M0, N0, acc);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ acc[i].v = 0.f;
+ })
+
+ const int rhs_z = z * rhs_h;
+ int k;
+ for(k = 0; k <= K - K0; k += K0)
+ {
+ TILE(DATA_TYPE, M0, K0, a);
+ TILE(DATA_TYPE, N0, K0, b);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = 0.f;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ b[i].v = 0.f;
+ })
+
+ // Load tile from the lhs/rhs tensors
+ T_LOAD(DATA_TYPE, M0, K0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD(DATA_TYPE, N0, K0, RHS_TENSOR_TYPE, rhs, k, x + rhs_z, 1, rhs_stride_y, b);
+
+#if GPU_ARCH == GPU_ARCH_MIDGARD
+        // This part is written to decrease the number of loop unrollings caused
+        // by T_MMUL. The NT/NT version is partly vectorized and uses fewer loop
+        // unrollings, and the code behaves as expected. Although this is not a
+        // performant solution for the specified architecture, it is necessary
+        // to overcome some limitations.
+ TILE(DATA_TYPE, K0, N0, bt);
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, K0,
+ {
+ bt[j].s[i] = b[i].s[j];
+ })
+ })
+ T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, K0, NT, NT, a, bt, acc);
+#else // GPU_ARCH == GPU_ARCH_MIDGARD
+ T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, K0, NT, T, a, b, acc);
+#endif // GPU_ARCH == GPU_ARCH_MIDGARD
+
+ lhs_offset_first_element_in_bytes += K0 * sizeof(DATA_TYPE);
+ }
+
+#if K % K0 != 0
+ /* Leftover Loop */
+ for(; k < K; ++k)
+ {
+ TILE(DATA_TYPE, M0, 1, a);
+ TILE(DATA_TYPE, N0, 1, b);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = 0.f;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ b[i].v = 0.f;
+ })
+
+ // Load tile from the lhs/rhs tensors
+ T_LOAD(DATA_TYPE, M0, 1, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD(DATA_TYPE, N0, 1, BUFFER, rhs, k, x + rhs_z, 1, rhs_stride_y, b);
+
+#if GPU_ARCH == GPU_ARCH_MIDGARD
+ // See the main loop for the explanation of this part
+ TILE(DATA_TYPE, 1, N0, bt);
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ bt[0].s[i] = b[i].s[0];
+ })
+ T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, 1, NT, NT, a, bt, acc);
+#else // GPU_ARCH == GPU_ARCH_MIDGARD
+ T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, 1, NT, T, a, b, acc);
+#endif // GPU_ARCH == GPU_ARCH_MIDGARD
+
+ lhs_offset_first_element_in_bytes += 1 * sizeof(DATA_TYPE);
+ }
+#endif // K % K0 != 0
+
+ const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0;
+ const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0;
+
+ TILE(int, M0, 1, indirect_buffer);
+ LOOP_UNROLLING(int, _i, 0, 1, M0,
+ {
+ indirect_buffer[_i].v = min(_i, select(M0 - 1, PARTIAL_STORE_M0 - 1, y_cond));
+ });
+
+#ifdef BIAS
+ perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, acc, x);
+#endif // defined(BIAS)
+
+ T_ACTIVATION(DATA_TYPE, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, acc, acc);
+
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, 0, dst_stride_y, x_cond, acc, indirect_buffer);
+}
+#endif // defined(MAT_MUL_NATIVE_NT_T)
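On Midgard, the kernel transposes the loaded N0 x K0 RHS tile into a K0 x N0 tile so the multiply can be issued as NT/NT instead of NT/T. A tiny C illustration of that tile transpose, with illustrative tile sizes:

#include <stdio.h>

enum { N0 = 2, K0 = 3 }; /* illustrative tile sizes */

int main(void)
{
    /* b is loaded as N0 rows of K0 elements (RHS transposed layout). */
    float b[N0][K0] = { { 1, 2, 3 }, { 4, 5, 6 } };
    float bt[K0][N0];

    for (int i = 0; i < N0; ++i) /* bt[j][i] = b[i][j], as in the kernel */
        for (int j = 0; j < K0; ++j)
            bt[j][i] = b[i][j];

    for (int j = 0; j < K0; ++j)
        printf("%g %g\n", bt[j][0], bt[j][1]);
    return 0;
}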
+
+#if defined(MAT_MUL_NATIVE_T_NT)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS transposed, RHS non-transposed - buffer only
+ *
+ * @note the "batch" here expresses the number of matrix multiplications to run in parallel. However, it
+ * should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
+ * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4).
+ * @note The fused activation function used should be passed with -DACTIVATION_TYPE; -DA_VAL and -DB_VAL are used for the min and max bounds of bounded activation functions.
+ * @note The number of leftover outputs rows/columns must be passed using -DPARTIAL_STORE_N0 and -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_N0=2, -DPARTIAL_STORE_M0=3)
+ * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the rhs tensor must be passed at compile time using -DRHS_TENSOR_TYPE (e.g. -DRHS_TENSOR_TYPE=BUFFER)
+ * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_T_NT)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ * - M0 = 1, 2, 3, 4, 8, 16
+ * - N0 = 1, 2, 3, 4, 8, 16 (only 4, 8, 16 if RHS_TENSOR_TYPE=IMAGE)
+ * - K0 > 0
+ * @note Values > 8 for M0 and K0 are not expected to be efficient
+ *
+ * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: F32/F16
+ * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] lhs_w The width of the lhs tensor
+ * @param[in] lhs_h The height of the lhs tensor
+ * @param[in] lhs_n Number of the matrices (buffers) in the batch
+ * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix
+ * @param[in] rhs_img (Optional) Read only cl_image object for the rhs tensor. Included when RHS_TENSOR_TYPE=IMAGE
+ * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] rhs_w The width of the rhs tensor
+ * @param[in] rhs_h The height of the rhs tensor
+ * @param[in] rhs_n Number of the matrices (buffers) in the batch
+ * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix
+ * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr
+ * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes)
+ * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes)
+ * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor
+ * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor
+ * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor
+ * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
+ * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes)
+ * @param[in] dst_w The width of the dst tensor
+ * @param[in] dst_h The height of the dst tensor
+ * @param[in] dst_n Number of the matrices (buffers) in the batch
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix
+ */
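+// A minimal, illustrative set of build options for this kernel (a sketch only; the concrete sizes and the
+// ACTIVATION_TYPE value are assumptions and must satisfy the constraints listed in the notes above):
+//   -DMAT_MUL_NATIVE_T_NT -DDATA_TYPE=float -DM0=4 -DN0=4 -DK0=4 -DK=128
+//   -DPARTIAL_STORE_M0=0 -DPARTIAL_STORE_N0=0 -DRHS_TENSOR_TYPE=BUFFER
+//   -DACTIVATION_TYPE=identity -DA_VAL=0 -DB_VAL=0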
+__kernel void mat_mul_native_t_nt(
+ TENSOR3D_T(lhs, BUFFER),
+ TENSOR3D_T(rhs, RHS_TENSOR_TYPE),
+#ifdef BIAS
+ TENSOR3D_T(bias, BUFFER),
+#endif // defined(BIAS)
+ TENSOR3D_T(dst, BUFFER))
+{
+ const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0);
+ const uint y = GET_SPATIAL_IDX(1, M0, PARTIAL_STORE_M0);
+ const uint z = GET_SPATIAL_IDX(2, 1, 0);
+
+ // Compute LHS/RHS/DST matrix address
+ lhs_offset_first_element_in_bytes += y * sizeof(DATA_TYPE) + z * lhs_stride_z;
+ dst_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + y * dst_stride_y + z * dst_stride_z;
+
+ // Initialize the accumulators
+ TILE(DATA_TYPE, M0, N0, acc);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ acc[i].v = 0.f;
+ })
+
+ const int rhs_z = z * rhs_h;
+ int k;
+ for(k = 0; k <= K - K0; k += K0)
+ {
+ TILE(DATA_TYPE, K0, M0, a);
+ TILE(DATA_TYPE, K0, N0, b);
+
+ LOOP_UNROLLING(int, i, 0, 1, K0,
+ {
+ a[i].v = 0.f;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, K0,
+ {
+ b[i].v = 0.f;
+ })
+
+ // Load tile from the lhs/rhs tensors
+ T_LOAD(DATA_TYPE, K0, M0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD(DATA_TYPE, K0, N0, RHS_TENSOR_TYPE, rhs, x, k + rhs_z, 1, rhs_stride_y, b);
+
+#if GPU_ARCH == GPU_ARCH_MIDGARD
+ // For explanation, see mat_mul_native_nt_t
+ TILE(DATA_TYPE, M0, K0, at);
+ LOOP_UNROLLING(int, i, 0, 1, K0,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, M0,
+ {
+ at[j].s[i] = a[i].s[j];
+ })
+ })
+ T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, K0, NT, NT, at, b, acc);
+#else // GPU_ARCH == GPU_ARCH_MIDGARD
+ T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, K0, T, NT, a, b, acc);
+#endif // GPU_ARCH == GPU_ARCH_MIDGARD
+
+ lhs_offset_first_element_in_bytes += K0 * lhs_stride_y;
+ }
+
+#if K % K0 != 0
+ /* Leftover Loop */
+ for(; k < K; ++k)
+ {
+ TILE(DATA_TYPE, 1, M0, a);
+ TILE(DATA_TYPE, 1, N0, b);
+
+ LOOP_UNROLLING(int, i, 0, 1, 1,
+ {
+ a[i].v = 0.f;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, 1,
+ {
+ b[i].v = 0.f;
+ })
+
+ // Load tile from the lhs/rhs tensors
+ T_LOAD(DATA_TYPE, 1, M0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD(DATA_TYPE, 1, N0, BUFFER, rhs, x, k + rhs_z, 1, rhs_stride_y, b);
+
+#if GPU_ARCH == GPU_ARCH_MIDGARD
+ // For explanation, see mat_mul_native_nt_t
+ TILE(DATA_TYPE, M0, 1, at);
+ LOOP_UNROLLING(int, j, 0, 1, M0,
+ {
+ at[j].s[0] = a[0].s[j];
+ })
+ T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, 1, NT, NT, at, b, acc);
+#else // GPU_ARCH == GPU_ARCH_MIDGARD
+ T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, 1, T, NT, a, b, acc);
+#endif // GPU_ARCH == GPU_ARCH_MIDGARD
+
+ lhs_offset_first_element_in_bytes += 1 * lhs_stride_y;
+ }
+#endif // K % K0 != 0
+
+ const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0;
+ const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0;
+
+ TILE(int, M0, 1, indirect_buffer);
+ LOOP_UNROLLING(int, _i, 0, 1, M0,
+ {
+ indirect_buffer[_i].v = min(_i, select(M0 - 1, PARTIAL_STORE_M0 - 1, y_cond));
+ });
+
+#ifdef BIAS
+ perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, acc, x);
+#endif // defined(BIAS)
+
+ T_ACTIVATION(DATA_TYPE, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, acc, acc);
+
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, 0, dst_stride_y, x_cond, acc, indirect_buffer);
+}
+#endif // defined(MAT_MUL_NATIVE_T_NT)
+
+#if defined(MAT_MUL_NATIVE_T_T)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS transposed, RHS transposed - buffer only
+ *
+ * @note the "batch" here expresses the number of matrix multiplications to run in parallel. However, it
+ * should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
+ * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4).
+ * @note The fused activation function should be passed at compile time using -DACTIVATION_TYPE; -DA_VAL and -DB_VAL are used as the min and max bounds for bounded activation functions.
+ * @note The number of leftover output rows/columns must be passed using -DPARTIAL_STORE_N0 and -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_N0=2, -DPARTIAL_STORE_M0=3)
+ * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the rhs tensor must be passed at compile time using -DRHS_TENSOR_TYPE (e.g. -DRHS_TENSOR_TYPE=BUFFER)
+ * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_T_T)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ * - M0 = 1, 2, 3, 4, 8, 16
+ * - N0 = 1, 2, 3, 4, 8, 16
+ * - K0 = 1, 2, 3, 4, 8, 16 (only 4, 8, 16 if RHS_TENSOR_TYPE=IMAGE)
+ * @note Values > 8 for M0, N0 and K0 are not expected to be efficient
+ *
+ * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: F32/F16
+ * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] lhs_w The width of the lhs tensor
+ * @param[in] lhs_h The height of the lhs tensor
+ * @param[in] lhs_n Number of the matrices (buffers) in the batch
+ * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix
+ * @param[in] rhs_img (Optional) Read only cl_image object for the rhs tensor. Included when RHS_TENSOR_TYPE=IMAGE
+ * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] rhs_w The width of the rhs tensor
+ * @param[in] rhs_h The height of the rhs tensor
+ * @param[in] rhs_n Number of the matrices (buffers) in the batch
+ * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix
+ * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr
+ * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes)
+ * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes)
+ * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor
+ * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor
+ * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor
+ * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
+ * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr,
+ * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes)
+ * @param[in] dst_w The width of the dst tensor
+ * @param[in] dst_h The height of the dst tensor
+ * @param[in] dst_n Number of the matrices (buffers) in the batch
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix
+ */
+__kernel void mat_mul_native_t_t(
+ TENSOR3D_T(lhs, BUFFER),
+ TENSOR3D_T(rhs, RHS_TENSOR_TYPE),
+#ifdef BIAS
+ TENSOR3D_T(bias, BUFFER),
+#endif // defined(BIAS)
+ TENSOR3D_T(dst, BUFFER))
+{
+ const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0);
+ const uint y = GET_SPATIAL_IDX(1, M0, PARTIAL_STORE_M0);
+ const uint z = GET_SPATIAL_IDX(2, 1, 0);
+
+ // Compute LHS/RHS/DST matrix address
+ lhs_offset_first_element_in_bytes += y * sizeof(DATA_TYPE) + z * lhs_stride_z;
+ dst_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + y * dst_stride_y + z * dst_stride_z;
+
+ // Initialize the accumulators
+ TILE(DATA_TYPE, M0, N0, acc);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ acc[i].v = 0.f;
+ })
+
+ const int rhs_z = z * rhs_h;
+ int k;
+ for(k = 0; k <= K - K0; k += K0)
+ {
+ TILE(DATA_TYPE, K0, M0, a);
+ TILE(DATA_TYPE, N0, K0, b);
+
+ LOOP_UNROLLING(int, i, 0, 1, K0,
+ {
+ a[i].v = 0.f;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ b[i].v = 0.f;
+ })
+
+ // Load tile from the lhs/rhs tensors
+ T_LOAD(DATA_TYPE, K0, M0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD(DATA_TYPE, N0, K0, RHS_TENSOR_TYPE, rhs, k, x + rhs_z, 1, rhs_stride_y, b);
+#if GPU_ARCH == GPU_ARCH_MIDGARD
+ // For explanation, see mat_mul_native_nt_t
+ TILE(DATA_TYPE, M0, K0, at);
+ TILE(DATA_TYPE, K0, N0, bt);
+
+ LOOP_UNROLLING(int, i, 0, 1, K0,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, M0,
+ {
+ at[j].s[i] = a[i].s[j];
+ })
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, K0,
+ {
+ bt[j].s[i] = b[i].s[j];
+ })
+ })
+
+ T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, K0, NT, NT, at, bt, acc);
+#else // GPU_ARCH == GPU_ARCH_MIDGARD
+ T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, K0, T, T, a, b, acc);
+#endif // GPU_ARCH == GPU_ARCH_MIDGARD
+
+ lhs_offset_first_element_in_bytes += K0 * lhs_stride_y;
+ }
+
+#if K % K0 != 0
+ /* Leftover Loop */
+ for(; k < K; ++k)
+ {
+ TILE(DATA_TYPE, 1, M0, a);
+ TILE(DATA_TYPE, N0, 1, b);
+
+ LOOP_UNROLLING(int, i, 0, 1, 1,
+ {
+ a[i].v = 0.f;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ b[i].v = 0.f;
+ })
+
+ // Load tile from the lhs/rhs tensors
+ T_LOAD(DATA_TYPE, 1, M0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD(DATA_TYPE, N0, 1, BUFFER, rhs, k, x + rhs_z, 1, rhs_stride_y, b);
+
+#if GPU_ARCH == GPU_ARCH_MIDGARD
+ // For explanation, see mat_mul_native_nt_t
+ TILE(DATA_TYPE, M0, 1, at);
+ TILE(DATA_TYPE, 1, N0, bt);
+
+ LOOP_UNROLLING(int, j, 0, 1, M0,
+ {
+ at[j].s[0] = a[0].s[j];
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ bt[0].s[i] = b[i].s[0];
+ })
+
+ T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, 1, NT, NT, at, bt, acc);
+#else // GPU_ARCH == GPU_ARCH_MIDGARD
+ T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, 1, T, T, a, b, acc);
+#endif // GPU_ARCH == GPU_ARCH_MIDGARD
+
+ lhs_offset_first_element_in_bytes += 1 * lhs_stride_y;
+ }
+#endif // K % K0 != 0
+
+ const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0;
+ const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0;
+
+ TILE(int, M0, 1, indirect_buffer);
+ LOOP_UNROLLING(int, _i, 0, 1, M0,
+ {
+ indirect_buffer[_i].v = min(_i, select(M0 - 1, PARTIAL_STORE_M0 - 1, y_cond));
+ });
+
+#ifdef BIAS
+ perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, acc, x);
+#endif // defined(BIAS)
+
+ T_ACTIVATION(DATA_TYPE, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, acc, acc);
+
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, 0, dst_stride_y, x_cond, acc, indirect_buffer);
+}
+#endif // defined(MAT_MUL_NATIVE_T_T)
diff --git a/src/core/CL/cl_kernels/common/mat_mul_mmul.cl b/src/core/CL/cl_kernels/common/mat_mul_mmul.cl
new file mode 100644
index 0000000000..e549da86d4
--- /dev/null
+++ b/src/core/CL/cl_kernels/common/mat_mul_mmul.cl
@@ -0,0 +1,946 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+#include "tile_helpers.h"
+
+#ifdef BIAS
+// This function performs in-place bias addition for float and half datatypes when bias is enabled.
+// Note The tile's dimensions used for the LHS and RHS matrices (M0, N0) must be passed at compile time using -DN0, -DM0 (e.g. -DN0=8, -DM0=4).
+inline void perform_bias_addition(uchar *bias_ptr, uint bias_offset_first_element_in_bytes, TILE(DATA_TYPE, M0, N0, acc), uint x)
+{
+ TILE(DATA_TYPE, 1, N0, bias_tile);
+
+ // below expands to use bias_ptr and bias_offset_first_element_in_bytes
+ T_LOAD(DATA_TYPE, 1, N0, BUFFER, bias, x, 0, 1, 0, bias_tile);
+
+ // c = c + bias[broadcasted]
+ T_ELTWISE_BROADCAST_ADD_X(DATA_TYPE, M0, N0, acc, bias_tile, acc);
+}
+#endif // defined(BIAS)
+
+#if defined(MAT_MUL_NATIVE_MMUL_NT_NT)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul) using MMUL: LHS non-transposed, RHS non-transposed - buffer only
+ *
+ * @note the "batch" here expresses the number of matrix multiplications to run in parallel. However, it
+ * should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
+ * @note The tile's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=1).
+ * @note The number of leftover output rows/columns must be passed using -DN0_LEFTOVER and -DM0_LEFTOVER (e.g. -DN0_LEFTOVER=2, -DM0_LEFTOVER=3)
+ * @note The MMUL block dimension (MMUL_M0, MMUL_N0, MMUL_K0) must be passed at compile time using -DMMUL_M0, -DMMUL_N0 and -DMMUL_K0 (e.g. -DMMUL_M0=4, -DMMUL_N0=4, -DMMUL_K0=4).
+ * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_MMUL_NT_NT)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ * - M0 > 0
+ * - N0 = 1, 2, 3, 4, 8, 16
+ * - K0 = 1
+ * @note Values > 8 for M0 are not expected to be efficient
+ *
+ * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: F32/F16
+ * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] lhs_w The width of the lhs tensor
+ * @param[in] lhs_h The height of the lhs tensor
+ * @param[in] lhs_n Number of the matrices (buffers) in the batch
+ * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix
+ * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] rhs_w The width of the rhs tensor
+ * @param[in] rhs_h The height of the rhs tensor
+ * @param[in] rhs_n Number of the matrices (buffers) in the batch
+ * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix
+ * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr
+ * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes)
+ * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes)
+ * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor
+ * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor
+ * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor
+ * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
+ * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes)
+ * @param[in] dst_w The width of the dst tensor
+ * @param[in] dst_h The height of the dst tensor
+ * @param[in] dst_n Number of the matrices (buffers) in the batch
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix
+ * @param[in] M Number of rows in LHS matrix
+ * @param[in] N Number of columns in RHS matrix
+ * @param[in] K Number of columns in LHS matrix and rows in RHS matrix, which is multiple of MMUL_K0.
+ */
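+// A minimal, illustrative set of build options for this kernel (a sketch only; the sizes are assumptions
+// chosen to satisfy the constraints above, and -DHALF_PRECISION would additionally be expected for F16,
+// as suggested by the conversion code further below):
+//   -DMAT_MUL_NATIVE_MMUL_NT_NT -DDATA_TYPE=float -DM0=4 -DN0=4 -DK0=1
+//   -DMMUL_M0=4 -DMMUL_N0=4 -DMMUL_K0=4 -DM0_LEFTOVER=0 -DN0_LEFTOVER=0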
+__kernel void mat_mul_native_mmul_nt_nt(
+ TENSOR3D_T(lhs, BUFFER),
+ TENSOR3D_T(rhs, BUFFER),
+#ifdef BIAS
+ TENSOR3D_T(bias, BUFFER),
+#endif // defined(BIAS)
+ TENSOR3D_T(dst, BUFFER),
+ const int M,
+ const int N,
+ const int K)
+{
+#define MMUL_BLOCK_SIZE (MMUL_M0 * MMUL_N0) // MMUL block size for the output matrix
+
+ // The output/destination matrix is divided into "sections". Each section is filled by a group of
+ // threads of size MMUL_BLOCK_SIZE, bundled together according to GWS_x.
+ // Each thread writes to a tile of M0 x N0 (the usual output block size for a thread) in the output matrix.
+ // Therefore, the section dimensions are (MMUL_M0 x M0) x (MMUL_N0 x N0).
+
+ // The GWS is constructed in such a way that the y global id is the y section coordinate,
+ // and the x global id is a transformed thread id: MMUL_BLOCK_SIZE number of consecutive threads
+ // in the x dimension corresponding to a section.
+ // This can be visualized as first obtaining the coordinates of all the sections:
+ // x = [0, (N / N0) / MMUL_N0) --> (N / N0) / MMUL_N0 is the number of sections in x dimension
+ // y = [0, (M / M0) / MMUL_M0) --> (M / M0) / MMUL_M0 is the number of sections in y dimension
+    //           Then multiply the x coordinates by MMUL_BLOCK_SIZE (the number of threads per section) to get the consecutive thread ids in the x dimension
+ // x = [0, ((N / N0) / MMUL_N0) * MMUL_N0 * MMUL_M0)
+    //           x = [0, (N / N0) * MMUL_M0)
+ const uint x0 = get_global_id(0); // [0, (N / N0) * MMUL_M0)
+    // The upper limit is a simplified version of ((N / N0) / MMUL_N0) * MMUL_BLOCK_SIZE
+ const uint y0 = get_global_id(1); // [0, (M / M0) / MMUL_M0)
+ const uint z = get_global_id(2); // Batch
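+    // Illustrative sizing (assumed values): with M = N = 64, M0 = N0 = 4 and MMUL_M0 = MMUL_N0 = 4,
+    // the x dimension of the GWS spans (N / N0) * MMUL_M0 = 64 work-items and the y dimension spans
+    // (M / M0) / MMUL_M0 = 4 work-items, i.e. one MMUL_BLOCK_SIZE group of 16 threads per section.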
+
+ // Get section coordinates
+ const uint section_x = (x0 / MMUL_BLOCK_SIZE);
+ const uint section_y = y0;
+
+ // Within these sections, each thread writes onto a small output block of size M0 x N0
+ // in row major order. A section divided into tiles can be visualized as below.
+ //
+ // (Figure 1)
+ // A Section in the Output Matrix
+ //
+ // _____N0__________N0____________________N0____
+ // | | | | |
+ // | | | | |
+ // M0 | Thread 1 | Thread 2 | ... | Thread |
+ // | | | | MMUL_N0 |
+ // |___________|__________|_________|___________|
+ // | | | |
+ // | | | |
+ // M0 | Thread | . | |
+ // | MMUL_N0+1 | . | | (M0 x MMUL_M0)
+ // |___________| . | |
+ // | . | |
+ // | . | |
+ // | . | |
+ // | |___________|
+ // | | |
+ // | | Thread |
+ // M0 | | MMUL_N0 x |
+ // | | MMUL_M0 |
+ // |________________________________|___________|
+ // N0 x MMUL_N0
+ //
+ // The output matrix has several of these sections. As shown above, each section
+ // will be filled by a separate thread group of size MMUL_BLOCK_SIZE. The overall
+ // section layout of the output matrix is as below. For instance, S(1,1) will be filled
+    // by MMUL_BLOCK_SIZE (possibly equal to 16) threads, as will S(0,1) and the others.
+ //
+ // (Figure 2)
+ // DST Matrix
+ // ____________________________________
+ // | | | | |
+ // | S(0,0) | S(0,1) | ... | S(0, X) |
+ // |________|________|_______|_________|
+ // | | | | |
+ // | S(1,0) | S(1,1) | ... | S(1, X) |
+ // |________|________|_______|_________|
+ // | . | | |
+ // | . | | | Y = (M / M0) / MMUL_M0 - 1 (Max possible section y coordinate)
+ // | . | | | X = (N / N0) / MMUL_N0 - 1 (Max possible section x coordinate)
+ // |________|________|_________________|
+ // | | | | | S(y, x) denotes the section, and y and x are computed in
+ // | S(Y,0) | S(Y,1) | | S(Y, X) | section_y, section_x respectively.
+ // |________|________|_______|_________|
+ //
+ //
+ //
+ //
+    // A complete view involving the three matrices is given below. It exemplifies how the section S(0,0) is computed.
+ //
+ // (Figure 3)
+ // Complete View
+ //
+ // LHS Matrix RHS Matrix DST Matrix
+ //
+ // ___MMUL_K0___________ __MMUL_N0 x N0____________ ___MMUL_N0 x N0____________________
+ // /|xxxxxxxxxx| | /|xxxxxxxxxxxxxxx| | /|xxxxxxxxxxxxxxxxxxx| |
+ // / |xxxxxxxxxx| | MMUK_K0 ||xxxxxxxxxxxxxxx| | / |xxxxxxxxxxxxxxxxxxx| |
+ // MMUL_M0 | |xxxxxxxxxx| ---> | ||xxxxxxxxxxxxxxx| . . . | MMUL_M0 | |xxxxxxxxxxxxxxxxxxx| |
+ // x M0 | |xxxxxxxxxx| | \|_______________|_________| x M0 | |xxxxxxxxxxxxxxxxxxx| ... |
+ // | |xxxxxxxxxx| | | | | |xxxxxxxxxxxxxxxxxxx| |
+ // | |xxxxxxxxxx| | x | | | = \ |xxxxxxxxxxxxxxxxxxx| |
+ // \|__________|_________| | | | \|___________________| |
+ // | | | \/ | | |
+ // | , | |_________________________| | . |
+ // | , | | . |
+ // | , | | . |
+ // |____________________| |_________________________________|
+ //
+ // Horizontal and vertical arrows show the direction of K loop (main loop in the kernel).
+    // Each output section shown above is a zoomed-out version of Figure 1.
+ //
+    // In each iteration of the main loop, the LHS matrix traverses rightward and the RHS matrix traverses downward.
+    // The LHS section of (MMUL_M0 x M0) x MMUL_K0 and the RHS section of MMUL_K0 x (MMUL_N0 x N0) are multiplied
+    // "cooperatively" using arm_matrix_multiply calls, and the result is accumulated into the output (DST) section
+    // of size (MMUL_M0 x M0) x (MMUL_N0 x N0) shown with 'x' signs.
+ //
+ // If it was a single thread, this multiplication would have been straightforward with a T_MMUL call.
+ // However, since it involves multiple threads working together using the aforementioned extension, it
+ // works slightly differently.
+ //
+ // Here is how threads access the LHS and RHS matrices:
+ // (Assume MMUL_K0 = MMUL_N0 = MMUL_M0 = 4 because the following diagram is heavily dependent on this)
+ //
+ // (Figure 4)
+ // Thread Access Layouts in LHS & RHS matrices
+ //
+ // LHS matrix RHS Matrix
+ // ___________________________ __________N0 times______N0 times____________________N0 times_______
+ // |__T0__|__T1__|__T2__|__T3__| |__T0__| ... |__T0__|__T1__| ... |__T1__| ... |__T3__| ... |__T3__|
+ // |__T0__| ... | |__T4__| ... |__T4__|__T5__| ... |__T5__| ... |__T7__| ... |__T7__|
+ // M0 | . . | |__T8__| ... |__T8__|__T9__| ... |__T9__| ... |__T11_| ... |__T11_|
+ // Times | . . | |__T12_|_____|__T12_|__T13_|______|__T13_|_____|__T15_|_____|__T15_|
+ // | . . | X
+ // |__T0__|__T1__|__T2__|__T3__|
+ // |__T4__|__T5__|__T6__|__T7__|
+ // |__T4__|__T5__|__T6__|__T7__|
+ // M0 | . . |
+ // Times | . . |
+ // | . . |
+ // |__T4__|__T5__|__T6__|__T7__|
+ // |__T8__|__T9__|__T10_|__T11_|
+ // M0 | . |
+ // Times | . |
+ // | . |
+ // |__T12_|__T13_|__T14_|__T15_|
+ // M0 | . |
+ // Times | . |
+ // | . |
+ // |__T12_|__T13_|__T14_|__T15_|
+ //
+ //
+    // This access layout is designed such that the threads access contiguous elements of each matrix (in terms of row/column).
+ // To multiply these large sections, the arm_matrix_multiply call is made for each of the M0xN0 elements. So, for each
+ // combination of m0 and n0 (iterators of M0 and N0 from 0 to M0-1 and N0-1 respectively), one arm_matrix_multiply call is
+ // made, and MMUL_BLOCK_SIZE number of threads compute the result.
+ //
+ // The matrix multiplication taking place in this extension
+ // is an "interleaved" one, because, for example, if m0=0 and n0=0, i.e. the first iteration, we would use the first,
+ // M0-th, 2M0-th and 3M0-th rows of the LHS matrix. Similarly, we jump N0 steps in the RHS matrix. This is how we access
+ // one element for each thread in a single (m0, n0) loop.
+ //
+ // For example, if we have
+ // - a 8 x 4 LHS section
+ // - 4 x 8 RHS section
+ // - Each vector variable ai, bj represent a 4x1 vector
+ // - ^T (superscript T) denotes transpose
+ // - M0 = N0 = 2
+ // - MMUL_N0 = MMUL_M0 = MMUL_K0 = 4
+ //
+ // (Figure 5)
+ // Mathematical view of the Matrix Multiplication
+ //
+ // LHS RHS DST
+ // [ a1^T ] [ b1 b2 b3 b4 b5 b6 b7 ] [ a1^Tb1 a1^Tb2 a1^Tb3 ... a1^Tb7 ]
+ // [ a2^T ] 4 x 8 [ a2^Tb1 a2^Tb2 a2^Tb3 ... a2^Tb7 ]
+ // [ a3^T ] [ ]
+ // [ a4^T ] = [ . . ]
+ // [ a5^T ] X [ . . ]
+ // [ a6^T ] [ . . ]
+ // [ a7^T ] [ ]
+ // [ a8^T ] [ a7^Tb1 a7^Tb2 a7^Tb3 ... a7^Tb7 ]
+ // 8 x 4 8 x 8
+ //
+ //
+ // For the first iteration, i.e. (m0, n0) = (0, 0), the arm_matrix_multiply would multiply the following matrices:
+ //
+    //   [ a1^T ]   [ b1 b3 b5 b7 ]   [ a1^Tb1  a1^Tb3  a1^Tb5  a1^Tb7 ]
+    //   [ a3^T ] x      4 x 4      = [ a3^Tb1  a3^Tb3  a3^Tb5  a3^Tb7 ]
+    //   [ a5^T ]                     [ a5^Tb1  a5^Tb3  a5^Tb5  a5^Tb7 ]
+    //   [ a7^T ]                     [ a7^Tb1  a7^Tb3  a7^Tb5  a7^Tb7 ]
+    //    4 x 4                                    4 x 4
+ // The elements calculated in the 4x4 output block are the "interleaved" elements in the DST above.
+ // When we follow for each combination of (m0, n0), every element of the DST matrix "section" is filled.
+ //
+
+ // Get thread coordinates within an mmul block (of size MMUL_BLOCK_SIZE)
+    // Since threads are grouped in the x dimension, the x-dim global id modulo MMUL_BLOCK_SIZE is the
+    // thread id within the group, ranging from 0 to MMUL_BLOCK_SIZE-1, because the thread numbering
+    // is in row-major order.
+ const uint thread_id = (x0 % MMUL_BLOCK_SIZE);
+ const uint thread_x = thread_id % MMUL_N0;
+ const uint thread_y = (thread_id / MMUL_N0);
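+    // Worked example (assumed values): with MMUL_M0 = MMUL_N0 = 4, MMUL_BLOCK_SIZE is 16; a work-item
+    // with x0 = 25 falls in section_x = 25 / 16 = 1 and has thread_id = 25 % 16 = 9, giving
+    // thread_x = 9 % 4 = 1 and thread_y = 9 / 4 = 2 inside its MMUL block.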
+
+ // Starting destination coordinates
+ // Note: We need to clamp dst_x and dst_y because we always need to execute a complete MMUL block! Only after the matrix multiplication
+ // part can we exit the kernel if it is out-of-bound. Remember, we have a cooperative matrix multiplication. Therefore, we need a full block to get the correct results
+ // Although we will never write out-of-bound, we still need this clamp to ensure that we do not read out-of-bound either.
+ // The unclamped dst coordinates can be calculated easily from the output section coordinates and the thread coordinates (see above figure).
+
+ // See Figure 1 & 2. Thread step size is N0 and M0,
+ // Section step size is N0 x MMUL_N0 and M0 x MMUL_M0
+ // respectively for x, y dimensions.
+ const uint dst_x_unclamped = thread_x * N0 + section_x * N0 * MMUL_N0;
+ const uint dst_y_unclamped = thread_y * M0 + section_y * M0 * MMUL_M0;
+ const uint dst_x = min(dst_x_unclamped, (uint)(N - N0));
+ const uint dst_y = min(dst_y_unclamped, (uint)(M - M0));
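+    // Illustrative example (assumed values): with N0 = 4 and N = 20, a thread whose dst_x_unclamped is 20
+    // is clamped to dst_x = min(20, 20 - 4) = 16, so its loads stay in bounds; it still participates in
+    // the cooperative multiplication below but never stores, because of the dst_x_unclamped >= N check
+    // after the main loop.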
+
+ // Starting LHS coordinates
+ const uint lhs_x = thread_x;
+ const uint lhs_y = dst_y;
+
+ // Starting RHS coordinates
+ const uint rhs_x = dst_x;
+ const uint rhs_y = thread_y;
+
+ // Compute LHS/RHS/DST matrix address
+ lhs_offset_first_element_in_bytes += lhs_x * sizeof(DATA_TYPE) + lhs_y * lhs_stride_y + z * lhs_stride_z;
+ rhs_offset_first_element_in_bytes += rhs_x * sizeof(DATA_TYPE) + rhs_y * rhs_stride_y + z * rhs_stride_z;
+ dst_offset_first_element_in_bytes += dst_x * sizeof(DATA_TYPE) + dst_y * dst_stride_y + z * dst_stride_z;
+
+ // Initialize the accumulators
+    // The MMUL extension accumulates the result in F32 for both F32 and F16
+ TILE(float, M0, N0, c_f32);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c_f32[i].v = 0;
+ })
+
+ for(int k = 0; k < K; k += MMUL_K0)
+ {
+ // A tile of M0xK0 but K0 must be set to 1
+ TILE(DATA_TYPE, M0, 1, a);
+ // A tile of K0xN0 but K0 must be set to 1
+ TILE(DATA_TYPE, 1, N0, b);
+
+ // Load tile from the lhs/rhs tensors
+ T_LOAD(DATA_TYPE, M0, 1, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD(DATA_TYPE, 1, N0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
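+        // Cooperative multiply: each of the M0 x N0 arm_matrix_multiply calls below combines this
+        // thread's single (a, b) element pair with those of the other MMUL_BLOCK_SIZE - 1 threads
+        // in the same block, accumulating one interleaved element of the output section per thread.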
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ c_f32[m0].s[n0] = arm_matrix_multiply(a[m0].s[0], b[0].s[n0], c_f32[m0].s[n0]);
+ })
+ })
+
+ lhs_offset_first_element_in_bytes += MMUL_K0 * sizeof(DATA_TYPE);
+ rhs_offset_first_element_in_bytes += MMUL_K0 * rhs_stride_y;
+ }
+
+ // For threads "outside" of the dst bound, we do not write but we have to "read" (arm_matrix_multiply). That's why this needs to happen after arm_matrix_multiply
+ if(dst_x_unclamped >= N || dst_y_unclamped >= M)
+ {
+ return;
+ }
+
+#if defined(HALF_PRECISION)
+ TILE(DATA_TYPE, M0, N0, c);
+
+ // Conversion required for the half precision
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ c[m0].s[n0] = c_f32[m0].s[n0];
+ })
+ })
+#else // defined(HALF_PRECISION)
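+// No conversion is needed for F32: alias the F32 accumulator tile as the output tile so that the
+// bias addition and store code below is shared between the F16 and F32 paths.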
+#define c c_f32
+#endif // defined(HALF_PRECISION)
+
+#ifdef BIAS
+ perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, c, dst_x);
+#endif // defined(BIAS)
+
+ if(dst_x + N0 <= N || N0_LEFTOVER == 0)
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE(N0)
+ (c[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+ else
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE_PARTIAL(N0, N0_LEFTOVER)
+ (c[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+
+#undef MMUL_BLOCK_SIZE
+}
+#endif // defined(MAT_MUL_NATIVE_MMUL_NT_NT)
+
+#if defined(MAT_MUL_NATIVE_MMUL_T_NT)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul) using MMUL: LHS transposed, RHS non-transposed - buffer only
+ *
+ * @note the "batch" here expresses the number of matrix multiplications to run in parallel. However, it
+ * should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
+ * @note The tile's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=1).
+ * @note The number of leftover output rows/columns must be passed using -DN0_LEFTOVER and -DM0_LEFTOVER (e.g. -DN0_LEFTOVER=2, -DM0_LEFTOVER=3)
+ * @note The MMUL block dimension (MMUL_M0, MMUL_N0, MMUL_K0) must be passed at compile time using -DMMUL_M0, -DMMUL_N0 and -DMMUL_K0 (e.g. -DMMUL_M0=4, -DMMUL_N0=4, -DMMUL_K0=4).
+ * @note The dimension K must be passed at compile time using -DK (e.g. -DK=4). K must be a multiple of MMUL_K0
+ * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_MMUL_T_NT)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ * - M0 = 1, 2, 3, 4, 8, 16
+ * - N0 = 1, 2, 3, 4, 8, 16
+ * - K0 = 1
+ * @note Values > 8 for M0 are not expected to be efficient
+ *
+ * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: F32/F16
+ * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] lhs_w The width of the lhs tensor
+ * @param[in] lhs_h The height of the lhs tensor
+ * @param[in] lhs_n Number of the matrices (buffers) in the batch
+ * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix
+ * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] rhs_w The width of the rhs tensor
+ * @param[in] rhs_h The height of the rhs tensor
+ * @param[in] rhs_n Number of the matrices (buffers) in the batch
+ * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix
+ * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr
+ * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes)
+ * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes)
+ * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor
+ * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor
+ * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor
+ * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
+ * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes)
+ * @param[in] dst_w The width of the dst tensor
+ * @param[in] dst_h The height of the dst tensor
+ * @param[in] dst_n Number of the matrices (buffers) in the batch
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix
+ * @param[in] M Number of rows in DST matrix
+ * @param[in] N Number of columns in DST matrix
+ * @param[in] K Number of rows in LHS and RHS matrices, which is multiple of MMUL_K0.
+ */
+__kernel void mat_mul_native_mmul_t_nt(
+ TENSOR3D_T(lhs, BUFFER),
+ TENSOR3D_T(rhs, BUFFER),
+#ifdef BIAS
+ TENSOR3D_T(bias, BUFFER),
+#endif // defined(BIAS)
+ TENSOR3D_T(dst, BUFFER),
+ const int M,
+ const int N,
+ const int K)
+{
+#define MMUL_BLOCK_SIZE (MMUL_M0 * MMUL_N0)
+    // For an explanation of how this kernel works, please refer to the NT/NT kernel; this kernel makes only minor modifications to it.
+
+ const uint x0 = get_global_id(0); // [0, (N / N0) * MMUL_M0)
+    // The upper limit is a simplified version of ((N / N0) / MMUL_N0) * MMUL_BLOCK_SIZE
+ const uint y0 = get_global_id(1); // [0, (M / M0) / MMUL_M0)
+ const uint z = get_global_id(2); // Batch
+
+ // Get section coordinates
+ const uint section_x = (x0 / MMUL_BLOCK_SIZE);
+ const uint section_y = y0;
+
+ // Get thread coordinates
+ uint thread_id = (x0 % MMUL_BLOCK_SIZE);
+ uint thread_x = thread_id % MMUL_N0;
+ uint thread_y = (thread_id / MMUL_N0);
+
+    // See the NT/NT kernel for explanations
+ const uint dst_x_unclamped = thread_x * N0 + section_x * N0 * MMUL_N0;
+ const uint dst_y_unclamped = thread_y * M0 + section_y * M0 * MMUL_M0;
+ const uint dst_x = min(dst_x_unclamped, (uint)(N - N0));
+ const uint dst_y = min(dst_y_unclamped, (uint)(M - M0));
+
+ // Starting LHS coordinates
+ uint lhs_x = dst_y;
+ uint lhs_y = thread_x;
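+    // The LHS is stored transposed (K along the height, M along the width), so compared with the NT/NT
+    // kernel the coordinates swap roles: lhs_x indexes the output rows (dst_y) and lhs_y holds this
+    // thread's position along K (thread_x).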
+
+ // Starting RHS coordinates
+ uint rhs_x = dst_x;
+ uint rhs_y = thread_y;
+
+ // Compute LHS/RHS/DST matrix address
+ lhs_offset_first_element_in_bytes += lhs_x * sizeof(DATA_TYPE) + lhs_y * lhs_stride_y + z * lhs_stride_z;
+ rhs_offset_first_element_in_bytes += rhs_x * sizeof(DATA_TYPE) + rhs_y * rhs_stride_y + z * rhs_stride_z;
+ dst_offset_first_element_in_bytes += dst_x * sizeof(DATA_TYPE) + dst_y * dst_stride_y + z * dst_stride_z;
+
+ // Initialize the accumulators
+    // The MMUL extension accumulates the result in F32 for both F32 and F16
+ TILE(float, M0, N0, c_f32);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c_f32[i].v = 0;
+ })
+
+ for(int k = 0; k < K; k += MMUL_K0)
+ {
+ TILE(DATA_TYPE, 1, M0, a);
+ TILE(DATA_TYPE, 1, N0, b);
+
+ // Load tile from the lhs/rhs tensors
+ T_LOAD(DATA_TYPE, 1, M0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD(DATA_TYPE, 1, N0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ c_f32[m0].s[n0] = arm_matrix_multiply(a[0].s[m0], b[0].s[n0], c_f32[m0].s[n0]);
+ })
+ })
+
+ lhs_offset_first_element_in_bytes += MMUL_K0 * lhs_stride_y;
+ rhs_offset_first_element_in_bytes += MMUL_K0 * rhs_stride_y;
+ }
+
+ // For threads "outside" of the dst bound, we do not write but we have to "read" (arm_matrix_multiply). That's why this needs to happen after arm_matrix_multiply
+ if(dst_x_unclamped >= N || dst_y_unclamped >= M)
+ {
+ return;
+ }
+
+#if defined(HALF_PRECISION)
+ TILE(DATA_TYPE, M0, N0, c);
+
+ // Conversion required for the half precision
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ c[m0].s[n0] = c_f32[m0].s[n0];
+ })
+ })
+#else // defined(HALF_PRECISION)
+#define c c_f32
+#endif // defined(HALF_PRECISION)
+
+#ifdef BIAS
+ perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, c, dst_x);
+#endif // defined(BIAS)
+
+ if(dst_x + N0 <= N || N0_LEFTOVER == 0)
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE(N0)
+ (c[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+ else
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE_PARTIAL(N0, N0_LEFTOVER)
+ (c[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+
+#undef MMUL_BLOCK_SIZE
+}
+#endif // defined(MAT_MUL_NATIVE_MMUL_T_NT)
+
+#if defined(MAT_MUL_NATIVE_MMUL_NT_T)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul) using MMUL: LHS non-transposed, RHS transposed - buffer only
+ *
+ * @note the "batch" here expresses the number of matrix multiplications to run in parallel. However, it
+ * should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
+ * @note The tile's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=1).
+ * @note The number of leftover output rows/columns must be passed using -DN0_LEFTOVER and -DM0_LEFTOVER (e.g. -DN0_LEFTOVER=2, -DM0_LEFTOVER=3)
+ * @note The MMUL block dimension (MMUL_M0, MMUL_N0, MMUL_K0) must be passed at compile time using -DMMUL_M0, -DMMUL_N0 and -DMMUL_K0 (e.g. -DMMUL_M0=4, -DMMUL_N0=4, -DMMUL_K0=4).
+ * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_MMUL_NT_T)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ * - M0 > 0
+ * - N0 = 1, 2, 3, 4, 8, 16
+ * - K0 = 1
+ * @note Values > 8 for M0 are not expected to be efficient
+ *
+ * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: F32/F16
+ * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] lhs_w The width of the lhs tensor
+ * @param[in] lhs_h The height of the lhs tensor
+ * @param[in] lhs_n Number of the matrices (buffers) in the batch
+ * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix
+ * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] rhs_w The width of the rhs tensor
+ * @param[in] rhs_h The height of the rhs tensor
+ * @param[in] rhs_n Number of the matrices (buffers) in the batch
+ * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix
+ * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr
+ * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes)
+ * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes)
+ * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor
+ * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor
+ * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor
+ * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
+ * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes)
+ * @param[in] dst_w The width of the dst tensor
+ * @param[in] dst_h The height of the dst tensor
+ * @param[in] dst_n Number of the matrices (buffers) in the batch
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix
+ * @param[in] M Number of rows in LHS matrix
+ * @param[in] N Number of columns in RHS matrix
+ * @param[in] K Number of columns in LHS matrix and columns in RHS matrix, which is multiple of MMUL_K0.
+ */
+__kernel void mat_mul_native_mmul_nt_t(
+ TENSOR3D_T(lhs, BUFFER),
+ TENSOR3D_T(rhs, BUFFER),
+#ifdef BIAS
+ TENSOR3D_T(bias, BUFFER),
+#endif // defined(BIAS)
+ TENSOR3D_T(dst, BUFFER),
+ const int M,
+ const int N,
+ const int K)
+{
+#define MMUL_BLOCK_SIZE (MMUL_M0 * MMUL_N0)
+    // For an explanation of how this kernel works, please refer to the NT/NT kernel; this kernel makes only minor modifications to it.
+
+ const uint x0 = get_global_id(0); // [0, (N / N0) * MMUL_M0)
+    // The upper limit is a simplified version of ((N / N0) / MMUL_N0) * MMUL_BLOCK_SIZE
+ const uint y0 = get_global_id(1); // [0, (M / M0) / MMUL_M0)
+ const uint z = get_global_id(2); // Batch
+
+ // Get block coordinates
+ const uint section_x = (x0 / MMUL_BLOCK_SIZE);
+ const uint section_y = y0;
+
+ // Get thread coordinates within a block
+ const uint thread_id = (x0 % MMUL_BLOCK_SIZE);
+ const uint thread_x = thread_id % MMUL_N0;
+ const uint thread_y = (thread_id / MMUL_N0);
+
+ // Starting destination coordinates
+ // Note: We need to clamp dst_x and dst_y because we always need to execute a complete MMUL block! Only after the matrix multiplication
+ // part can we exit the kernel if it is out-of-bound. Remember, we have a cooperative matrix multiplication. Therefore, we need a full block to get the correct results
+ // Although we will never write out-of-bound, we still need this clamp to ensure that we do not read out-of-bound either.
+ const uint dst_x_unclamped = thread_x * N0 + section_x * N0 * MMUL_N0;
+ const uint dst_y_unclamped = thread_y * M0 + section_y * M0 * MMUL_M0;
+ const uint dst_x = min(dst_x_unclamped, (uint)(N - N0));
+ const uint dst_y = min(dst_y_unclamped, (uint)(M - M0));
+
+ // Starting LHS coordinates
+ const uint lhs_x = thread_x;
+ const uint lhs_y = dst_y;
+
+ // Starting RHS coordinates
+ const uint rhs_x = thread_y;
+ const uint rhs_y = dst_x;
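+    // The RHS is stored transposed (N along the height, K along the width), so compared with the NT/NT
+    // kernel the coordinates swap roles: rhs_x holds this thread's position along K (thread_y) and
+    // rhs_y indexes the output columns (dst_x).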
+
+ // Compute LHS/RHS/DST matrix address
+ lhs_offset_first_element_in_bytes += lhs_x * sizeof(DATA_TYPE) + lhs_y * lhs_stride_y + z * lhs_stride_z;
+ rhs_offset_first_element_in_bytes += rhs_x * sizeof(DATA_TYPE) + rhs_y * rhs_stride_y + z * rhs_stride_z;
+ dst_offset_first_element_in_bytes += dst_x * sizeof(DATA_TYPE) + dst_y * dst_stride_y + z * dst_stride_z;
+
+ // Initialize the accumulators
+    // The MMUL extension accumulates the result in F32 for both F32 and F16
+ TILE(float, M0, N0, c_f32);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c_f32[i].v = 0;
+ })
+
+ for(int k = 0; k < K; k += MMUL_K0)
+ {
+ // A tile of M0xK0 but K0 must be set to 1
+ TILE(DATA_TYPE, M0, 1, a);
+ // A tile of N0xK0 but K0 must be set to 1
+ TILE(DATA_TYPE, N0, 1, b);
+
+ // Load tile from the lhs/rhs tensors
+ T_LOAD(DATA_TYPE, M0, 1, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD(DATA_TYPE, N0, 1, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ c_f32[m0].s[n0] = arm_matrix_multiply(a[m0].s[0], b[n0].s[0], c_f32[m0].s[n0]);
+ })
+ })
+
+ lhs_offset_first_element_in_bytes += MMUL_K0 * sizeof(DATA_TYPE);
+ rhs_offset_first_element_in_bytes += MMUL_N0 * sizeof(DATA_TYPE);
+ }
+
+ // For threads "outside" of the dst bound, we do not write but we have to "read" (arm_matrix_multiply). That's why this needs to happen after arm_matrix_multiply
+ if(dst_x_unclamped >= N || dst_y_unclamped >= M)
+ {
+ return;
+ }
+
+#if defined(HALF_PRECISION)
+ TILE(DATA_TYPE, M0, N0, c);
+
+ // Conversion required for the half precision
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ c[m0].s[n0] = c_f32[m0].s[n0];
+ })
+ })
+#else // defined(HALF_PRECISION)
+#define c c_f32
+#endif // defined(HALF_PRECISION)
+
+#ifdef BIAS
+ perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, c, dst_x);
+#endif // defined(BIAS)
+
+ if(dst_x + N0 <= N || N0_LEFTOVER == 0)
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE(N0)
+ (c[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+ else
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE_PARTIAL(N0, N0_LEFTOVER)
+ (c[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+
+#undef MMUL_BLOCK_SIZE
+}
+#endif // defined(MAT_MUL_NATIVE_MMUL_NT_T)
+
+#if defined(MAT_MUL_NATIVE_MMUL_T_T)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul) using MMUL: LHS transposed, RHS transposed - buffer only
+ *
+ * @note the "batch" here expresses the number of matrix multiplications to run in parallel. However, it
+ * should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
+ * @note The tile's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=1).
+ * @note The number of leftover output rows/columns must be passed using -DN0_LEFTOVER and -DM0_LEFTOVER (e.g. -DN0_LEFTOVER=2, -DM0_LEFTOVER=3)
+ * @note The MMUL block dimension (MMUL_M0, MMUL_N0, MMUL_K0) must be passed at compile time using -DMMUL_M0, -DMMUL_N0 and -DMMUL_K0 (e.g. -DMMUL_M0=4, -DMMUL_N0=4, -DMMUL_K0=4).
+ * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_MMUL_T_T)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ * - M0 = 1, 2, 3, 4, 8, 16
+ * - N0 = 1, 2, 3, 4, 8, 16
+ * - K0 = 1
+ * @note Values > 8 for M0 are not expected to be efficient
+ *
+ * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: F32/F16
+ * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] lhs_w The width of the lhs tensor
+ * @param[in] lhs_h The height of the lhs tensor
+ * @param[in] lhs_n Number of the matrices (buffers) in the batch
+ * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix
+ * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] rhs_w The width of the rhs tensor
+ * @param[in] rhs_h The height of the rhs tensor
+ * @param[in] rhs_n Number of the matrices (buffers) in the batch
+ * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix
+ * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr
+ * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes)
+ * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes)
+ * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor
+ * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor
+ * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor
+ * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
+ * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes)
+ * @param[in] dst_w The width of the dst tensor
+ * @param[in] dst_h The height of the dst tensor
+ * @param[in] dst_n Number of the matrices (buffers) in the batch
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix
+ * @param[in] M Number of rows in LHS matrix
+ * @param[in] N Number of columns in RHS matrix
+ * @param[in] K Number of rows in LHS matrix and columns in RHS matrix, which is multiple of MMUL_K0.
+ */
+__kernel void mat_mul_native_mmul_t_t(
+ TENSOR3D_T(lhs, BUFFER),
+ TENSOR3D_T(rhs, BUFFER),
+#ifdef BIAS
+ TENSOR3D_T(bias, BUFFER),
+#endif // defined(BIAS)
+ TENSOR3D_T(dst, BUFFER),
+ const int M,
+ const int N,
+ const int K)
+{
+#define MMUL_BLOCK_SIZE (MMUL_M0 * MMUL_N0)
+    // For an explanation of how this kernel works, please refer to the NT/NT kernel; this kernel makes only minor modifications to it.
+
+ const uint x0 = get_global_id(0); // [0, (N / N0) * MMUL_M0)
+    // The upper limit is a simplified version of ((N / N0) / MMUL_N0) * MMUL_BLOCK_SIZE
+ const uint y0 = get_global_id(1); // [0, (M / M0) / MMUL_M0)
+ const uint z = get_global_id(2); // Batch
+
+ // Get block coordinates
+ const uint section_x = (x0 / MMUL_BLOCK_SIZE);
+ const uint section_y = y0;
+
+ // Get thread coordinates within a block
+ const uint thread_id = (x0 % MMUL_BLOCK_SIZE);
+ const uint thread_x = thread_id % MMUL_N0;
+ const uint thread_y = (thread_id / MMUL_N0);
+
+ // Starting destination coordinates
+ // Note: We need to clamp dst_x and dst_y because we always need to execute a complete MMUL block! Only after the matrix multiplication
+ // part can we exit the kernel if it is out-of-bound. Remember, we have a cooperative matrix multiplication. Therefore, we need a full block to get the correct results
+ // Although we will never write out-of-bound, we still need this clamp to ensure that we do not read out-of-bound either.
+ const uint dst_x_unclamped = thread_x * N0 + section_x * N0 * MMUL_N0;
+ const uint dst_y_unclamped = thread_y * M0 + section_y * M0 * MMUL_M0;
+ const uint dst_x = min(dst_x_unclamped, (uint)(N - N0));
+ const uint dst_y = min(dst_y_unclamped, (uint)(M - M0));
+
+ // Starting LHS coordinates
+ const uint lhs_x = dst_y;
+ const uint lhs_y = thread_x;
+
+ // Starting RHS coordinates
+ const uint rhs_x = thread_y;
+ const uint rhs_y = dst_x;
+
+ // Compute LHS/RHS/DST matrix address
+ lhs_offset_first_element_in_bytes += lhs_x * sizeof(DATA_TYPE) + lhs_y * lhs_stride_y + z * lhs_stride_z;
+ rhs_offset_first_element_in_bytes += rhs_x * sizeof(DATA_TYPE) + rhs_y * rhs_stride_y + z * rhs_stride_z;
+ dst_offset_first_element_in_bytes += dst_x * sizeof(DATA_TYPE) + dst_y * dst_stride_y + z * dst_stride_z;
+
+ // Initialize the accumulators
+    // The MMUL extension accumulates the result in F32 for both F32 and F16
+ TILE(float, M0, N0, c_f32);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c_f32[i].v = 0;
+ })
+
+ for(int k = 0; k < K; k += MMUL_K0)
+ {
+ // A tile of K0xM0 but K0 must be set to 1
+ TILE(DATA_TYPE, 1, M0, a);
+ // A tile of N0xK0 but K0 must be set to 1
+ TILE(DATA_TYPE, N0, 1, b);
+
+ // Load tile from the lhs/rhs tensors
+ T_LOAD(DATA_TYPE, 1, M0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD(DATA_TYPE, N0, 1, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ c_f32[m0].s[n0] = arm_matrix_multiply(a[0].s[m0], b[n0].s[0], c_f32[m0].s[n0]);
+ })
+ })
+
+ lhs_offset_first_element_in_bytes += MMUL_K0 * lhs_stride_y;
+ rhs_offset_first_element_in_bytes += MMUL_N0 * sizeof(DATA_TYPE);
+ }
+
+ // For threads "outside" of the dst bound, we do not write but we have to "read" (arm_matrix_multiply). That's why this needs to happen after arm_matrix_multiply
+ if(dst_x_unclamped >= N || dst_y_unclamped >= M)
+ {
+ return;
+ }
+
+#if defined(HALF_PRECISION)
+ TILE(DATA_TYPE, M0, N0, c);
+
+ // Conversion required for the half precision
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ c[m0].s[n0] = c_f32[m0].s[n0];
+ })
+ })
+#else // defined(HALF_PRECISION)
+#define c c_f32
+#endif // defined(HALF_PRECISION)
+
+#ifdef BIAS
+ perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, c, dst_x);
+#endif // defined(BIAS)
+
+ if(dst_x + N0 <= N || N0_LEFTOVER == 0)
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE(N0)
+ (c[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+ else
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE_PARTIAL(N0, N0_LEFTOVER)
+ (c[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+
+#undef MMUL_BLOCK_SIZE
+}
+#endif // defined(MAT_MUL_NATIVE_MMUL_T_T)
diff --git a/src/core/CL/cl_kernels/common/mat_mul_quantized.cl b/src/core/CL/cl_kernels/common/mat_mul_quantized.cl
new file mode 100644
index 0000000000..7f81ac4549
--- /dev/null
+++ b/src/core/CL/cl_kernels/common/mat_mul_quantized.cl
@@ -0,0 +1,833 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "activation_float_helpers.h"
+#include "helpers.h"
+#include "tile_helpers.h"
+
+#ifdef BIAS
+// This function performs in-place bias addition for integer datatype when bias is enabled.
+// Note The tile's dimensions used for the LHS and RHS matrices (M0, N0) must be passed at compile time using -DN0, -DM0 (e.g. -DN0=8, -DM0=4).
+inline void perform_bias_addition(uchar *bias_ptr, uint bias_offset_first_element_in_bytes, TILE(int, M0, N0, acc), uint x)
+{
+ TILE(int, 1, N0, bias_tile);
+
+ // below expands to use bias_ptr and bias_offset_first_element_in_bytes
+ T_LOAD(int, 1, N0, BUFFER, bias, x, 0, 1, 0, bias_tile);
+
+ // c = c + bias[broadcasted]
+ T_ELTWISE_BROADCAST_ADD_X(int, M0, N0, acc, bias_tile, acc);
+}
+#endif // defined(BIAS)
+
+#if defined(MAT_MUL_NATIVE_QUANTIZED_NT_NT)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS non-transposed, RHS non-transposed - buffer only
+ *
+ * @note the "batch" here expresses the number of matrix multiplications to run in parallel. However, it
+ * should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=uchar)
+ * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4).
+ * @note The number of leftover outputs rows/columns must be passed using -DPARTIAL_STORE_N0 and -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_N0=2, -DPARTIAL_STORE_M0=3)
+ * @note The fused activation function should be passed with -DACTIVATION_TYPE; -DA_VAL and -DB_VAL are used as the min and max bounds for the relu and bounded relu operations.
+ * @note The value of 0 in quantized format is equivalent to the quantization offset of the output data. This should be passed with -DZERO_POINT
+ * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6)
+ * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_QUANTIZED_NT_NT)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ * - M0 > 0
+ * - N0 = 1, 2, 3, 4, 8, 16
+ * - K0 = 1, 2, 3, 4, 8, 16
+ * @note Values > 8 for M0 are not expected to be efficient
+ *
+ * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: QASYMM8_SIGNED/QASYMM8
+ * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] lhs_w The width of the lhs tensor
+ * @param[in] lhs_h The height of the lhs tensor
+ * @param[in] lhs_n Number of the matrices (buffers) in the batch
+ * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix
+ * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] rhs_w The width of the rhs tensor
+ * @param[in] rhs_h The height of the rhs tensor
+ * @param[in] rhs_n Number of the matrices (buffers) in the batch
+ * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix
+ * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr
+ * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes)
+ * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes)
+ * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor
+ * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor
+ * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor
+ * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
+ * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes)
+ * @param[in] dst_w The width of the dst tensor
+ * @param[in] dst_h The height of the dst tensor
+ * @param[in] dst_n Number of the matrices (buffers) in the batch
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix
+ */
+__kernel void mat_mul_native_quantized_nt_nt(
+ TENSOR3D_T(lhs, BUFFER),
+ TENSOR3D_T(rhs, BUFFER),
+#ifdef BIAS
+ TENSOR3D_T(bias, BUFFER),
+#endif // defined(BIAS)
+ TENSOR3D_T(dst, BUFFER))
+{
+ const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0);
+ const uint y = GET_SPATIAL_IDX(1, M0, PARTIAL_STORE_M0);
+ const uint z = GET_SPATIAL_IDX(2, 1, 0);
+
+ // Compute LHS/RHS/DST matrix address
+ lhs_offset_first_element_in_bytes += y * lhs_stride_y + z * lhs_stride_z;
+ rhs_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + z * rhs_stride_z;
+ dst_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + y * dst_stride_y + z * dst_stride_z;
+
+ // Initialize the accumulators
+ TILE(int, M0, N0, acc);
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ acc[i].v = K * ((int)LHS_OFFSET) * ((int)RHS_OFFSET);
+ })
+
+ TILE(int, 1, N0, b_sum);
+ b_sum[0].v = 0;
+
+ TILE(int, 1, M0, a_sum);
+ a_sum[0].v = 0;
+
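+    // The loop below consumes the K dimension in blocks of K0 elements; when K is not a multiple
+    // of K0, the remaining (K % K0) elements are handled one at a time by the leftover loop further down.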
+ int k;
+ for(k = 0; k <= K - K0; k += K0)
+ {
+ TILE(DATA_TYPE, M0, K0, a);
+ TILE(DATA_TYPE, N0, K0, b);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = 0;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ b[i].v = 0;
+ })
+
+ // Load tile from the lhs tensor
+ T_LOAD(DATA_TYPE, M0, K0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+
+        // Load tile from the rhs tensor in a transposed fashion
+        // in order to use the T_MMUL_NT_T macro, because only this macro
+        // can utilize the dot product instruction for Int8/UInt8 by
+        // directly multiplying the rows of the Lhs and Rhs tensors.
+ T_LOAD_TRANSPOSED(DATA_TYPE, K0, N0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+ T_MMUL(DATA_TYPE, DATA_TYPE, int, M0, N0, K0, NT, T, a, b, acc);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, K0,
+ {
+ a_sum[0].s[i] += (int)a[i].s[j];
+ })
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, K0,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, N0,
+ {
+ b_sum[0].s[j] += (int)b[j].s[i];
+ })
+ })
+
+ lhs_offset_first_element_in_bytes += K0 * sizeof(DATA_TYPE);
+ rhs_offset_first_element_in_bytes += K0 * rhs_stride_y;
+ }
+
+#if((K % K0) != 0)
+ /* Leftover Loop */
+ for(; k < K; ++k)
+ {
+ TILE(DATA_TYPE, M0, 1, a);
+ TILE(DATA_TYPE, N0, 1, b);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = 0;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ b[i].v = 0;
+ })
+
+ // Load tile from the lhs tensor
+ T_LOAD(DATA_TYPE, M0, 1, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+
+ // Load tile from the rhs tensor in a transposed fashion.
+ // See the main loop for more explanation
+ T_LOAD_TRANSPOSED(DATA_TYPE, 1, N0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+ T_MMUL(DATA_TYPE, DATA_TYPE, int, M0, N0, 1, NT, T, a, b, acc);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, 1,
+ {
+ a_sum[0].s[i] += (int)a[i].s[j];
+ })
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, 1,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, N0,
+ {
+ b_sum[0].s[j] += (int)b[j].s[i];
+ })
+ })
+
+ lhs_offset_first_element_in_bytes += 1 * sizeof(DATA_TYPE);
+ rhs_offset_first_element_in_bytes += 1 * rhs_stride_y;
+ }
+#endif // ((K % K0) != 0)
+
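+    // Each output element accumulates sum_k (lhs - LHS_OFFSET) * (rhs - RHS_OFFSET), which expands to
+    //
+    //     sum_k lhs * rhs - RHS_OFFSET * sum_k lhs - LHS_OFFSET * sum_k rhs + K * LHS_OFFSET * RHS_OFFSET
+    //
+    // The first term is produced by T_MMUL, the last term initialized the accumulators, and the two
+    // middle terms are subtracted below using the row sums (a_sum) and column sums (b_sum).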
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, N0,
+ {
+ acc[i].s[j] -= ((int)RHS_OFFSET) * a_sum[0].s[i] + ((int)(LHS_OFFSET)) * b_sum[0].s[j];
+ })
+ })
+
+ const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0;
+ const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0;
+
+#ifdef BIAS
+ perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, acc, x);
+#endif // defined(BIAS)
+
+ // Quantize the tile
+ TILE(DATA_TYPE, M0, N0, accq);
+ T_QUANTIZE8_ASYMMETRIC(int, DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, acc, accq);
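+    // T_QUANTIZE8_ASYMMETRIC rescales the int32 accumulators into the 8-bit output domain with the
+    // effective multiplier/shift pair (DST_MULTIPLIER, DST_SHIFT), adds DST_OFFSET and saturates to
+    // the range of DATA_TYPE (see tile_helpers.h for the exact rounding behaviour).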
+
+ T_ACTIVATION_QUANTIZED(DATA_TYPE, M0, N0, ACTIVATION_TYPE, ZERO_POINT, A_VAL, B_VAL, accq, accq);
+
+ TILE(int, M0, 1, indirect_buffer);
+ LOOP_UNROLLING(int, _i, 0, 1, M0,
+ {
+ indirect_buffer[_i].v = min(_i, select(M0 - 1, PARTIAL_STORE_M0 - 1, y_cond));
+ });
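+    // When this tile carries the M leftover (y_cond), the row indices above are clamped to
+    // PARTIAL_STORE_M0 - 1 so that every row written by the indirect store below stays inside the
+    // destination tensor.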
+
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, 0, dst_stride_y, x_cond, accq, indirect_buffer);
+}
+#endif // defined(MAT_MUL_NATIVE_QUANTIZED_NT_NT)
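+
+// For illustration only, a build-options line for the kernel above could look like (values are
+// examples, not taken from the host-side configuration):
+//     -DMAT_MUL_NATIVE_QUANTIZED_NT_NT -DDATA_TYPE=uchar -DM0=4 -DN0=8 -DK0=4 -DK=64
+//     -DPARTIAL_STORE_M0=0 -DPARTIAL_STORE_N0=0 -DLHS_OFFSET=10 -DRHS_OFFSET=0
+//     -DDST_OFFSET=5 -DDST_MULTIPLIER=2091 -DDST_SHIFT=8 -DZERO_POINT=5
+// together with the -DACTIVATION_TYPE/-DA_VAL/-DB_VAL options described in the documentation above.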
+
+#if defined(MAT_MUL_NATIVE_QUANTIZED_NT_T)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS non-transposed, RHS transposed - buffer only
+ *
+ * @note the "batch" here expresses the number of matrix multiplications to run in parallel. However, it
+ * should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=uchar)
+ * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4).
+ * @note The number of leftover outputs rows/columns must be passed using -DPARTIAL_STORE_N0 and -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_N0=2, -DPARTIAL_STORE_M0=3)
+ * @note The fused activation function should be passed with -DACTIVATION_TYPE; -DA_VAL and -DB_VAL are used as the min and max bounds for the bounded activation functions.
+ * @note The value of 0 in quantized format is equivalent to the quantization offset of the output data. This should be passed with -DZERO_POINT
+ * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6)
+ * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_QUANTIZED_NT_T)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ * - M0 > 0
+ * - N0 = 1, 2, 3, 4, 8, 16
+ * - K0 = 1, 2, 3, 4, 8, 16
+ * @note Values > 8 for M0, N0, K0 are not expected to be efficient
+ *
+ * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: QASYMM8/QASYMM8_SIGNED
+ * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] lhs_w The width of the lhs tensor
+ * @param[in] lhs_h The height of the lhs tensor
+ * @param[in] lhs_n Number of the matrices (buffers) in the batch
+ * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix
+ * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] rhs_w The width of the rhs tensor
+ * @param[in] rhs_h The height of the rhs tensor
+ * @param[in] rhs_n Number of the matrices (buffers) in the batch
+ * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix
+ * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr
+ * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes)
+ * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes)
+ * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor
+ * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor
+ * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor
+ * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
+ * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes)
+ * @param[in] dst_w The width of the dst tensor
+ * @param[in] dst_h The height of the dst tensor
+ * @param[in] dst_n Number of the matrices (buffers) in the batch
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix
+ */
+__kernel void mat_mul_native_quantized_nt_t(
+ TENSOR3D_T(lhs, BUFFER),
+ TENSOR3D_T(rhs, BUFFER),
+#ifdef BIAS
+ TENSOR3D_T(bias, BUFFER),
+#endif // defined(BIAS)
+ TENSOR3D_T(dst, BUFFER))
+{
+ const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0);
+ const uint y = GET_SPATIAL_IDX(1, M0, PARTIAL_STORE_M0);
+ const uint z = GET_SPATIAL_IDX(2, 1, 0);
+
+ // Compute LHS/RHS/DST matrix address
+ lhs_offset_first_element_in_bytes += y * lhs_stride_y + z * lhs_stride_z;
+ rhs_offset_first_element_in_bytes += x * rhs_stride_y + z * rhs_stride_z;
+ dst_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + y * dst_stride_y + z * dst_stride_z;
+
+ // Initialize the accumulators
+ TILE(int, M0, N0, acc);
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ acc[i].v = K * ((int)LHS_OFFSET) * ((int)RHS_OFFSET);
+ })
+
+ TILE(int, 1, M0, a_sum);
+ a_sum[0].v = 0;
+
+ TILE(int, 1, N0, b_sum);
+ b_sum[0].v = 0;
+
+ int k;
+ for(k = 0; k <= K - K0; k += K0)
+ {
+ TILE(DATA_TYPE, M0, K0, a);
+ TILE(DATA_TYPE, N0, K0, b);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = 0;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ b[i].v = 0;
+ })
+
+ // Load tile from lhs/rhs tensors
+ T_LOAD(DATA_TYPE, M0, K0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD(DATA_TYPE, N0, K0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+ T_MMUL(DATA_TYPE, DATA_TYPE, int, M0, N0, K0, NT, T, a, b, acc);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, K0,
+ {
+ a_sum[0].s[i] += (int)a[i].s[j];
+ })
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, K0,
+ {
+ b_sum[0].s[i] += (int)b[i].s[j];
+ })
+ })
+
+ lhs_offset_first_element_in_bytes += K0 * sizeof(DATA_TYPE);
+ rhs_offset_first_element_in_bytes += K0 * sizeof(DATA_TYPE);
+ }
+
+#if((K % K0) != 0)
+ // Leftover loop
+ for(; k < K; ++k)
+ {
+ TILE(DATA_TYPE, M0, 1, a);
+ TILE(DATA_TYPE, N0, 1, b);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = 0;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ b[i].v = 0;
+ })
+
+ // Load tile from lhs/rhs tensors
+ T_LOAD(DATA_TYPE, M0, 1, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD(DATA_TYPE, N0, 1, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+ T_MMUL(DATA_TYPE, DATA_TYPE, int, M0, N0, 1, NT, T, a, b, acc);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, 1,
+ {
+ a_sum[0].s[i] += (int)a[i].s[j];
+ })
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, 1,
+ {
+ b_sum[0].s[i] += (int)b[i].s[j];
+ })
+ })
+
+ lhs_offset_first_element_in_bytes += 1 * sizeof(DATA_TYPE);
+ rhs_offset_first_element_in_bytes += 1 * sizeof(DATA_TYPE);
+ }
+#endif // ((K % K0) != 0)
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, N0,
+ {
+ acc[i].s[j] -= ((int)(RHS_OFFSET)) * a_sum[0].s[i] + ((int)(LHS_OFFSET)) * b_sum[0].s[j];
+ })
+ })
+
+ const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0;
+ const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0;
+
+#ifdef BIAS
+ perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, acc, x);
+#endif // defined(BIAS)
+
+ // Quantize the tile
+ TILE(DATA_TYPE, M0, N0, accq);
+ T_QUANTIZE8_ASYMMETRIC(int, DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, acc, accq);
+
+ T_ACTIVATION_QUANTIZED(DATA_TYPE, M0, N0, ACTIVATION_TYPE, ZERO_POINT, A_VAL, B_VAL, accq, accq);
+
+ TILE(int, M0, 1, indirect_buffer);
+ LOOP_UNROLLING(int, _i, 0, 1, M0,
+ {
+ indirect_buffer[_i].v = min(_i, select(M0 - 1, PARTIAL_STORE_M0 - 1, y_cond));
+ });
+
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, 0, dst_stride_y, x_cond, accq, indirect_buffer);
+}
+#endif // defined(MAT_MUL_NATIVE_QUANTIZED_NT_T)
+
+#if defined(MAT_MUL_NATIVE_QUANTIZED_T_NT)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS transposed, RHS non-transposed
+ *
+ * @note the "batch" here expresses the number of matrix multiplications to run in parallel. However, it
+ * should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=uchar)
+ * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4).
+ * @note The number of leftover outputs rows/columns must be passed using -DPARTIAL_STORE_N0 and -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_N0=2, -DPARTIAL_STORE_M0=3)
+ * @note The fused activation function should be passed with -DACTIVATION_TYPE; -DA_VAL and -DB_VAL are used as the min and max bounds for the relu and bounded relu operations.
+ * @note The value of 0 in quantized format is equivalent to the quantization offset of the output data. This should be passed with -DZERO_POINT
+ * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6)
+ * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_QUANTIZED_T_NT)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ * - M0 > 0
+ * - N0 = 1, 2, 3, 4, 8, 16
+ * - K0 = 1, 2, 3, 4, 8, 16
+ * @note Values > 8 for M0, N0 and K0 are not expected to be efficient
+ *
+ * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: QASYMM8/QASYMM8_SIGNED
+ * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] lhs_w The width of the lhs tensor
+ * @param[in] lhs_h The height of the lhs tensor
+ * @param[in] lhs_n Number of the matrices (buffers) in the batch
+ * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix
+ * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] rhs_w The width of the rhs tensor
+ * @param[in] rhs_h The height of the rhs tensor
+ * @param[in] rhs_n Number of the matrices (buffers) in the batch
+ * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix
+ * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr
+ * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes)
+ * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes)
+ * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor
+ * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor
+ * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor
+ * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
+ * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes)
+ * @param[in] dst_w The width of the dst tensor
+ * @param[in] dst_h The height of the dst tensor
+ * @param[in] dst_n Number of the matrices (buffers) in the batch
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix
+ */
+__kernel void mat_mul_native_quantized_t_nt(
+ TENSOR3D_T(lhs, BUFFER),
+ TENSOR3D_T(rhs, BUFFER),
+#ifdef BIAS
+ TENSOR3D_T(bias, BUFFER),
+#endif // defined(BIAS)
+ TENSOR3D_T(dst, BUFFER))
+{
+ const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0);
+ const uint y = GET_SPATIAL_IDX(1, M0, PARTIAL_STORE_M0);
+ const uint z = GET_SPATIAL_IDX(2, 1, 0);
+
+ // Compute LHS/RHS/DST matrix address
+ lhs_offset_first_element_in_bytes += y * sizeof(DATA_TYPE) + z * lhs_stride_z;
+ rhs_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + z * rhs_stride_z;
+ dst_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + y * dst_stride_y + z * dst_stride_z;
+
+ // Initialize the accumulators
+ TILE(int, M0, N0, acc);
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ acc[i].v = K * ((int)LHS_OFFSET) * ((int)RHS_OFFSET);
+ })
+
+ TILE(int, 1, N0, b_sum);
+ b_sum[0].v = 0;
+
+ TILE(int, 1, M0, a_sum);
+ a_sum[0].v = 0;
+
+ int k;
+ for(k = 0; k <= K - K0; k += K0)
+ {
+ TILE(DATA_TYPE, M0, K0, a);
+ TILE(DATA_TYPE, N0, K0, b);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = 0;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ b[i].v = 0;
+ })
+
+ // Load tile from the lhs/rhs tensors in a transposed fashion
+ // see mat_mul_native_quantized_nt_nt main loop for more explanation
+ T_LOAD_TRANSPOSED(DATA_TYPE, K0, M0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD_TRANSPOSED(DATA_TYPE, K0, N0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+ T_MMUL(DATA_TYPE, DATA_TYPE, int, M0, N0, K0, NT, T, a, b, acc);
+
+ LOOP_UNROLLING(int, i, 0, 1, K0,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, M0,
+ {
+ a_sum[0].s[j] += (int)a[j].s[i];
+ })
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, K0,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, N0,
+ {
+ b_sum[0].s[j] += (int)b[j].s[i];
+ })
+ })
+
+ lhs_offset_first_element_in_bytes += K0 * lhs_stride_y;
+ rhs_offset_first_element_in_bytes += K0 * rhs_stride_y;
+ }
+
+#if((K % K0) != 0)
+ /* Leftover Loop */
+ for(; k < K; ++k)
+ {
+ TILE(DATA_TYPE, M0, 1, a);
+ TILE(DATA_TYPE, N0, 1, b);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = 0;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ b[i].v = 0;
+ })
+
+ // Load tile from the lhs/rhs tensors in a transposed fashion
+ // see mat_mul_native_quantized_nt_nt main loop for more explanation
+ T_LOAD_TRANSPOSED(DATA_TYPE, 1, M0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD_TRANSPOSED(DATA_TYPE, 1, N0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+ T_MMUL(DATA_TYPE, DATA_TYPE, int, M0, N0, 1, NT, T, a, b, acc);
+
+ LOOP_UNROLLING(int, i, 0, 1, 1,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, M0,
+ {
+ a_sum[0].s[j] += (int)a[j].s[i];
+ })
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, 1,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, N0,
+ {
+ b_sum[0].s[j] += (int)b[j].s[i];
+ })
+ })
+
+ lhs_offset_first_element_in_bytes += 1 * lhs_stride_y;
+ rhs_offset_first_element_in_bytes += 1 * rhs_stride_y;
+ }
+#endif // ((K % K0) != 0)
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, N0,
+ {
+ acc[i].s[j] -= ((int)(RHS_OFFSET)) * a_sum[0].s[i] + ((int)(LHS_OFFSET)) * b_sum[0].s[j];
+ })
+ })
+
+ const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0;
+ const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0;
+
+#ifdef BIAS
+ perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, acc, x);
+#endif // defined(BIAS)
+
+ // Quantize the tile
+ TILE(DATA_TYPE, M0, N0, accq);
+ T_QUANTIZE8_ASYMMETRIC(int, DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, acc, accq);
+
+ T_ACTIVATION_QUANTIZED(DATA_TYPE, M0, N0, ACTIVATION_TYPE, ZERO_POINT, A_VAL, B_VAL, accq, accq);
+
+ TILE(int, M0, 1, indirect_buffer);
+ LOOP_UNROLLING(int, _i, 0, 1, M0,
+ {
+ indirect_buffer[_i].v = min(_i, select(M0 - 1, PARTIAL_STORE_M0 - 1, y_cond));
+ });
+
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, 0, dst_stride_y, x_cond, accq, indirect_buffer);
+}
+#endif // defined(MAT_MUL_NATIVE_QUANTIZED_T_NT)
+
+#if defined(MAT_MUL_NATIVE_QUANTIZED_T_T)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS transposed, RHS transposed
+ *
+ * @note the "batch" here expresses the number of matrix multiplications to run in parallel. However, it
+ * should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=uchar)
+ * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4).
+ * @note The number of leftover outputs rows/columns must be passed using -DPARTIAL_STORE_N0 and -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_N0=2, -DPARTIAL_STORE_M0=3)
+ * @note The fused activation function should be passed with -DACTIVATION_TYPE; -DA_VAL and -DB_VAL are used as the min and max bounds for the relu and bounded relu operations.
+ * @note The value of 0 in quantized format is equivalent to the quantization offset of the output data. This should be passed with -DZERO_POINT
+ * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6)
+ * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_QUANTIZED_T_T)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ * - M0 = 1, 2, 3, 4, 8, 16
+ * - N0 = 1, 2, 3, 4, 8, 16
+ * - K0 = 1, 2, 3, 4, 8, 16
+ * @note Values > 8 for M0, N0 and K0 are not expected to be efficient
+ *
+ * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: QASYMM8/QASYMM8_SIGNED
+ * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] lhs_w The width of the lhs tensor
+ * @param[in] lhs_h The height of the lhs tensor
+ * @param[in] lhs_n Number of the matrices (buffers) in the batch
+ * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix
+ * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] rhs_w The width of the rhs tensor
+ * @param[in] rhs_h The height of the rhs tensor
+ * @param[in] rhs_n Number of the matrices (buffers) in the batch
+ * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix
+ * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr
+ * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes)
+ * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes)
+ * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor
+ * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor
+ * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor
+ * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
+ * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes)
+ * @param[in] dst_w The width of the dst tensor
+ * @param[in] dst_h The height of the dst tensor
+ * @param[in] dst_n Number of the matrices (buffers) in the batch
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix
+ */
+__kernel void mat_mul_native_quantized_t_t(
+ TENSOR3D_T(lhs, BUFFER),
+ TENSOR3D_T(rhs, BUFFER),
+#ifdef BIAS
+ TENSOR3D_T(bias, BUFFER),
+#endif // defined(BIAS)
+ TENSOR3D_T(dst, BUFFER))
+{
+ const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0);
+ const uint y = GET_SPATIAL_IDX(1, M0, PARTIAL_STORE_M0);
+ const uint z = GET_SPATIAL_IDX(2, 1, 0);
+
+ // Compute LHS/RHS/DST matrix address
+ lhs_offset_first_element_in_bytes += y * sizeof(DATA_TYPE) + z * lhs_stride_z;
+ rhs_offset_first_element_in_bytes += x * rhs_stride_y + z * rhs_stride_z;
+ dst_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + y * dst_stride_y + z * dst_stride_z;
+
+ // Initialize the accumulators
+ TILE(int, M0, N0, acc);
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ acc[i].v = K * ((int)LHS_OFFSET) * ((int)RHS_OFFSET);
+ })
+
+ TILE(int, 1, M0, a_sum);
+ a_sum[0].v = 0;
+
+ TILE(int, 1, N0, b_sum);
+ b_sum[0].v = 0;
+
+ int k;
+ for(k = 0; k <= K - K0; k += K0)
+ {
+ TILE(DATA_TYPE, M0, K0, a);
+ TILE(DATA_TYPE, N0, K0, b);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = 0;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ b[i].v = 0;
+ })
+
+ // Load tile from the lhs tensor in a transposed fashion
+ // see mat_mul_native_quantized_nt_nt main loop for more explanation
+ T_LOAD_TRANSPOSED(DATA_TYPE, K0, M0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+
+ // Load tile from the rhs tensor
+ T_LOAD(DATA_TYPE, N0, K0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+ T_MMUL(DATA_TYPE, DATA_TYPE, int, M0, N0, K0, NT, T, a, b, acc);
+
+ LOOP_UNROLLING(int, i, 0, 1, K0,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, M0,
+ {
+ a_sum[0].s[j] += (int)a[j].s[i];
+ })
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, K0,
+ {
+ b_sum[0].s[i] += (int)b[i].s[j];
+ })
+ })
+
+ lhs_offset_first_element_in_bytes += K0 * lhs_stride_y;
+ rhs_offset_first_element_in_bytes += K0 * sizeof(DATA_TYPE);
+ }
+
+#if((K % K0) != 0)
+ /* Leftover Loop */
+ for(; k < K; ++k)
+ {
+ TILE(DATA_TYPE, M0, 1, a);
+ TILE(DATA_TYPE, N0, 1, b);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = 0;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ b[i].v = 0;
+ })
+
+ // Load tile from the lhs tensor in a transposed fashion
+ // see mat_mul_native_quantized_nt_nt main loop for more explanation
+ T_LOAD_TRANSPOSED(DATA_TYPE, 1, M0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+
+ // Load tile from the rhs tensor
+ T_LOAD(DATA_TYPE, N0, 1, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+ T_MMUL(DATA_TYPE, DATA_TYPE, int, M0, N0, 1, NT, T, a, b, acc);
+
+ LOOP_UNROLLING(int, i, 0, 1, 1,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, M0,
+ {
+ a_sum[0].s[j] += (int)a[j].s[i];
+ })
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, 1,
+ {
+ b_sum[0].s[i] += (int)b[i].s[j];
+ })
+ })
+
+ lhs_offset_first_element_in_bytes += 1 * lhs_stride_y;
+ rhs_offset_first_element_in_bytes += 1 * sizeof(DATA_TYPE);
+ }
+#endif // ((K % K0) != 0)
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, N0,
+ {
+ acc[i].s[j] -= ((int)RHS_OFFSET) * a_sum[0].s[i] + ((int)(LHS_OFFSET)) * b_sum[0].s[j];
+ })
+ })
+
+ const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0;
+ const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0;
+
+#ifdef BIAS
+ perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, acc, x);
+#endif // defined(BIAS)
+
+ // Quantize the tile
+ TILE(DATA_TYPE, M0, N0, accq);
+ T_QUANTIZE8_ASYMMETRIC(int, DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, acc, accq);
+
+ T_ACTIVATION_QUANTIZED(DATA_TYPE, M0, N0, ACTIVATION_TYPE, ZERO_POINT, A_VAL, B_VAL, accq, accq);
+
+ TILE(int, M0, 1, indirect_buffer);
+ LOOP_UNROLLING(int, _i, 0, 1, M0,
+ {
+ indirect_buffer[_i].v = min(_i, select(M0 - 1, PARTIAL_STORE_M0 - 1, y_cond));
+ });
+
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, 0, dst_stride_y, x_cond, accq, indirect_buffer);
+}
+#endif // defined(MAT_MUL_NATIVE_QUANTIZED_T_T)
diff --git a/src/core/CL/cl_kernels/common/mat_mul_quantized_mmul.cl b/src/core/CL/cl_kernels/common/mat_mul_quantized_mmul.cl
new file mode 100644
index 0000000000..fdfb75d39c
--- /dev/null
+++ b/src/core/CL/cl_kernels/common/mat_mul_quantized_mmul.cl
@@ -0,0 +1,832 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "activation_float_helpers.h"
+#include "helpers.h"
+#include "tile_helpers.h"
+
+#ifdef BIAS
+// This function performs in-place bias addition for the integer data type when bias is enabled.
+// Note: The tile's dimensions used for the LHS and RHS matrices (M0, N0) must be passed at compile time using -DN0, -DM0 (e.g. -DN0=8, -DM0=4).
+inline void perform_bias_addition(uchar *bias_ptr, uint bias_offset_first_element_in_bytes, TILE(int, M0, N0, acc), uint x)
+{
+ TILE(int, 1, N0, bias_tile);
+
+    // The T_LOAD below expands to use bias_ptr and bias_offset_first_element_in_bytes
+ T_LOAD(int, 1, N0, BUFFER, bias, x, 0, 1, 0, bias_tile);
+
+ // c = c + bias[broadcasted]
+ T_ELTWISE_BROADCAST_ADD_X(int, M0, N0, acc, bias_tile, acc);
+}
+#endif // defined(BIAS)
+
+#define MMUL_BLOCK_SIZE (MMUL_M0 * MMUL_N0) // MMUL block size for the output matrix
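+// With the values suggested in the documentation below (MMUL_M0 = 4, MMUL_N0 = 4, MMUL_K0 = 16),
+// MMUL_BLOCK_SIZE evaluates to 16, i.e. 16 work-items cooperate on one 4 x 4 block of the output.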
+
+#if defined(MAT_MUL_NATIVE_QUANTIZED_MMUL_NT_NT)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS non-transposed, RHS non-transposed - buffer only
+ *
+ * @note the "batch" here expresses the number of matrix multiplications to run in parallel. However, it
+ * should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=uchar)
+ * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at
+ * compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4).
+ * @note The number of leftover outputs rows/columns must be passed using -DN0_LEFTOVER and -DM0_LEFTOVER
+ * (e.g. -DN0_LEFTOVER=2, -DM0_LEFTOVER=3)
+ * @note The dimensions M, N, K must be passed at compile time using -DM, -DN and -DK (e.g. -DM=5, -DN=8, -DK=6).
+ * K must be a multiple of 16.
+ * @note MMUL block sizes must be passed at compile time using -DMMUL_K0, -DMMUL_M0, -DMMUL_N0
+ * (e.g. -DMMUL_K0=16, -DMMUL_M0=4, -DMMUL_N0=4)
+ * @note If there is bias -DBIAS option must be passed at compile time
+ * @note Quantization offsets of lhs, rhs and dst tensors must be passed at compile time using -DLHS_OFFSET,
+ * -DRHS_OFFSET, -DDST_OFFSET (e.g. -DLHS_OFFSET=10, -DRHS_OFFSET=0, -DDST_OFFSET=-6)
+ * @note Effective quantization multiplier and shift for the destination tensor must be passed at compile time using
+ *       -DDST_MULTIPLIER and -DDST_SHIFT (e.g. -DDST_MULTIPLIER=2091, -DDST_SHIFT=8)
+ * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_QUANTIZED_MMUL_NT_NT)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ * - M0 > 0
+ * - N0 = 1, 2, 3, 4, 8, 16
+ * - K0 = 4
+ * @note For a generic view on how the MMUL works, see mat_mul_mmul.cl
+ *
+ * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: QASYMM8_SIGNED/QASYMM8
+ * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] lhs_w The width of the lhs tensor
+ * @param[in] lhs_h The height of the lhs tensor
+ * @param[in] lhs_n Number of the matrices (buffers) in the batch
+ * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix
+ * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] rhs_w The width of the rhs tensor
+ * @param[in] rhs_h The height of the rhs tensor
+ * @param[in] rhs_n Number of the matrices (buffers) in the batch
+ * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix
+ * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: S32
+ * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes)
+ * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes)
+ * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor
+ * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor
+ * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor
+ * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
+ * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes)
+ * @param[in] dst_w The width of the dst tensor
+ * @param[in] dst_h The height of the dst tensor
+ * @param[in] dst_n Number of the matrices (buffers) in the batch
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix
+ */
+__kernel void mat_mul_native_quantized_mmul_nt_nt(
+ TENSOR3D_T(lhs, BUFFER),
+ TENSOR3D_T(rhs, BUFFER),
+#ifdef BIAS
+ TENSOR3D_T(bias, BUFFER),
+#endif // defined(BIAS)
+ TENSOR3D_T(dst, BUFFER))
+{
+    // The explanation of how this kernel works is very similar to the one given in
+    // mat_mul_mmul.cl. The MMUL logic and terminology are the same. The only difference is
+    // that in the quantized multiplication the MMUL block sizes are (4 x 16) for the Lhs matrix and
+    // (16 x 4) for the Rhs matrix, resulting in a (4 x 4) MMUL block size for the destination.
+    //
+    // Figures 1, 2 and 3 in the previous explanation work the same. Since the Lhs and Rhs
+    // MMUL block sizes are different in the quantized extension, the thread access pattern is
+    // slightly different. We can redraw Figure 4 (Thread access pattern) as follows:
+ //
+ // (Modified Figure 4 from mat_mul_mmul.cl)
+ // Thread Access Layouts in LHS & RHS matrices
+ //
+ // LHS matrix
+ // 4 times 4 times 4 times 4 times
+ // _______________________________________________________________
+ // |T0_|T0_|T0_|T0_|T1_|T1_|T1_|T1_|T2_|T2_|T2_|T2_|T3_|T3_|T3_|T3_|
+ // |T0_| ... |
+ // M0 | . . |
+ // Times | . . |
+ // | . . |
+ // |T0_|T0_|T0_|T0_|T1_|T1_|T1_|T1_|T2_|T2_|T2_|T2_|T3_|T3_|T3_|T3_|
+ // |T4_|T4_|T4_|T4_|T5_|T5_|T5_|T5_|T6_|T6_|T6_|T6_|T7_|T7_|T7_|T7_|
+ // |T4_|T4_|T4_|T4_|T5_|T5_|T5_|T5_|T6_|T6_|T6_|T6_|T7_|T7_|T7_|T7_|
+ // M0 | . . |
+ // Times | . . |
+ // | . . |
+ // |T4_|T4_|T4_|T4_|T5_|T5_|T5_|T5_|T6_|T6_|T6_|T6_|T7_|T7_|T7_|T7_|
+ // |T8_|T8_|T8_|T8_|T9_|T9_|T9_|T9_|T10|T10|T10|T10|T11|T11|T11|T11|
+ // M0 | . |
+ // Times | . |
+ // | . |
+ // |T8_|T8_|T8_|T8_|T9_|T9_|T9_|T9_|T10|T10|T10|T10|T11|T11|T11|T11|
+ // M0 | . |
+ // Times | . |
+ // | . |
+ // |T12|T12|T12|T12|T13|T13|T13|T13|T14|T14|T14|T14|T15|T15|T15|T15|
+ //
+ //
+ // RHS Matrix
+ //
+ // __________N0 times______N0 times____________________N0 times_______
+ // |__T0__| ... |__T0__|__T1__| ... |__T1__| ... |__T3__| ... |__T3__|
+ // 4 times |__T0__| ... |__T0__|__T1__| ... |__T1__| ... |__T3__| ... |__T3__|
+ // |__T0__| ... |__T0__|__T1__| ... |__T1__| ... |__T3__| ... |__T3__|
+ // |__T0__| ... |__T0__|__T1__| ... |__T1__| ... |__T3__| ... |__T3__|
+ // |__T4__| ... |__T4__|__T5__| ... |__T5__| ... |__T7__| ... |__T7__|
+ // 4 times |__T4__| ... |__T4__|__T5__| ... |__T5__| ... |__T7__| ... |__T7__|
+ // |__T4__| ... |__T4__|__T5__| ... |__T5__| ... |__T7__| ... |__T7__|
+ // X |__T4__| ... |__T4__|__T5__| ... |__T5__| ... |__T7__| ... |__T7__|
+ // |__T8__| ... |__T8__|__T9__| ... |__T9__| ... |__T11_| ... |__T11_|
+ // |__T8__| ... |__T8__|__T9__| ... |__T9__| ... |__T11_| ... |__T11_|
+ // 4 times |__T8__| ... |__T8__|__T9__| ... |__T9__| ... |__T11_| ... |__T11_|
+ // |__T8__| ... |__T8__|__T9__| ... |__T9__| ... |__T11_| ... |__T11_|
+ // |__T12_| ... |__T12_|__T13_| ... |__T13_| ... |__T15_| ... |__T15_|
+ // 4 times |__T12_| ... |__T12_|__T13_| ... |__T13_| ... |__T15_| ... |__T15_|
+ // |__T12_| ... |__T12_|__T13_| ... |__T13_| ... |__T15_| ... |__T15_|
+ // |__T12_|_____|__T12_|__T13_|______|__T13_|_____|__T15_|_____|__T15_|
+ //
+ //
+    //  The logic behind this thread access pattern is already described in the explanation
+    //  in mat_mul_mmul.cl. The only change is that each thread's accesses are extended from 1 element
+    //  to 4, rightward in the Lhs and downward in the Rhs, because the threads now operate on
+    //  4 char/uchar values (again 32 bits of data) instead of one 32-bit floating point value.
+    //
+    //  The mathematical view of the matrix multiplication explained in Figure 5 also holds here,
+    //  except that the dimension 4 becomes 16; the vector notation does not change, i.e. it is as follows:
+ //
+ // Settings:
+ // - a 8 x 16 LHS section
+ // - 16 x 8 RHS section
+ // - Each vector variable ai, bj represent a 16x1 vector
+ // - ^T (superscript T) denotes transpose
+ // - M0 = N0 = 2
+ // - MMUL_N0 = MMUL_M0 = 4, MMUL_K0 = 16
+ //
+ //
+ // (Modified Figure 5)
+ // Mathematical view of the Matrix Multiplication
+ //
+ // LHS RHS DST
+ // [ a1^T ] [ b1 b2 b3 b4 b5 b6 b7 ] [ a1^Tb1 a1^Tb2 a1^Tb3 ... a1^Tb7 ]
+ // [ a2^T ] 16 x 8 [ a2^Tb1 a2^Tb2 a2^Tb3 ... a2^Tb7 ]
+ // [ a3^T ] [ ]
+ // [ a4^T ] = [ . . ]
+ // [ a5^T ] X [ . . ]
+ // [ a6^T ] [ . . ]
+ // [ a7^T ] [ ]
+    //      [ a8^T ]                                  [ a8^Tb1  a8^Tb2  a8^Tb3 ...  a8^Tb7 ]
+ // 8 x 16 8 x 8
+ //
+ //
+ // For the first iteration, i.e. (m0, n0) = (0, 0), the arm_matrix_multiply would multiply the following matrices:
+ //
+ // [ a1^T ] [ b1 b3 b5 b7 ] [ a1^Tb1 a1^Tb3 a1^Tb5 a1^Tb7 ]
+    //       [ a3^T ]    x       4 x 4        =      [ a3^Tb1  a3^Tb3  a3^Tb5  a3^Tb7 ]
+    //       [ a5^T ]                                [ a5^Tb1  a5^Tb3  a5^Tb5  a5^Tb7 ]
+ // [ a7^T ] [ a7^Tb1 a7^Tb3 a7^Tb5 a7^Tb7 ]
+ // 4 x 4 4 x 4
+ // The elements calculated in the 4x4 output block are the "interleaved" elements in the DST above.
+    // When this is repeated for each combination of (m0, n0), every element of the DST matrix "section" is filled.
+ //
+ // Please refer to mat_mul_mmul.cl for more details.
+
+ const uint x0 = get_global_id(0); // [0, (N / N0) * MMUL_M0)
+    // The upper limit is a simplified version of ((N / N0) / MMUL_N0) * MMUL_BLOCK_SIZE
+ const uint y0 = get_global_id(1); // [0, (M / M0) / MMUL_M0)
+ const uint z = get_global_id(2); // Batch
+
+ // Get section coordinates
+ const uint section_x = (x0 / MMUL_BLOCK_SIZE);
+ const uint section_y = y0;
+
+ // Get thread coordinates within an mmul block
+ const uint thread_id = (x0 % MMUL_BLOCK_SIZE);
+ const uint thread_x = thread_id % MMUL_N0;
+ const uint thread_y = (thread_id / MMUL_N0);
+
+ // Calculate dst coordinates
+ const uint dst_x_unclamped = thread_x * N0 + section_x * N0 * MMUL_N0;
+ const uint dst_y_unclamped = thread_y * M0 + section_y * M0 * MMUL_M0;
+ const uint dst_x = min(dst_x_unclamped, (uint)(N - N0));
+ const uint dst_y = min(dst_y_unclamped, (uint)(M - M0));
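+    // Worked example (assuming MMUL_M0 = MMUL_N0 = 4, i.e. MMUL_BLOCK_SIZE = 16): the work-item with
+    // x0 = 21 falls in section_x = 21 / 16 = 1 with thread_id = 21 % 16 = 5, hence thread_x = 1 and
+    // thread_y = 1; its unclamped output tile origin is therefore
+    // dst_x = N0 * (1 + 4 * 1) and dst_y = M0 * (1 + 4 * section_y).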
+
+ // Starting LHS coordinates
+ const uint lhs_x = K0 * thread_x;
+ const uint lhs_y = dst_y;
+
+ // Starting RHS coordinates
+ const uint rhs_x = dst_x;
+ const uint rhs_y = K0 * thread_y;
+
+ // Compute LHS/RHS/DST matrix address
+ lhs_offset_first_element_in_bytes += lhs_x * sizeof(DATA_TYPE) + lhs_y * lhs_stride_y + z * lhs_stride_z;
+ rhs_offset_first_element_in_bytes += rhs_x * sizeof(DATA_TYPE) + rhs_y * rhs_stride_y + z * rhs_stride_z;
+ dst_offset_first_element_in_bytes += dst_x * sizeof(DATA_TYPE) + dst_y * dst_stride_y + z * dst_stride_z;
+
+ // Initialize the accumulators
+ TILE(int, M0, N0, c);
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c[i].v = K * ((int)LHS_OFFSET) * ((int)RHS_OFFSET);
+ })
+
+ // Calculate row and column sums
+ TILE(int, 1, N0, b_sum);
+ b_sum[0].v = 0;
+
+ TILE(int, 1, M0, a_sum);
+ a_sum[0].v = 0;
+
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ vec_1 = (VEC_DATA_TYPE(DATA_TYPE, K0))(1, 1, 1, 1);
+
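+    // lhs_w equals K here and K is required to be a multiple of MMUL_K0 (16), so the loop below
+    // never needs a leftover pass, unlike the non-MMUL kernels in mat_mul_quantized.cl.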
+ for(int k = 0; k < lhs_w; k += MMUL_K0)
+ {
+        // A tile of M0 x K0 elements (K0 is fixed to 4 for the quantized MMUL kernels)
+        TILE(DATA_TYPE, M0, K0, a);
+        // A tile of K0 x N0 elements
+        TILE(DATA_TYPE, K0, N0, b);
+
+ // Load tile from the lhs/rhs tensors
+ T_LOAD(DATA_TYPE, M0, K0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD(DATA_TYPE, K0, N0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ vec_b = (VEC_DATA_TYPE(DATA_TYPE, K0))(b[0].s[n0], b[1].s[n0], b[2].s[n0], b[3].s[n0]);
+
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ c[m0].s[n0] = arm_matrix_multiply(a[m0].v, vec_b, c[m0].s[n0]);
+ })
+
+#if LHS_OFFSET != 0
+ // Column Sum of B: Calculate the sum of columns by multiplying B
+ // with a matrix of 1's from Left
+ b_sum[0].s[n0] = arm_matrix_multiply(vec_1, vec_b, b_sum[0].s[n0]);
+#endif // LHS_OFFSET != 0
+ })
+
+#if RHS_OFFSET != 0
+ // Row Sum of A: Calculate the sum of rows by multiplying A with
+ // a matrix of 1's from Right
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ a_sum[0].s[m0] = arm_matrix_multiply(a[m0].v, vec_1, a_sum[0].s[m0]);
+ })
+#endif // RHS_OFFSET != 0
+
+ lhs_offset_first_element_in_bytes += MMUL_K0 * sizeof(DATA_TYPE);
+ rhs_offset_first_element_in_bytes += MMUL_K0 * rhs_stride_y;
+ }
+
+    // Do not write if the coordinates are out of bounds
+    // However, the reads above still have to happen, as arm_matrix_multiply() expects a certain number of calls from every work-item
+ if(dst_x_unclamped >= N || dst_y_unclamped >= M)
+ {
+ return;
+ }
+
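+    // Same zero-point correction as in mat_mul_quantized.cl: subtract RHS_OFFSET * row_sum(lhs) and
+    // LHS_OFFSET * col_sum(rhs) from each accumulator; the block is compiled out when both offsets are zero.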
+#if RHS_OFFSET != 0 || LHS_OFFSET != 0
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ const int A = ((int)RHS_OFFSET) * a_sum[0].s[i];
+ LOOP_UNROLLING(int, j, 0, 1, N0,
+ {
+ c[i].s[j] -= A + ((int)(LHS_OFFSET)) * b_sum[0].s[j];
+ })
+ })
+#endif // RHS_OFFSET != 0 || LHS_OFFSET != 0
+
+#ifdef BIAS
+ perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, c, dst_x);
+#endif // defined(BIAS)
+
+ // Quantize the tile
+ TILE(DATA_TYPE, M0, N0, cq);
+ T_QUANTIZE8_ASYMMETRIC(int, DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, c, cq);
+
+ if(dst_x + N0 <= N || N0_LEFTOVER == 0)
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE(N0)
+ (cq[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+ else
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE_PARTIAL(N0, N0_LEFTOVER)
+ (cq[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+}
+#endif // defined(MAT_MUL_NATIVE_QUANTIZED_MMUL_NT_NT)
+
+#if defined(MAT_MUL_NATIVE_QUANTIZED_MMUL_NT_T)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS non-transposed, RHS transposed - buffer only
+ *
+ * Supported block configurations:
+ * - M0 > 0
+ * - N0 = 1, 2, 3, 4, 8, 16
+ * - K0 = 4
+ *
+ * Similar to mat_mul_native_quantized_mmul_nt_nt()
+ */
+__kernel void mat_mul_native_quantized_mmul_nt_t(
+ TENSOR3D_T(lhs, BUFFER),
+ TENSOR3D_T(rhs, BUFFER),
+#ifdef BIAS
+ TENSOR3D_T(bias, BUFFER),
+#endif // defined(BIAS)
+ TENSOR3D_T(dst, BUFFER))
+{
+ const uint x0 = get_global_id(0); // [0, (N / N0) * MMUL_M0)
+    // The upper limit is a simplified version of ((N / N0) / MMUL_N0) * MMUL_BLOCK_SIZE
+ const uint y0 = get_global_id(1); // [0, (M / M0) / MMUL_M0)
+ const uint z = get_global_id(2); // Batch
+
+ // Get section coordinates
+ const uint section_x = (x0 / MMUL_BLOCK_SIZE);
+ const uint section_y = y0;
+
+ // Get thread coordinates within an mmul block
+ const uint thread_id = (x0 % MMUL_BLOCK_SIZE);
+ const uint thread_x = thread_id % MMUL_N0;
+ const uint thread_y = (thread_id / MMUL_N0);
+
+ // Calculate dst coordinates
+ const uint dst_x_unclamped = thread_x * N0 + section_x * N0 * MMUL_N0;
+ const uint dst_y_unclamped = thread_y * M0 + section_y * M0 * MMUL_M0;
+ const uint dst_x = min(dst_x_unclamped, (uint)(N - N0));
+ const uint dst_y = min(dst_y_unclamped, (uint)(M - M0));
+
+ // Starting LHS coordinates
+ const uint lhs_x = K0 * thread_x;
+ const uint lhs_y = dst_y;
+
+ // Starting RHS coordinates
+ const uint rhs_x = K0 * thread_y;
+ const uint rhs_y = dst_x;
+
+ // Compute LHS/RHS/DST matrix address
+ lhs_offset_first_element_in_bytes += lhs_x * sizeof(DATA_TYPE) + lhs_y * lhs_stride_y + z * lhs_stride_z;
+ rhs_offset_first_element_in_bytes += rhs_x * sizeof(DATA_TYPE) + rhs_y * rhs_stride_y + z * rhs_stride_z;
+ dst_offset_first_element_in_bytes += dst_x * sizeof(DATA_TYPE) + dst_y * dst_stride_y + z * dst_stride_z;
+
+ // Initialize the accumulators
+ TILE(int, M0, N0, c);
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c[i].v = K * ((int)LHS_OFFSET) * ((int)RHS_OFFSET);
+ })
+
+ // Calculate row and column sums
+ TILE(int, 1, N0, b_sum);
+ b_sum[0].v = 0;
+
+ TILE(int, 1, M0, a_sum);
+ a_sum[0].v = 0;
+
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ vec_1 = (VEC_DATA_TYPE(DATA_TYPE, K0))(1, 1, 1, 1);
+
+ for(int k = 0; k < lhs_w; k += MMUL_K0)
+ {
+        // A tile of M0 x K0 elements (K0 is fixed to 4 for the quantized MMUL kernels)
+        TILE(DATA_TYPE, M0, K0, a);
+        // A tile of N0 x K0 elements (the RHS is transposed)
+        TILE(DATA_TYPE, N0, K0, b);
+
+ // Load tile from the lhs/rhs tensors
+ T_LOAD(DATA_TYPE, M0, K0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD(DATA_TYPE, N0, K0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ c[m0].s[n0] = arm_matrix_multiply(a[m0].v, b[n0].v, c[m0].s[n0]);
+ })
+ })
+
+#if RHS_OFFSET != 0
+ // Row Sum of A: Calculate the sum of rows by multiplying A with
+ // a matrix of 1's from Right
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ a_sum[0].s[m0] = arm_matrix_multiply(a[m0].v, vec_1, a_sum[0].s[m0]);
+ })
+#endif // RHS_OFFSET != 0
+
+#if LHS_OFFSET != 0
+ // Column Sum of B: Calculate the sum of columns by multiplying B
+ // with a matrix of 1's from Left
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ b_sum[0].s[n0] = arm_matrix_multiply(vec_1, b[n0].v, b_sum[0].s[n0]);
+ })
+#endif // LHS_OFFSET != 0
+
+ lhs_offset_first_element_in_bytes += MMUL_K0 * sizeof(DATA_TYPE);
+ rhs_offset_first_element_in_bytes += MMUL_K0 * sizeof(DATA_TYPE);
+ }
+
+    // Do not write if the coordinates are out of bounds
+    // However, the reads above still have to happen, as arm_matrix_multiply() expects a certain number of calls from every work-item
+ if(dst_x_unclamped >= N || dst_y_unclamped >= M)
+ {
+ return;
+ }
+
+#if RHS_OFFSET != 0 || LHS_OFFSET != 0
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ const int A = ((int)RHS_OFFSET) * a_sum[0].s[i];
+ LOOP_UNROLLING(int, j, 0, 1, N0,
+ {
+ c[i].s[j] -= A + ((int)(LHS_OFFSET)) * b_sum[0].s[j];
+ })
+ })
+#endif // RHS_OFFSET != 0 || LHS_OFFSET != 0
+
+#ifdef BIAS
+ perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, c, dst_x);
+#endif // defined(BIAS)
+
+ // Quantize the tile
+ TILE(DATA_TYPE, M0, N0, cq);
+ T_QUANTIZE8_ASYMMETRIC(int, DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, c, cq);
+
+ if(dst_x + N0 <= N || N0_LEFTOVER == 0)
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE(N0)
+ (cq[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+ else
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE_PARTIAL(N0, N0_LEFTOVER)
+ (cq[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+}
+#endif // defined(MAT_MUL_NATIVE_QUANTIZED_MMUL_NT_T)
+
+#if defined(MAT_MUL_NATIVE_QUANTIZED_MMUL_T_NT)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS transposed, RHS non-transposed
+ *
+ * Supported block configurations:
+ * - M0 = 1, 2, 3, 4, 8, 16
+ * - N0 = 1, 2, 3, 4, 8, 16
+ * - K0 = 4
+ *
+ * Similar to mat_mul_native_quantized_mmul_nt_nt()
+ */
+__kernel void mat_mul_native_quantized_mmul_t_nt(
+ TENSOR3D_T(lhs, BUFFER),
+ TENSOR3D_T(rhs, BUFFER),
+#ifdef BIAS
+ TENSOR3D_T(bias, BUFFER),
+#endif // defined(BIAS)
+ TENSOR3D_T(dst, BUFFER))
+{
+ const uint x0 = get_global_id(0); // [0, (N / N0) * MMUL_M0)
+    // The upper limit is a simplified version of ((N / N0) / MMUL_N0) * MMUL_BLOCK_SIZE
+ const uint y0 = get_global_id(1); // [0, (M / M0) / MMUL_M0)
+ const uint z = get_global_id(2); // Batch
+
+ // Get section coordinates
+ const uint section_x = (x0 / MMUL_BLOCK_SIZE);
+ const uint section_y = y0;
+
+ // Get thread coordinates within an mmul block
+ const uint thread_id = (x0 % MMUL_BLOCK_SIZE);
+ const uint thread_x = thread_id % MMUL_N0;
+ const uint thread_y = (thread_id / MMUL_N0);
+
+ // Calculate dst coordinates
+ const uint dst_x_unclamped = thread_x * N0 + section_x * N0 * MMUL_N0;
+ const uint dst_y_unclamped = thread_y * M0 + section_y * M0 * MMUL_M0;
+ const uint dst_x = min(dst_x_unclamped, (uint)(N - N0));
+ const uint dst_y = min(dst_y_unclamped, (uint)(M - M0));
+
+ // Starting LHS coordinates
+ const uint lhs_x = dst_y;
+ const uint lhs_y = K0 * thread_x;
+
+ // Starting RHS coordinates
+ const uint rhs_x = dst_x;
+ const uint rhs_y = K0 * thread_y;
+
+ // Compute LHS/RHS/DST matrix address
+ lhs_offset_first_element_in_bytes += lhs_x * sizeof(DATA_TYPE) + lhs_y * lhs_stride_y + z * lhs_stride_z;
+ rhs_offset_first_element_in_bytes += rhs_x * sizeof(DATA_TYPE) + rhs_y * rhs_stride_y + z * rhs_stride_z;
+ dst_offset_first_element_in_bytes += dst_x * sizeof(DATA_TYPE) + dst_y * dst_stride_y + z * dst_stride_z;
+
+ // Initialize the accumulators
+ TILE(int, M0, N0, c);
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c[i].v = K * ((int)LHS_OFFSET) * ((int)RHS_OFFSET);
+ })
+
+ // Calculate row and column sums
+ TILE(int, 1, N0, b_sum);
+ b_sum[0].v = 0;
+
+ TILE(int, 1, M0, a_sum);
+ a_sum[0].v = 0;
+
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ vec_1 = (VEC_DATA_TYPE(DATA_TYPE, K0))(1, 1, 1, 1);
+
+ for(int k = 0; k < lhs_h; k += MMUL_K0)
+ {
+ TILE(DATA_TYPE, K0, M0, a);
+ TILE(DATA_TYPE, K0, N0, b);
+
+ // Load tile from the lhs/rhs tensors
+ T_LOAD(DATA_TYPE, K0, M0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD(DATA_TYPE, K0, N0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ vec_a = (VEC_DATA_TYPE(DATA_TYPE, K0))(a[0].s[m0], a[1].s[m0], a[2].s[m0], a[3].s[m0]);
+
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ vec_b = (VEC_DATA_TYPE(DATA_TYPE, K0))(b[0].s[n0], b[1].s[n0], b[2].s[n0], b[3].s[n0]);
+
+ c[m0].s[n0] = arm_matrix_multiply(vec_a, vec_b, c[m0].s[n0]);
+ })
+
+#if RHS_OFFSET != 0
+ // Row Sum of A: Calculate the sum of rows by multiplying A with
+ // a matrix of 1's from Right
+ a_sum[0].s[m0] = arm_matrix_multiply(vec_a, vec_1, a_sum[0].s[m0]);
+#endif // RHS_OFFSET != 0
+ })
+
+#if LHS_OFFSET != 0
+ // Column Sum of B: Calculate the sum of columns by multiplying B
+ // with a matrix of 1's from Left
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ vec_b = (VEC_DATA_TYPE(DATA_TYPE, K0))(b[0].s[n0], b[1].s[n0], b[2].s[n0], b[3].s[n0]);
+
+ b_sum[0].s[n0] = arm_matrix_multiply(vec_1, vec_b, b_sum[0].s[n0]);
+ })
+#endif // LHS_OFFSET != 0
+
+ lhs_offset_first_element_in_bytes += MMUL_K0 * lhs_stride_y;
+ rhs_offset_first_element_in_bytes += MMUL_K0 * rhs_stride_y;
+ }
+
+    // Do not write if the coordinates are out of bounds
+    // However, the reads above still have to happen, as arm_matrix_multiply() expects a certain number of calls from every work-item
+ if(dst_x_unclamped >= N || dst_y_unclamped >= M)
+ {
+ return;
+ }
+
+#if RHS_OFFSET != 0 || LHS_OFFSET != 0
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ const int A = ((int)RHS_OFFSET) * a_sum[0].s[i];
+ LOOP_UNROLLING(int, j, 0, 1, N0,
+ {
+ c[i].s[j] -= A + ((int)(LHS_OFFSET)) * b_sum[0].s[j];
+ })
+ })
+#endif // RHS_OFFSET != 0 || LHS_OFFSET != 0
+
+#ifdef BIAS
+ perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, c, dst_x);
+#endif // defined(BIAS)
+
+ // Quantize the tile
+ TILE(DATA_TYPE, M0, N0, cq);
+ T_QUANTIZE8_ASYMMETRIC(int, DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, c, cq);
+
+ if(dst_x + N0 <= N || N0_LEFTOVER == 0)
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE(N0)
+ (cq[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+ else
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE_PARTIAL(N0, N0_LEFTOVER)
+ (cq[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+}
+#endif // defined(MAT_MUL_NATIVE_QUANTIZED_MMUL_T_NT)
+
+#if defined(MAT_MUL_NATIVE_QUANTIZED_MMUL_T_T)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS transposed, RHS transposed
+ *
+ * Supported block configurations:
+ * - M0 = 1, 2, 3, 4, 8, 16
+ * - N0 = 1, 2, 3, 4, 8, 16
+ * - K0 = 4
+ *
+ * Similar to mat_mul_native_quantized_mmul_nt_nt()
+ */
+__kernel void mat_mul_native_quantized_mmul_t_t(
+ TENSOR3D_T(lhs, BUFFER),
+ TENSOR3D_T(rhs, BUFFER),
+#ifdef BIAS
+ TENSOR3D_T(bias, BUFFER),
+#endif // defined(BIAS)
+ TENSOR3D_T(dst, BUFFER))
+{
+ const uint x0 = get_global_id(0); // [0, (N / N0) * MMUL_M0)
+    // The upper limit is a simplified version of ((N / N0) / MMUL_N0) * MMUL_BLOCK_SIZE
+ const uint y0 = get_global_id(1); // [0, (M / M0) / MMUL_M0)
+ const uint z = get_global_id(2); // Batch
+
+ // Get section coordinates
+ const uint section_x = (x0 / MMUL_BLOCK_SIZE);
+ const uint section_y = y0;
+
+ // Get thread coordinates within an mmul block
+ const uint thread_id = (x0 % MMUL_BLOCK_SIZE);
+ const uint thread_x = thread_id % MMUL_N0;
+ const uint thread_y = (thread_id / MMUL_N0);
+
+ // Calculate dst coordinates
+ const uint dst_x_unclamped = thread_x * N0 + section_x * N0 * MMUL_N0;
+ const uint dst_y_unclamped = thread_y * M0 + section_y * M0 * MMUL_M0;
+ const uint dst_x = min(dst_x_unclamped, (uint)(N - N0));
+ const uint dst_y = min(dst_y_unclamped, (uint)(M - M0));
+
+ // Starting LHS coordinates
+ const uint lhs_x = dst_y;
+ const uint lhs_y = K0 * thread_x;
+
+ // Starting RHS coordinates
+ const uint rhs_x = K0 * thread_y;
+ const uint rhs_y = dst_x;
+
+ // Compute LHS/RHS/DST matrix address
+ lhs_offset_first_element_in_bytes += lhs_x * sizeof(DATA_TYPE) + lhs_y * lhs_stride_y + z * lhs_stride_z;
+ rhs_offset_first_element_in_bytes += rhs_x * sizeof(DATA_TYPE) + rhs_y * rhs_stride_y + z * rhs_stride_z;
+ dst_offset_first_element_in_bytes += dst_x * sizeof(DATA_TYPE) + dst_y * dst_stride_y + z * dst_stride_z;
+
+ // Initialize the accumulators
+ TILE(int, M0, N0, c);
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c[i].v = K * ((int)LHS_OFFSET) * ((int)RHS_OFFSET);
+ })
+
+ // Calculate row and column sums
+ TILE(int, 1, N0, b_sum);
+ b_sum[0].v = 0;
+
+ TILE(int, 1, M0, a_sum);
+ a_sum[0].v = 0;
+
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ vec_1 = (VEC_DATA_TYPE(DATA_TYPE, K0))(1, 1, 1, 1);
+
+ for(int k = 0; k < lhs_h; k += MMUL_K0)
+ {
+ TILE(DATA_TYPE, K0, M0, a);
+ TILE(DATA_TYPE, N0, K0, b);
+
+ // Load tile from the lhs/rhs tensors
+ T_LOAD(DATA_TYPE, K0, M0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD(DATA_TYPE, N0, K0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ vec_a = (VEC_DATA_TYPE(DATA_TYPE, K0))(a[0].s[m0], a[1].s[m0], a[2].s[m0], a[3].s[m0]);
+
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ c[m0].s[n0] = arm_matrix_multiply(vec_a, b[n0].v, c[m0].s[n0]);
+ })
+#if RHS_OFFSET != 0
+ // Row sum of A: compute the sum of each row by multiplying A by
+ // a vector of ones on the right
+ a_sum[0].s[m0] = arm_matrix_multiply(vec_a, vec_1, a_sum[0].s[m0]);
+#endif // RHS_OFFSET != 0
+ })
+
+#if LHS_OFFSET != 0
+ // Column sum of B: compute the sum of each column by multiplying B
+ // by a vector of ones on the left
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ b_sum[0].s[n0] = arm_matrix_multiply(vec_1, b[n0].v, b_sum[0].s[n0]);
+ })
+#endif // LHS_OFFSET != 0
+
+ lhs_offset_first_element_in_bytes += MMUL_K0 * lhs_stride_y;
+ rhs_offset_first_element_in_bytes += MMUL_K0 * sizeof(DATA_TYPE);
+ }
+
+ // Do not write if the coordinates are out of bounds
+ // However, the reads above must still happen, as arm_matrix_multiply() expects a fixed number of calls per MMUL block
+ if(dst_x_unclamped >= N || dst_y_unclamped >= M)
+ {
+ return;
+ }
+
+#if RHS_OFFSET != 0 || LHS_OFFSET != 0
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ const int A = ((int)RHS_OFFSET) * a_sum[0].s[i];
+ LOOP_UNROLLING(int, j, 0, 1, N0,
+ {
+ c[i].s[j] -= A + ((int)(LHS_OFFSET)) * b_sum[0].s[j];
+ })
+ })
+#endif // RHS_OFFSET != 0 || LHS_OFFSET != 0
+
+#ifdef BIAS
+ perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, c, dst_x);
+#endif // defined(BIAS)
+
+ // Quantize the tile
+ TILE(DATA_TYPE, M0, N0, cq);
+ T_QUANTIZE8_ASYMMETRIC(int, DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, c, cq);
+
+ if(dst_x + N0 <= N || N0_LEFTOVER == 0)
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE(N0)
+ (cq[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+ else
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE_PARTIAL(N0, N0_LEFTOVER)
+ (cq[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+}
+#endif // defined(MAT_MUL_NATIVE_QUANTIZED_MMUL_T_T)
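The work-item to tile mapping used by these MMUL kernels can be hard to follow from the inline comments alone. The sketch below (illustrative names; it assumes MMUL_BLOCK_SIZE == MMUL_M0 * MMUL_N0, as in the kernels) shows how the flattened global id is split into a block ("section") coordinate and a lane coordinate, and how the destination tile origin is clamped so out-of-range threads still issue the mandatory reads.

```c
typedef struct { unsigned dst_x, dst_y; } TileOrigin;

/* Decompose (gid0, gid1) into the MMUL block and the lane within it, then clamp
 * the destination tile origin exactly as the kernels do with min(). */
static TileOrigin map_workitem(unsigned gid0, unsigned gid1,
                               unsigned M, unsigned N, unsigned M0, unsigned N0,
                               unsigned mmul_m0, unsigned mmul_n0)
{
    const unsigned block_size = mmul_m0 * mmul_n0; // MMUL_BLOCK_SIZE
    const unsigned section_x  = gid0 / block_size; // which MMUL block along x
    const unsigned section_y  = gid1;              // which MMUL block along y
    const unsigned thread_id  = gid0 % block_size; // lane inside the block
    const unsigned thread_x   = thread_id % mmul_n0;
    const unsigned thread_y   = thread_id / mmul_n0;

    const unsigned x = thread_x * N0 + section_x * N0 * mmul_n0;
    const unsigned y = thread_y * M0 + section_y * M0 * mmul_m0;

    TileOrigin t = { x > N - N0 ? N - N0 : x,   // min(x, N - N0)
                     y > M - M0 ? M - M0 : y }; // min(y, M - M0)
    return t;
}
```

Threads whose unclamped coordinates fall outside the matrix re-read valid data and return before the store, which is why the kernels test dst_x_unclamped/dst_y_unclamped rather than the clamped values.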
diff --git a/src/core/CL/cl_kernels/mean_stddev_normalization.cl b/src/core/CL/cl_kernels/common/mean_stddev_normalization.cl
index 4141d3e8b7..22abf64874 100644
--- a/src/core/CL/cl_kernels/mean_stddev_normalization.cl
+++ b/src/core/CL/cl_kernels/common/mean_stddev_normalization.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 Arm Limited.
+ * Copyright (c) 2019-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -62,7 +62,11 @@ __kernel void mean_stddev_normalization(
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
sum = 0.f;
+#ifdef MEANSTDNORM_HALF
+ VEC_DATA_TYPE(float, VEC_SIZE)
+#else /* MEANSTDNORM_HALF */
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+#endif /* MEANSTDNORM_HALF */
sum_sq = 0.f;
// Calculate partial sum
int i = 0;
@@ -73,34 +77,34 @@ __kernel void mean_stddev_normalization(
data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)offset(&in, i, 0));
sum += data;
+#ifdef MEANSTDNORM_HALF
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ dsq = CONVERT(data * data, VEC_DATA_TYPE(float, VEC_SIZE));
+ sum_sq += dsq;
+#else /* MEANSTDNORM_HALF */
sum_sq += data * data;
+#endif /* MEANSTDNORM_HALF */
}
// Perform reduction
-#if VEC_SIZE > 8
- sum.s01234567 += sum.s89abcdef;
- sum_sq.s01234567 += sum_sq.s89abcdef;
-#endif // VEC_SIZE > 8
-#if VEC_SIZE > 4
- sum.s0123 += sum.s4567;
- sum_sq.s0123 += sum_sq.s4567;
-#endif // VEC_SIZE > 4
-#if VEC_SIZE > 2
- sum.s01 += sum.s23;
- sum_sq.s01 += sum_sq.s23;
-#endif // VEC_SIZE > 2
- sum.s0 += sum.s1;
- sum_sq.s0 += sum_sq.s1;
+ sum = SUM_REDUCE(sum, VEC_SIZE);
+ sum_sq = SUM_REDUCE(sum_sq, VEC_SIZE);
+
+#if VEC_SIZE > 1
+#define sum sum.s0
+#define sum_sq sum_sq.s0
+#endif // VEC_SIZE > 1
+
// Left-overs loop
for(; i < WIDTH; ++i)
{
DATA_TYPE data = *((__global DATA_TYPE *)offset(&in, i, 0));
- sum.s0 += data;
- sum_sq.s0 += data * data;
+ sum += data;
+ sum_sq += data * data;
}
- DATA_TYPE mean = sum.s0 / WIDTH;
- DATA_TYPE var = (sum_sq.s0 / WIDTH) - (mean * mean);
+ DATA_TYPE mean = sum / WIDTH;
+ DATA_TYPE var = (sum_sq / WIDTH) - (mean * mean);
DATA_TYPE stddev_inv = 1.f / sqrt(var + EPSILON);
i = 0;
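For reference, the arithmetic carried out by mean_stddev_normalization reduces to the single-pass formula sketched below in plain C (illustrative names, not the kernel itself). The MEANSTDNORM_HALF branch added above keeps sum_sq in float for half-precision inputs, presumably to avoid overflow and precision loss when squaring FP16 values.

```c
#include <math.h>
#include <stddef.h>

/* One row of mean/stddev normalization: accumulate sum and sum of squares in a
 * single pass, derive mean and variance, then scale by 1 / sqrt(var + epsilon). */
static void mean_stddev_normalize_row(float *row, size_t width, float epsilon)
{
    float sum = 0.f, sum_sq = 0.f;
    for(size_t i = 0; i < width; ++i)
    {
        sum    += row[i];
        sum_sq += row[i] * row[i];
    }
    const float mean       = sum / (float)width;
    const float var        = (sum_sq / (float)width) - mean * mean;
    const float stddev_inv = 1.f / sqrtf(var + epsilon);
    for(size_t i = 0; i < width; ++i)
    {
        row[i] = (row[i] - mean) * stddev_inv;
    }
}
```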
diff --git a/src/core/CL/cl_kernels/memset.cl b/src/core/CL/cl_kernels/common/memset.cl
index bb46a49f84..9ff25f3af4 100644
--- a/src/core/CL/cl_kernels/memset.cl
+++ b/src/core/CL/cl_kernels/common/memset.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/src/core/CL/cl_kernels/minmax_layer.cl b/src/core/CL/cl_kernels/common/minmax_layer.cl
index 655696f9a1..49356451df 100644
--- a/src/core/CL/cl_kernels/minmax_layer.cl
+++ b/src/core/CL/cl_kernels/common/minmax_layer.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/src/core/CL/cl_kernels/nonmax.cl b/src/core/CL/cl_kernels/common/nonmax.cl
index ab13131807..702e635a89 100644
--- a/src/core/CL/cl_kernels/nonmax.cl
+++ b/src/core/CL/cl_kernels/common/nonmax.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/src/core/CL/cl_kernels/pad_layer.cl b/src/core/CL/cl_kernels/common/pad_layer.cl
index 903e924a2f..5ae4ec884d 100644
--- a/src/core/CL/cl_kernels/pad_layer.cl
+++ b/src/core/CL/cl_kernels/common/pad_layer.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/src/core/CL/cl_kernels/permute.cl b/src/core/CL/cl_kernels/common/permute.cl
index db9e7ecc25..1a97ca7495 100644
--- a/src/core/CL/cl_kernels/permute.cl
+++ b/src/core/CL/cl_kernels/common/permute.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,7 +54,7 @@ __kernel void permute(TENSOR4D_DECLARATION(input),
{
Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT(input, DEPTH_IN);
- Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output, 0);
+ Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output);
int out_index[4] = { 0 };
int in_index[4] = { 0 };
diff --git a/src/core/CL/cl_kernels/pixelwise_mul_float.cl b/src/core/CL/cl_kernels/common/pixelwise_mul_float.cl
index 845e1c9860..10875293a9 100644
--- a/src/core/CL/cl_kernels/pixelwise_mul_float.cl
+++ b/src/core/CL/cl_kernels/common/pixelwise_mul_float.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -36,6 +36,10 @@
#include "activation_float_helpers.h"
#endif // defined(ACTIVATION_TYPE)
+#define VEC_ACC_TYPE VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE_OUT)
+#define VEC_OUT_TYPE VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE_OUT)
+#define VEC_FLOAT VEC_DATA_TYPE(float, VEC_SIZE_OUT)
+
/** Performs a pixelwise multiplication with float scale of either integer or float inputs.
*
* @attention The inputs and output data types need to be passed at compile time using -DDATA_TYPE_IN1, -DDATA_TYPE_IN2 and -DDATA_TYPE_OUT:
@@ -73,35 +77,45 @@
__kernel void pixelwise_mul_float(
TENSOR3D_DECLARATION(in1),
TENSOR3D_DECLARATION(in2),
+#if !defined(IN_PLACE)
TENSOR3D_DECLARATION(out),
+#endif // !defined(IN_PLACE)
const float scale)
{
// Get pixels pointer
- Tensor3D in1 = CONVERT_TO_TENSOR3D_STRUCT(in1);
- Tensor3D in2 = CONVERT_TO_TENSOR3D_STRUCT(in2);
- Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(out);
+ size_t x = max((int)(get_global_id(0) * VEC_SIZE_OUT - (VEC_SIZE_OUT - VEC_SIZE_LEFTOVER) % VEC_SIZE_OUT), 0);
+ size_t y = get_global_id(1);
+ size_t z = get_global_id(2);
+
+ __global uchar *in1_addr = in1_ptr + in1_offset_first_element_in_bytes + x * in1_stride_x + y * in1_stride_y + z * in1_stride_z;
+ __global uchar *in2_addr = in2_ptr + in2_offset_first_element_in_bytes + x * in2_stride_x + y * in2_stride_y + z * in2_stride_z;
+ __global uchar *
+#if !defined(IN_PLACE)
+ out_addr = out_ptr + out_offset_first_element_in_bytes + x * out_stride_x + y * out_stride_y + z * out_stride_z;
+#else // !defined(IN_PLACE)
+#if defined(SRC1_IN_PLACE)
+ out_addr = in1_addr;
+#else //defined(SRC1_IN_PLACE)
+ out_addr = in2_addr;
+#endif //defined(SRC1_IN_PLACE)
+#endif // !defined(IN_PLACE)
// Load data
- VEC_DATA_TYPE(ACC_DATA_TYPE, 16)
- in1_data = CONVERT(vload16(0, (__global DATA_TYPE_IN1 *)in1.ptr), VEC_DATA_TYPE(ACC_DATA_TYPE, 16));
- VEC_DATA_TYPE(ACC_DATA_TYPE, 16)
- in2_data = CONVERT(vload16(0, (__global DATA_TYPE_IN2 *)in2.ptr), VEC_DATA_TYPE(ACC_DATA_TYPE, 16));
+ VEC_ACC_TYPE in1_data = CONVERT((VEC_DATA_TYPE(DATA_TYPE_IN1, VEC_SIZE_OUT))(VLOAD(VEC_SIZE_IN1)(0, (__global DATA_TYPE_IN1 *)in1_addr)), VEC_ACC_TYPE);
+ VEC_ACC_TYPE in2_data = CONVERT((VEC_DATA_TYPE(DATA_TYPE_IN2, VEC_SIZE_OUT))(VLOAD(VEC_SIZE_IN2)(0, (__global DATA_TYPE_IN2 *)in2_addr)), VEC_ACC_TYPE);
// Perform multiplication
#ifdef DATA_TYPE_FLOAT
- VEC_DATA_TYPE(DATA_TYPE_OUT, 16)
- res = CONVERT(in1_data * in2_data * (ACC_DATA_TYPE)scale, VEC_DATA_TYPE(DATA_TYPE_OUT, 16));
+ VEC_OUT_TYPE res0 = CONVERT(in1_data * in2_data * (ACC_DATA_TYPE)scale, VEC_OUT_TYPE);
#else /* DATA_TYPE_FLOAT */
- VEC_DATA_TYPE(DATA_TYPE_OUT, 16)
- res = CONVERT_OP_FLOAT(CONVERT_OP_FLOAT((convert_float16(in1_data * in2_data) * scale), VEC_DATA_TYPE(ACC_DATA_TYPE, 16), ROUND), VEC_DATA_TYPE(DATA_TYPE_OUT, 16), ROUND);
+ VEC_OUT_TYPE res0 = CONVERT_OP_FLOAT(CONVERT_OP_FLOAT((CONVERT(in1_data * in2_data, VEC_FLOAT) * scale), VEC_ACC_TYPE, ROUND), VEC_OUT_TYPE, ROUND);
#endif /* DATA_TYPE_FLOAT */
#if defined(ACTIVATION_TYPE)
- vstore16(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE_OUT, VEC_SIZE, res, A_VAL, B_VAL), 0, (__global DATA_TYPE_OUT *)out.ptr);
-#else // defined(ACTIVATION_TYPE)
- // Store result
- vstore16(res, 0, (__global DATA_TYPE_OUT *)out.ptr);
+ res0 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE_OUT, VEC_SIZE_OUT, res0, A_VAL, B_VAL);
#endif // defined(ACTIVATION_TYPE)
+
+ STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE_OUT, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
}
#endif /* defined(DATA_TYPE_IN1) && defined(DATA_TYPE_IN2) && defined(ACC_DATA_TYPE) && defined(DATA_TYPE_OUT) */
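The reworked addressing in pixelwise_mul_float (and the other element-wise kernels in this patch) relies on one idiom: the first work-item covers the leftover elements and stores only VEC_SIZE_LEFTOVER values, while every other work-item starts (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE elements earlier so its full-width store stays in bounds. A small C sketch of that idiom, with illustrative names and float data, follows.

```c
#include <stddef.h>
#include <string.h>

/* Start index per work-item: work-item 0 keeps x = 0 and handles the leftover,
 * the others are shifted back so their full-width stores never overrun the row. */
static size_t leftover_aware_x(size_t gid0, size_t vec_size, size_t leftover)
{
    const long x = (long)(gid0 * vec_size) - (long)((vec_size - leftover) % vec_size);
    return x > 0 ? (size_t)x : 0;
}

/* Rough equivalent of STORE_VECTOR_SELECT: store only the partial vector when the
 * leftover condition holds for this work-item, otherwise store the full vector. */
static void store_select(float *dst, const float *vec, size_t vec_size,
                         size_t leftover, int store_partial)
{
    const size_t n = store_partial ? leftover : vec_size;
    memcpy(dst, vec, n * sizeof(float));
}
```

In the kernels the partial-store condition is VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0.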
@@ -155,7 +169,7 @@ __kernel void pixelwise_mul_complex(
res = { vin1.x *vin2.x - vin1.y * vin2.y, vin1.x *vin2.y + vin2.x * vin1.y };
#if defined(ACTIVATION_TYPE)
- vstore2(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, res, A_VAL, B_VAL), 0, (__global DATA_TYPE *)out.ptr);
+ vstore2(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE_OUT, res, A_VAL, B_VAL), 0, (__global DATA_TYPE *)out.ptr);
#else // defined(ACTIVATION_TYPE)
// Store result
vstore2(res, 0, (__global DATA_TYPE *)out.ptr);
diff --git a/src/core/CL/cl_kernels/pixelwise_mul_int.cl b/src/core/CL/cl_kernels/common/pixelwise_mul_int.cl
index b0bd338147..6d1c2d0c79 100644
--- a/src/core/CL/cl_kernels/pixelwise_mul_int.cl
+++ b/src/core/CL/cl_kernels/common/pixelwise_mul_int.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -36,6 +36,10 @@
#define CONVERT_DOWN(x, type) CONVERT_RTE(x, type)
#if defined(DATA_TYPE_IN1) && defined(DATA_TYPE_IN2) && defined(ACC_DATA_TYPE) && defined(DATA_TYPE_OUT)
+
+#define VEC_ACC_TYPE VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE_OUT)
+#define VEC_OUT_TYPE VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE_OUT)
+
/** Performs a pixelwise multiplication with integer scale of integer inputs.
*
* @attention The inputs and output data types need to be passed at compile time using -DDATA_TYPE_IN1, -DDATA_TYPE_IN2 and -DDATA_TYPE_OUT:
@@ -72,30 +76,42 @@
__kernel void pixelwise_mul_int(
TENSOR3D_DECLARATION(in1),
TENSOR3D_DECLARATION(in2),
+#if !defined(IN_PLACE)
TENSOR3D_DECLARATION(out),
+#endif // !defined(IN_PLACE)
const uint scale)
{
- // Get pixels pointer
- Tensor3D in1 = CONVERT_TO_TENSOR3D_STRUCT(in1);
- Tensor3D in2 = CONVERT_TO_TENSOR3D_STRUCT(in2);
- Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(out);
+ size_t x = max((int)(get_global_id(0) * VEC_SIZE_OUT - (VEC_SIZE_OUT - VEC_SIZE_LEFTOVER) % VEC_SIZE_OUT), 0);
+ size_t y = get_global_id(1);
+ size_t z = get_global_id(2);
- // Load data
- VEC_DATA_TYPE(ACC_DATA_TYPE, 16)
- in1_data = CONVERT(vload16(0, (__global DATA_TYPE_IN1 *)in1.ptr), VEC_DATA_TYPE(ACC_DATA_TYPE, 16));
- VEC_DATA_TYPE(ACC_DATA_TYPE, 16)
- in2_data = CONVERT(vload16(0, (__global DATA_TYPE_IN2 *)in2.ptr), VEC_DATA_TYPE(ACC_DATA_TYPE, 16));
+ __global uchar *in1_addr = in1_ptr + in1_offset_first_element_in_bytes + x * in1_stride_x + y * in1_stride_y + z * in1_stride_z;
+ __global uchar *in2_addr = in2_ptr + in2_offset_first_element_in_bytes + x * in2_stride_x + y * in2_stride_y + z * in2_stride_z;
+ __global uchar *
+#if !defined(IN_PLACE)
+ out_addr = out_ptr + out_offset_first_element_in_bytes + x * out_stride_x + y * out_stride_y + z * out_stride_z;
+#else // !defined(IN_PLACE)
+#if defined(SRC1_IN_PLACE)
+ out_addr = in1_addr;
+#else //defined(SRC1_IN_PLACE)
+ out_addr = in2_addr;
+#endif //defined(SRC1_IN_PLACE)
+#endif // !defined(IN_PLACE)
+ // Load data
+ VEC_ACC_TYPE in1_data = CONVERT((VEC_DATA_TYPE(DATA_TYPE_IN1, VEC_SIZE_OUT))VLOAD(VEC_SIZE_IN1)(0, (__global DATA_TYPE_IN1 *)in1_addr), VEC_ACC_TYPE);
+ VEC_ACC_TYPE in2_data = CONVERT((VEC_DATA_TYPE(DATA_TYPE_IN2, VEC_SIZE_OUT))VLOAD(VEC_SIZE_IN2)(0, (__global DATA_TYPE_IN2 *)in2_addr), VEC_ACC_TYPE);
// Perform multiplication and store result
- vstore16(MUL_OP(in1_data, in2_data, scale, DATA_TYPE_OUT, 16), 0, (__global DATA_TYPE_OUT *)out.ptr);
+ VEC_OUT_TYPE out_data0 = MUL_OP(in1_data, in2_data, scale, DATA_TYPE_OUT, VEC_SIZE_OUT);
+ STORE_VECTOR_SELECT(out_data, DATA_TYPE_OUT, out_addr, VEC_SIZE_OUT, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
}
#endif /* defined(DATA_TYPE_IN1) && defined(DATA_TYPE_IN2) && defined(ACC_DATA_TYPE) && defined(DATA_TYPE_OUT) */
-#if defined(SCALE_IN1) && defined(SCALE_IN2) && defined(SCALE_OUT) && defined(DATA_TYPE_OUT) && defined(VEC_SIZE)
+#if defined(SCALE_IN1) && defined(SCALE_IN2) && defined(SCALE_OUT) && defined(DATA_TYPE_OUT) && defined(VEC_SIZE_OUT)
-#define VEC_FLOAT VEC_DATA_TYPE(float, VEC_SIZE)
-#define VEC_INT VEC_DATA_TYPE(int, VEC_SIZE)
-#define VEC_TYPE VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)
+#define VEC_FLOAT VEC_DATA_TYPE(float, VEC_SIZE_OUT)
+#define VEC_INT VEC_DATA_TYPE(int, VEC_SIZE_OUT)
+#define VEC_TYPE VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE_OUT)
/** Performs a pixelwise multiplication with float scale of quantized inputs.
*
@@ -138,17 +154,31 @@ __kernel void pixelwise_mul_int(
__kernel void pixelwise_mul_quantized(
TENSOR3D_DECLARATION(in1),
TENSOR3D_DECLARATION(in2),
+#if !defined(IN_PLACE)
TENSOR3D_DECLARATION(out),
+#endif // !defined(IN_PLACE)
const float scale)
{
- // Get pixels pointer
- Tensor3D in1 = CONVERT_TO_TENSOR3D_STRUCT(in1);
- Tensor3D in2 = CONVERT_TO_TENSOR3D_STRUCT(in2);
- Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(out);
+ size_t x = max((int)(get_global_id(0) * VEC_SIZE_OUT - (VEC_SIZE_OUT - VEC_SIZE_LEFTOVER) % VEC_SIZE_OUT), 0);
+ size_t y = get_global_id(1);
+ size_t z = get_global_id(2);
+
+ __global uchar *in1_addr = in1_ptr + in1_offset_first_element_in_bytes + x * in1_stride_x + y * in1_stride_y + z * in1_stride_z;
+ __global uchar *in2_addr = in2_ptr + in2_offset_first_element_in_bytes + x * in2_stride_x + y * in2_stride_y + z * in2_stride_z;
+ __global uchar *
+#if !defined(IN_PLACE)
+ out_addr = out_ptr + out_offset_first_element_in_bytes + x * out_stride_x + y * out_stride_y + z * out_stride_z;
+#else // !defined(IN_PLACE)
+#if defined(SRC1_IN_PLACE)
+ out_addr = in1_addr;
+#else //defined(SRC1_IN_PLACE)
+ out_addr = in2_addr;
+#endif //defined(SRC1_IN_PLACE)
+#endif // !defined(IN_PLACE)
// Load data
- VEC_INT in_a = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_OUT *)in1.ptr), VEC_INT);
- VEC_INT in_b = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_OUT *)in2.ptr), VEC_INT);
+ VEC_INT in_a = CONVERT((VEC_TYPE)(VLOAD(VEC_SIZE_IN1)(0, (__global DATA_TYPE_OUT *)in1_addr)), VEC_INT);
+ VEC_INT in_b = CONVERT((VEC_TYPE)(VLOAD(VEC_SIZE_IN2)(0, (__global DATA_TYPE_OUT *)in2_addr)), VEC_INT);
// Dequantize
#if defined(OFFSET_IN1)
@@ -165,10 +195,9 @@ __kernel void pixelwise_mul_quantized(
#else // defined(OFFSET_OUT)
const VEC_FLOAT qresf32 = (in1f32 * in2f32 * scale) / ((VEC_FLOAT)(float)SCALE_OUT);
#endif // defined(OFFSET_OUT)
- const VEC_TYPE res = CONVERT_SAT(CONVERT_DOWN(qresf32, VEC_INT), VEC_TYPE);
+ const VEC_TYPE res0 = CONVERT_SAT(CONVERT_DOWN(qresf32, VEC_INT), VEC_TYPE);
// Store result
- VSTORE(VEC_SIZE)
- (res, 0, (__global DATA_TYPE_OUT *)out.ptr);
+ STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE_OUT, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
}
-#endif /* defined(SCALE_IN1) && defined(SCALE_IN2) && defined(SCALE_OUT) && defined(DATA_TYPE_OUT) && defined(VEC_SIZE) */
+#endif /* defined(SCALE_IN1) && defined(SCALE_IN2) && defined(SCALE_OUT) && defined(DATA_TYPE_OUT) && defined(VEC_SIZE_OUT) */
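pixelwise_mul_quantized follows the usual dequantize, multiply, requantize pattern. The sketch below is a hedged scalar illustration (names are made up; it assumes the helpers_asymm convention of offset-then-scale on the way in and scale-then-offset on the way out), not the kernel itself.

```c
#include <stdint.h>
#include <math.h>

/* Scalar model of pixelwise_mul_quantized for QASYMM8 inputs and output. */
static uint8_t mul_qasymm8(uint8_t a, uint8_t b, float scale,
                           int off_in1, float scale_in1,
                           int off_in2, float scale_in2,
                           int off_out, float scale_out)
{
    const float af = ((int)a - off_in1) * scale_in1; // dequantize input 1
    const float bf = ((int)b - off_in2) * scale_in2; // dequantize input 2
    const float rf = (af * bf * scale) / scale_out;  // multiply and rescale to the output scale
    int q = (int)lrintf(rf) + off_out;               // requantize with the output offset
    if(q < 0)   { q = 0;   }                          // saturate, as CONVERT_SAT does
    if(q > 255) { q = 255; }
    return (uint8_t)q;
}
```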
diff --git a/src/core/CL/cl_kernels/qlstm_layer_normalization.cl b/src/core/CL/cl_kernels/common/qlstm_layer_normalization.cl
index 24cb111772..4494dd8cec 100644
--- a/src/core/CL/cl_kernels/qlstm_layer_normalization.cl
+++ b/src/core/CL/cl_kernels/common/qlstm_layer_normalization.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/src/core/CL/cl_kernels/quantization_layer.cl b/src/core/CL/cl_kernels/common/quantization_layer.cl
index 3538dae5f0..69cc288c25 100644
--- a/src/core/CL/cl_kernels/quantization_layer.cl
+++ b/src/core/CL/cl_kernels/common/quantization_layer.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -80,8 +80,8 @@ __kernel void quantization_layer(
// Create scale and offset vectors
const VEC_DATA_TYPE(DATA_TYPE_IN, VEC_SIZE) vscale = SCALE;
- const VEC_DATA_TYPE(int, VEC_SIZE) voffset = OFFSET;
-#else // defined(IS_FLOAT)
+ const VEC_DATA_TYPE(int, VEC_SIZE) voffset = OFFSET;
+#else // defined(IS_FLOAT)
// Load data
VEC_DATA_TYPE(DATA_TYPE_IN, VEC_SIZE)
val = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN *)input.ptr);
diff --git a/src/core/CL/cl_kernels/range.cl b/src/core/CL/cl_kernels/common/range.cl
index d25d10e207..d25d10e207 100644
--- a/src/core/CL/cl_kernels/range.cl
+++ b/src/core/CL/cl_kernels/common/range.cl
diff --git a/src/core/CL/cl_kernels/reduction_operation.cl b/src/core/CL/cl_kernels/common/reduction_operation.cl
index b2e56928d0..99369be19a 100644
--- a/src/core/CL/cl_kernels/reduction_operation.cl
+++ b/src/core/CL/cl_kernels/common/reduction_operation.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,74 +25,31 @@
#include "helpers_asymm.h"
#if defined(FLOAT_DATA_TYPE)
-#define ISGREATER(x, y) isgreater(x, y)
-#define ISLESS(x, y) isless(x, y)
+#define ISGREATER(x, y) (SELECT_VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE))(isgreater(x, y))
+#define ISLESS(x, y) (SELECT_VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE))(isless(x, y))
+#define ISGREATER_SCALAR(x, y) (SELECT_DATA_TYPE(DATA_TYPE_PROMOTED))(isgreater(x, y))
+#define ISLESS_SCALAR(x, y) (SELECT_DATA_TYPE(DATA_TYPE_PROMOTED))(isless(x, y))
#else // !FLOAT_DATA_TYPE
#if defined(WIDTH)
#define ISGREATER(x, y) (x > y) ? 1 : 0
#define ISLESS(x, y) (x < y) ? 1 : 0
+#define ISGREATER_SCALAR ISGREATER
+#define ISLESS_SCALAR ISLESS
#else // !defined(WIDTH)
-#define ISGREATER(x, y) select((int16)0, (int16)-1, x > y)
-#define ISLESS(x, y) select((int16)0, (int16)-1, x < y)
+#define ISGREATER(x, y) select((VEC_DATA_TYPE(int, VEC_SIZE))0, (VEC_DATA_TYPE(int, VEC_SIZE)) - 1, x > y)
+#define ISLESS(x, y) select((VEC_DATA_TYPE(int, VEC_SIZE))0, (VEC_DATA_TYPE(int, VEC_SIZE)) - 1, x < y)
#endif // defined(WIDTH)
#endif // defined(FLOAT_DATA_TYPE)
-/** Calculate square sum of a vector
- *
- * @param[in] input Pointer to the first pixel.
- *
- * @return square sum of vector.
- */
-inline DATA_TYPE square_sum(__global const DATA_TYPE *input)
-{
- VEC_DATA_TYPE(DATA_TYPE, 16)
- in = vload16(0, input);
-
- in *= in;
-
- in.s01234567 += in.s89ABCDEF;
- in.s0123 += in.s4567;
- in.s01 += in.s23;
-
- return (in.s0 + in.s1);
-}
-
-/** Calculate sum of a vector
- *
- * @param[in] input Pointer to the first pixel.
- *
- * @return sum of vector.
- */
-inline DATA_TYPE sum(__global const DATA_TYPE *input)
-{
- VEC_DATA_TYPE(DATA_TYPE, 16)
- in = vload16(0, input);
-
- in.s01234567 += in.s89ABCDEF;
- in.s0123 += in.s4567;
- in.s01 += in.s23;
-
- return (in.s0 + in.s1);
-}
-
-/** Calculate product of a vector
- *
- * @param[in] input Pointer to the first pixel.
- *
- * @return product of vector.
- */
-inline DATA_TYPE product(__global const DATA_TYPE *input)
-{
- VEC_DATA_TYPE(DATA_TYPE, 16)
- in = vload16(0, input);
+#if defined(WIDTH)
+#if defined(OPERATION)
- in.s01234567 *= in.s89ABCDEF;
- in.s0123 *= in.s4567;
- in.s01 *= in.s23;
+#define sum(in0, in1, size) (in0 + SUM_REDUCE(in1, size))
+#define square_sum(in0, in1, size) (in0 + SUM_REDUCE((in1 * in1), size))
+#define product(in0, in1, size) (in0 * PROD_REDUCE(in1, size))
+#define min_(in0, in1, size) (min(in0, MIN_REDUCE(in1, size)))
+#define max_(in0, in1, size) (max(in0, MAX_REDUCE(in1, size)))
- return (in.s0 * in.s1);
-}
-#if defined(OPERATION)
/** This kernel performs parallel reduction given an operation on x-axis.
*
* @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
@@ -101,89 +58,84 @@ inline DATA_TYPE product(__global const DATA_TYPE *input)
* @note The product flag must be passed at compile time using -DPROD if we want to compute the product, otherwise sum will be used
* @note The width size must be passed at compile time using -DWIDTH e.g. -DWIDTH=128 if we want to compute the mean value
*
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] partial_res_ptr The local buffer to hold partial result values. Supported data types: same as @p src_ptr
- * @param[in] partial_res_stride_x Stride of the output tensor in X dimension (in bytes)
- * @param[in] partial_res_step_x partial_res_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] partial_res_stride_y Stride of the output tensor in Y dimension (in bytes)
- * @param[in] partial_res_step_y partial_res_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] partial_res_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] local_results Local buffer for storing the partial result
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: F16/F32
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] output_ptr Pointer to the destination tensor. Supported data types: same as @p input
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
*/
__kernel void reduction_operation_x(
- IMAGE_DECLARATION(src),
- IMAGE_DECLARATION(partial_res),
- __local DATA_TYPE *local_results)
+ TENSOR3D_DECLARATION(input),
+ TENSOR3D_DECLARATION(output))
{
- Image src = CONVERT_TO_IMAGE_STRUCT(src);
- Image partial_res = CONVERT_TO_IMAGE_STRUCT(partial_res);
+ int y = get_global_id(1);
+ int z = get_global_id(2);
- unsigned int lsize = get_local_size(0);
- unsigned int lid = get_local_id(0);
+ __global uchar *input_addr = input_ptr + input_offset_first_element_in_bytes + y * input_stride_y + z * input_stride_z;
+ __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + y * output_stride_y + z * output_stride_z;
- for(unsigned int y = 0; y < get_local_size(1); ++y)
- {
- local_results[lid] = OPERATION((__global DATA_TYPE *)offset(&src, 0, y));
- barrier(CLK_LOCAL_MEM_FENCE);
-
- // Perform parallel reduction
- for(unsigned int i = lsize >> 1; i > 0; i >>= 1)
- {
- if(lid < i)
- {
+#if !defined(MIN) && !defined(MAX)
#if defined(PROD)
- local_results[lid] *= local_results[lid + i];
-#else // !defined(PROD)
- local_results[lid] += local_results[lid + i];
+ DATA_TYPE res = (DATA_TYPE)1;
+#else // defined(PROD)
+ DATA_TYPE res = (DATA_TYPE)0;
#endif // defined(PROD)
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
-
- if(lid == 0)
- {
-#if defined(MEAN) && defined(WIDTH)
- if(y == get_local_size(1) - 1)
- {
- local_results[0] /= WIDTH;
- }
-#endif // defined(MEAN) && defined(WIDTH)
- ((__global DATA_TYPE *)offset(&partial_res, get_group_id(0), y))[0] = local_results[0];
- }
+#else // #if !defined(MIN) && !defined(MAX)
+ DATA_TYPE res = *((__global DATA_TYPE *)input_addr);
+#endif // #if defined(MIN) || defined(MAX)
+ int x = 0;
+
+ for(; x <= (WIDTH - VEC_SIZE); x += VEC_SIZE)
+ {
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ vals = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_addr + x * sizeof(DATA_TYPE)));
+ res = OPERATION(res, vals, VEC_SIZE);
}
+
+#if(WIDTH % VEC_SIZE)
+ _Pragma("unroll") for(; x < WIDTH; ++x)
+ {
+ DATA_TYPE val = *((__global DATA_TYPE *)(input_addr + x * sizeof(DATA_TYPE)));
+ res = OPERATION(res, val, 1);
+ }
+#endif // (WIDTH % VEC_SIZE)
+
+#if defined(MEAN)
+ res /= WIDTH;
+#endif // defined(MEAN)
+ *((__global DATA_TYPE *)output_addr) = res;
}
#endif // defined(OPERATION)
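The rewritten reduction_operation_x replaces the local-memory tree reduction with a per-row loop: a vector-width main body reduced through the OPERATION macro, a scalar tail for the WIDTH % VEC_SIZE leftover elements, and an optional division by WIDTH for MEAN. A minimal C sketch of that pattern, specialised to SUM and using illustrative names, is shown here.

```c
#include <stddef.h>

/* Row reduction with a vector-width body and a scalar tail (SUM flavour). */
static float reduce_row_sum(const float *row, size_t width, size_t vec_size)
{
    float res = 0.f; // neutral element: 1.f for PROD, row[0] for MIN/MAX
    size_t x  = 0;
    for(; x + vec_size <= width; x += vec_size)
    {
        float chunk = 0.f;
        for(size_t v = 0; v < vec_size; ++v) // stands in for SUM_REDUCE over a vector load
        {
            chunk += row[x + v];
        }
        res += chunk;
    }
    for(; x < width; ++x) // leftover elements, WIDTH % VEC_SIZE of them
    {
        res += row[x];
    }
    return res; // divide by width here when MEAN is defined
}
```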
-
-#if defined(WIDTH)
/** This kernel performs reduction on x-axis. (Non parallel)
*
* @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
* @note The width size must be passed at compile time using -DWIDTH e.g. -DWIDTH=128
* @note The product flag must be passed at compile time using -DPROD if we want to compute the product, otherwise sum will be used
- * @note In case of MIN and MAX the condition data type must be passed at compile time using -DCOND_DATA_TYPE e.g. -DCOND_DATA_TYPE=short
*
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: S32/F16/F32 and QASYMM8/QASYMM8_SIGNED for operation MEAN
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] output_ptr The local buffer to hold sumed values. Supported data types: same as @p src_ptr
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: S32/F16/F32 and QASYMM8/QASYMM8_SIGNED for operation MEAN
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] output_ptr The local buffer to hold summed values. Supported data types: same as @p input_ptr
* @param[in] output_stride_x Stride of the output tensor in X dimension (in bytes)
* @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] output_offset_first_element_in_bytes The offset of the first element in the source tensor
*/
__kernel void reduction_operation_non_parallel_x(
- VECTOR_DECLARATION(src),
+ VECTOR_DECLARATION(input),
VECTOR_DECLARATION(output))
{
- Vector src = CONVERT_TO_VECTOR_STRUCT(src);
+ Vector input = CONVERT_TO_VECTOR_STRUCT(input);
Vector output = CONVERT_TO_VECTOR_STRUCT(output);
- DATA_TYPE_PROMOTED res = CONVERT(*((__global DATA_TYPE *)vector_offset(&src, 0)), DATA_TYPE_PROMOTED);
+ DATA_TYPE_PROMOTED res = CONVERT(*((__global DATA_TYPE *)vector_offset(&input, 0)), DATA_TYPE_PROMOTED);
// Convert input into F32 in order to perform quantized multiplication
#if defined(PROD) && defined(OFFSET) && defined(SCALE)
@@ -192,11 +144,11 @@ __kernel void reduction_operation_non_parallel_x(
for(unsigned int x = 1; x < WIDTH; ++x)
{
- DATA_TYPE_PROMOTED in = CONVERT(*((__global DATA_TYPE *)vector_offset(&src, x)), DATA_TYPE_PROMOTED);
+ DATA_TYPE_PROMOTED in = CONVERT(*((__global DATA_TYPE *)vector_offset(&input, x)), DATA_TYPE_PROMOTED);
#if defined(MIN)
- res = select(res, in, CONVERT(ISLESS(in, res), COND_DATA_TYPE));
+ res = select(res, in, ISLESS_SCALAR(in, res));
#elif defined(MAX)
- res = select(res, in, CONVERT(ISGREATER(in, res), COND_DATA_TYPE));
+ res = select(res, in, ISGREATER_SCALAR(in, res));
#elif defined(PROD)
#if defined(OFFSET) && defined(SCALE)
res_f *= DEQUANTIZE(in, OFFSET, SCALE, DATA_TYPE_PROMOTED, 1);
@@ -233,32 +185,37 @@ __kernel void reduction_operation_non_parallel_x(
* @note The input data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
* @note The height size must be passed at compile time using -DHEIGHT e.g. -DHEIGHT=128
*
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/S32/F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] output_ptr The local buffer to hold sumed values. Supported data types: same as @p src_ptr
- * @param[in] output_stride_x Stride of the output tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the output tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/S32/F16/F32
+ * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] output_ptr The local buffer to hold summed values. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_z Stride of the output tensor in Z dimension (in bytes)
* @param[in] output_offset_first_element_in_bytes The offset of the first element in the source tensor
*/
__kernel void reduction_operation_y(
- IMAGE_DECLARATION(src),
- IMAGE_DECLARATION(output))
+ __global uchar *input_ptr,
+ uint input_stride_y,
+ uint input_stride_z,
+ uint input_offset_first_element_in_bytes,
+
+ __global uchar *output_ptr,
+ uint output_stride_z,
+ uint output_offset_first_element_in_bytes)
{
- Image src = CONVERT_TO_IMAGE_STRUCT(src);
- Image output = CONVERT_TO_IMAGE_STRUCT(output);
+ int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
+ int z = get_global_id(1);
- VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
- res = CONVERT(vload16(0, (__global DATA_TYPE *)offset(&src, 0, 0)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+ __global uchar *input_addr = input_ptr + input_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + z * input_stride_z;
+ __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + z * output_stride_z;
+
+ VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE)
+ res = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input_addr), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE));
// Convert input into F32 in order to perform quantized multiplication
#if defined(PROD) && defined(OFFSET) && defined(SCALE)
- float16 res_f = DEQUANTIZE(res, OFFSET, SCALE, DATA_TYPE_PROMOTED, 16);
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ res_f = DEQUANTIZE(res, OFFSET, SCALE, DATA_TYPE_PROMOTED, VEC_SIZE);
#endif // defined(PROD) && defined(OFFSET) && defined(SCALE)
#if defined(SUM_SQUARE)
@@ -267,8 +224,8 @@ __kernel void reduction_operation_y(
for(unsigned int y = 1; y < HEIGHT; ++y)
{
- VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
- in = CONVERT(vload16(0, (__global DATA_TYPE *)offset(&src, 0, y)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+ VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE)
+ in = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_addr + y * input_stride_y)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE));
#if defined(MIN)
res = select(res, in, ISLESS(in, res));
#elif defined(MAX)
@@ -280,7 +237,7 @@ __kernel void reduction_operation_y(
#if defined(PROD)
#if defined(OFFSET) && defined(SCALE)
- res_f *= DEQUANTIZE(in, OFFSET, SCALE, DATA_TYPE_PROMOTED, 16);
+ res_f *= DEQUANTIZE(in, OFFSET, SCALE, DATA_TYPE_PROMOTED, VEC_SIZE);
#else // !(defined(OFFSET) && defined(SCALE))
res *= in;
#endif // defined(OFFSET) && defined(SCALE)
@@ -302,11 +259,13 @@ __kernel void reduction_operation_y(
// Re-quantize
#if defined(PROD) && defined(OFFSET) && defined(SCALE)
- res = QUANTIZE(res_f, OFFSET, SCALE, DATA_TYPE_PROMOTED, 16);
+ res = QUANTIZE(res_f, OFFSET, SCALE, DATA_TYPE_PROMOTED, VEC_SIZE);
#endif // defined(PROD) && defined(OFFSET) && defined(SCALE)
// Store result
- vstore16(CONVERT_SAT(res, VEC_DATA_TYPE(DATA_TYPE, 16)), 0, (__global DATA_TYPE *)output.ptr);
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ res0 = CONVERT_SAT(res, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE));
+ STORE_VECTOR_SELECT(res, DATA_TYPE, output_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
}
#endif // defined(HEIGHT)
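For the quantized PROD path that appears in the y/z/w reductions, values are dequantized to float before multiplying and the final product is requantized once at the end. The sketch below assumes the usual helpers_asymm semantics (dequantize = (q - offset) * scale, quantize = round(v / scale) + offset) and uses illustrative names.

```c
#include <stdint.h>
#include <math.h>
#include <stddef.h>

/* Quantized product reduction over n QASYMM8 values. */
static uint8_t reduce_prod_qasymm8(const uint8_t *vals, size_t n, int offset, float scale)
{
    float prod = ((int)vals[0] - offset) * scale; // DEQUANTIZE of the first value
    for(size_t i = 1; i < n; ++i)
    {
        prod *= ((int)vals[i] - offset) * scale;
    }
    int q = (int)lrintf(prod / scale) + offset;   // QUANTIZE of the final product
    if(q < 0)   { q = 0;   }
    if(q > 255) { q = 255; }
    return (uint8_t)q;
}
```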
@@ -317,54 +276,51 @@ __kernel void reduction_operation_y(
* @note The depth size must be passed at compile time using -DDEPTH e.g. -DDEPTH=128
*
* @param[in] input_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/S32/F16/F32
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_stride_w Stride of the source tensor in W dimension (in bytes)
* @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
 * @param[in] output_ptr The local buffer to hold summed values. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the output tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] output_stride_y Stride of the output tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the output tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_stride_w Stride of the output tensor in W dimension (in bytes)
* @param[in] output_offset_first_element_in_bytes The offset of the first element in the source tensor
*/
__kernel void reduction_operation_z(
- TENSOR3D_DECLARATION(input),
- TENSOR3D_DECLARATION(output))
+ __global uchar *input_ptr,
+ uint input_stride_y,
+ uint input_stride_z,
+ uint input_stride_w,
+ uint input_offset_first_element_in_bytes,
+
+ __global uchar *output_ptr,
+ uint output_stride_y,
+ uint output_stride_w,
+ uint output_offset_first_element_in_bytes)
{
- Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
+ int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
+ int y = get_global_id(1);
+ int w = get_global_id(2);
+
+ __global uchar *input_addr = input_ptr + input_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + y * input_stride_y + w * input_stride_w;
+ __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + y * output_stride_y + w * output_stride_w;
- VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
- res = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+ VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE)
+ res = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input_addr), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE));
// Convert input into F32 in order to perform quantized multiplication
#if defined(PROD) && defined(OFFSET) && defined(SCALE)
- float16 res_f = DEQUANTIZE(res, OFFSET, SCALE, DATA_TYPE_PROMOTED, 16);
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ res_f = DEQUANTIZE(res, OFFSET, SCALE, DATA_TYPE_PROMOTED, VEC_SIZE);
#endif // defined(PROD) && defined(OFFSET) && defined(SCALE)
-#if defined(COMPLEX)
- VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
- res1 = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 8, 0, 0)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
-#endif // defined(COMPLEX)
#if defined(SUM_SQUARE)
res *= res;
#endif // defined(SUM_SQUARE)
for(unsigned int z = 1; z < DEPTH; ++z)
{
- VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
- in = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, z)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
-
-#if defined(COMPLEX)
- VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
- in1 = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 8, 0, z)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
-#endif // defined(COMPLEX)
+ VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE)
+ in = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_addr + z * input_stride_z)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE));
#if defined(MIN)
res = select(res, in, ISLESS(in, res));
@@ -377,16 +333,13 @@ __kernel void reduction_operation_z(
#if defined(PROD)
#if defined(OFFSET) && defined(SCALE)
- res_f *= DEQUANTIZE(in, OFFSET, SCALE, DATA_TYPE_PROMOTED, 16);
+ res_f *= DEQUANTIZE(in, OFFSET, SCALE, DATA_TYPE_PROMOTED, VEC_SIZE);
#else // !(defined(OFFSET) && defined(SCALE))
res *= in;
#endif // defined(OFFSET) && defined(SCALE)
-#else // !defined(PROD)
+#else // !defined(PROD)
res += in;
-#if defined(COMPLEX)
- res1 += in1;
-#endif // defined(COMPLEX)
#endif // defined(PROD)
#endif // defined(MAX) || defined(MIN)
}
@@ -402,14 +355,14 @@ __kernel void reduction_operation_z(
// Re-quantize
#if defined(PROD) && defined(OFFSET) && defined(SCALE)
- res = QUANTIZE(res_f, OFFSET, SCALE, DATA_TYPE_PROMOTED, 16);
+ res = QUANTIZE(res_f, OFFSET, SCALE, DATA_TYPE_PROMOTED, VEC_SIZE);
#endif // defined(PROD) && defined(OFFSET) && defined(SCALE)
// Store result
- vstore16(CONVERT_SAT(res, VEC_DATA_TYPE(DATA_TYPE, 16)), 0, (__global DATA_TYPE *)output.ptr);
-#if defined(COMPLEX)
- vstore16(CONVERT(res1, VEC_DATA_TYPE(DATA_TYPE, 16)), 0, (__global DATA_TYPE *)tensor3D_offset(&output, 8, 0, 0));
-#endif // defined(COMPLEX)
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ res0 = CONVERT_SAT(res, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE));
+
+ STORE_VECTOR_SELECT(res, DATA_TYPE, output_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
}
#endif /* defined(DEPTH) */
@@ -418,42 +371,51 @@ __kernel void reduction_operation_z(
*
* @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
* @note The batch size must be passed at compile time using -DBATCH e.g. -DBATCH=128
- * @note The depth size must be passed at compile time using -DBATCH e.g. -DDEPTH=128
+ * @note The depth size must be passed at compile time using -DDEPTH e.g. -DDEPTH=128
*
* @param[in] input_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/S32/F16/F32
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] input_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] input_step_w input_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] input_stride_v Stride of the source tensor in V dimension (in bytes)
* @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
 * @param[in] output_ptr The local buffer to hold summed values. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the output tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] output_stride_y Stride of the output tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] output_stride_z Stride of the output tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_stride_w Stride of the output tensor in W dimension (in bytes)
- * @param[in] output_step_w output_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] output_stride_v Stride of the output tensor in V dimension (in bytes)
* @param[in] output_offset_first_element_in_bytes The offset of the first element in the source tensor
*/
__kernel void reduction_operation_w(
- TENSOR4D_DECLARATION(input),
- TENSOR4D_DECLARATION(output))
+ __global uchar *input_ptr,
+ uint input_stride_y,
+ uint input_stride_z,
+ uint input_stride_w,
+ uint input_stride_v,
+ uint input_offset_first_element_in_bytes,
+
+ __global uchar *output_ptr,
+ uint output_stride_y,
+ uint output_stride_z,
+ uint output_stride_v,
+ uint output_offset_first_element_in_bytes)
{
- Tensor4D input = CONVERT_TO_TENSOR4D_STRUCT(input, DEPTH);
- Tensor4D output = CONVERT_TO_TENSOR4D_STRUCT(output, DEPTH);
+ int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
+ int y = get_global_id(1);
+
+ int gid_2 = get_global_id(2);
+ int z = get_global_id(2) % DEPTH;
+ int v = get_global_id(2) / DEPTH;
+
+ __global uchar *input_addr = input_ptr + input_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + y * input_stride_y + z * input_stride_z + v * input_stride_v;
+ __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + y * output_stride_y + z * output_stride_z + v * output_stride_v;
- VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
- res = CONVERT(vload16(0, (__global DATA_TYPE *)tensor4D_offset(&input, 0, 0, 0, 0)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+ VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE)
+ res = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input_addr), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE));
// Convert input into F32 in order to perform quantized multiplication
#if defined(PROD) && defined(OFFSET) && defined(SCALE)
- float16 res_f = DEQUANTIZE(res, OFFSET, SCALE, DATA_TYPE_PROMOTED, 16);
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ res_f = DEQUANTIZE(res, OFFSET, SCALE, DATA_TYPE_PROMOTED, VEC_SIZE);
#endif // defined(PROD) && defined(OFFSET) && defined(SCALE)
#if defined(SUM_SQUARE)
@@ -462,8 +424,8 @@ __kernel void reduction_operation_w(
for(unsigned int w = 1; w < BATCH; ++w)
{
- VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
- in = CONVERT(vload16(0, (__global DATA_TYPE *)tensor4D_offset(&input, 0, 0, 0, w)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+ VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE)
+ in = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_addr + w * input_stride_w)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE));
#if defined(MIN)
res = select(res, in, ISLESS(in, res));
@@ -476,7 +438,7 @@ __kernel void reduction_operation_w(
#if defined(PROD)
#if defined(OFFSET) && defined(SCALE)
- res_f *= DEQUANTIZE(in, OFFSET, SCALE, DATA_TYPE_PROMOTED, 16);
+ res_f *= DEQUANTIZE(in, OFFSET, SCALE, DATA_TYPE_PROMOTED, VEC_SIZE);
#else // !(defined(OFFSET) && defined(SCALE))
res *= in;
#endif // defined(OFFSET) && defined(SCALE)
@@ -498,10 +460,12 @@ __kernel void reduction_operation_w(
// Re-quantize
#if defined(PROD) && defined(OFFSET) && defined(SCALE)
- res = QUANTIZE(res_f, OFFSET, SCALE, DATA_TYPE_PROMOTED, 16);
+ res = QUANTIZE(res_f, OFFSET, SCALE, DATA_TYPE_PROMOTED, VEC_SIZE);
#endif // defined(PROD) && defined(OFFSET) && defined(SCALE)
// Store result
- vstore16(CONVERT_SAT(res, VEC_DATA_TYPE(DATA_TYPE, 16)), 0, (__global DATA_TYPE *)output.ptr);
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ res0 = CONVERT_SAT(res, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE));
+ STORE_VECTOR_SELECT(res, DATA_TYPE, output_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
}
#endif /* defined(BATCH) && defined(DEPTH) */
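reduction_operation_w drops the step arguments entirely and builds element addresses from raw byte strides, folding the Z dimension and the higher V dimension into the third global id. The following sketch (illustrative names) spells out that unpacking and the address arithmetic.

```c
#include <stdint.h>
#include <stddef.h>

/* Split the flattened third global id into (z, v) as the kernel does. */
static void unpack_gid2(int gid2, int depth, int *z, int *v)
{
    *z = gid2 % depth; // coordinate along Z
    *v = gid2 / depth; // coordinate in the dimension above the reduced batch axis
}

/* Element address built from byte strides, mirroring input_addr/output_addr above. */
static const uint8_t *element_addr(const uint8_t *base, size_t first_element_offset,
                                   size_t elem_size, int x, int y, int z, int v,
                                   size_t stride_y, size_t stride_z, size_t stride_v)
{
    return base + first_element_offset + (size_t)x * elem_size + (size_t)y * stride_y
           + (size_t)z * stride_z + (size_t)v * stride_v;
}
```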
diff --git a/src/core/CL/cl_kernels/reshape_layer.cl b/src/core/CL/cl_kernels/common/reshape_layer.cl
index 2d6a7edade..c47664bf85 100644
--- a/src/core/CL/cl_kernels/reshape_layer.cl
+++ b/src/core/CL/cl_kernels/common/reshape_layer.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -51,20 +51,20 @@ __kernel void reshape_layer(TENSOR3D_DECLARATION(input),
int2 input_shape,
int2 output_shape)
{
- Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output);
+ int out_x = get_global_id(0);
+ int out_y = get_global_id(1);
+ int out_z = get_global_id(2);
- int3 id = (int3)(get_global_id(0), get_global_id(1), get_global_id(2));
+ // Compute the output linearized index
+ int out_linear_idx = out_x + out_y * output_shape.x + out_z * output_shape.x * output_shape.y;
- // Linearize index
- int linear_idx = id.x + id.y * input_shape.x + id.z * input_shape.x * input_shape.y;
-
- // Translate to output
- int3 out_id;
- out_id.x = linear_idx % output_shape.x;
- out_id.y = (linear_idx / output_shape.x) % output_shape.y;
- out_id.z = linear_idx / (output_shape.x * output_shape.y);
+ // Translate to input coordinates
+ int in_x = out_linear_idx % input_shape.x;
+ int in_y = (out_linear_idx / input_shape.x) % input_shape.y;
+ int in_z = out_linear_idx / (input_shape.x * input_shape.y);
// Store result
- *((__global DATA_TYPE *)tensor3D_offset(&out, out_id.x, out_id.y, out_id.z)) = *((__global DATA_TYPE *)in.ptr);
+ input_ptr += input_offset_first_element_in_bytes + in_x * input_stride_x + in_y * input_stride_y + in_z * input_stride_z;
+ output_ptr += output_offset_first_element_in_bytes + out_x * output_stride_x + out_y * output_stride_y + out_z * output_stride_z;
+ *((__global DATA_TYPE *)output_ptr) = *((__global DATA_TYPE *)input_ptr);
}
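The rewritten reshape_layer works output-first: it linearizes the output coordinate with the output shape and unravels that linear index with the input shape. A compact C version of the index translation (illustrative names) is shown below.

```c
typedef struct { int x, y, z; } Coord3;

/* Map an output coordinate to the input coordinate holding the same element. */
static Coord3 reshape_src_coord(int out_x, int out_y, int out_z,
                                int out_w, int out_h, /* output shape (x, y) */
                                int in_w, int in_h)   /* input shape (x, y) */
{
    const int linear = out_x + out_y * out_w + out_z * out_w * out_h;
    Coord3 in;
    in.x = linear % in_w;
    in.y = (linear / in_w) % in_h;
    in.z = linear / (in_w * in_h);
    return in;
}
```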
diff --git a/src/core/CL/cl_kernels/reverse.cl b/src/core/CL/cl_kernels/common/reverse.cl
index 10ffe84aeb..e6df3041c2 100644
--- a/src/core/CL/cl_kernels/reverse.cl
+++ b/src/core/CL/cl_kernels/common/reverse.cl
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,6 +33,8 @@
*
* @note The data type must be given as a preprocessor argument using -DDATA_TYPE=num. e.g. -DDATA_TYPE=uint
* @note The number of dimensions to reverse must be given as a preprocessor argument using -DNUM_REVERSE_DIMS=num, e.g. -DNUM_REVERSE_DIMS=3
+ * @note The number of dimensions of the source tensor must be given as a preprocessor argument using -DRANK=num, e.g. -DRANK=3
+ * @note The values in axis_tensor must be within [-rank, rank-1].
*
* @param[in] src_ptr Pointer to the source tensor. Supported data types: All
* @param[in] src_stride_x Stride of the first source tensor in X dimension (in bytes)
@@ -69,7 +71,7 @@ __kernel void reverse(TENSOR4D_DECLARATION(src),
{
Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, depth);
Vector axis = CONVERT_TO_VECTOR_STRUCT_NO_STEP(axis);
- Tensor4D dst = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(dst, depth);
+ Tensor4D dst = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(dst);
const uint x_in = get_global_id(0);
const uint y_in = get_global_id(1);
@@ -78,20 +80,24 @@ __kernel void reverse(TENSOR4D_DECLARATION(src),
const uint4 dims = (uint4)(0, 1, 2, 3);
int4 to_reverse = (int4)(0, 0, 0, 0);
+
+ VEC_DATA_TYPE(int, NUM_REVERSE_DIMS) indices = VLOAD(NUM_REVERSE_DIMS)(0,(__global int *)axis.ptr);
+#if defined(USE_INVERTED_AXIS)
+ indices = select((VEC_DATA_TYPE(int, NUM_REVERSE_DIMS)) RANK - 1, -1, indices < 0) - indices;
+#else /* defined(USE_INVERTED_AXIS) */
+ indices = select(indices, indices + RANK, indices < 0);
+#endif /* defined(USE_INVERTED_AXIS) */
+
#if NUM_REVERSE_DIMS == 1
- const uint index = *((__global uint *)axis.ptr);
- to_reverse = (uint4)index == dims;
+ to_reverse = ((uint4)indices == dims);
#elif NUM_REVERSE_DIMS == 2
- const uint2 indices = vload2(0, (__global uint *)axis.ptr);
- to_reverse = ((uint4)indices.s0 == dims) || ((uint4)indices.s1 == dims);
+ to_reverse = ((uint4)indices.s0 == dims) || ((uint4)indices.s1 == dims);
#elif NUM_REVERSE_DIMS == 3
- const uint2 indices01 = vload2(0, (__global uint *)axis.ptr);
- const uint index2 = *((__global uint *)axis.ptr + 2);
- to_reverse = ((uint4)indices01.s0 == dims) || ((uint4)indices01.s1 == dims) || ((uint4)index2 == dims);
-#else /* NUM_REVERSE_DIMS == 3 */
- const uint4 indices = vload4(0, (__global uint *)axis.ptr);
- to_reverse = ((uint4)indices.s0 == dims) || ((uint4)indices.s1 == dims) || ((uint4)indices.s2 == dims) || ((uint4)indices.s3 == dims);
+ to_reverse = ((uint4)indices.s0 == dims) || ((uint4)indices.s1 == dims) || ((uint4)indices.s2 == dims);
+#else /* NUM_REVERSE_DIMS == 1 */
+ to_reverse = ((uint4)indices.s0 == dims) || ((uint4)indices.s1 == dims) || ((uint4)indices.s2 == dims) || ((uint4)indices.s3 == dims);
#endif /* NUM_REVERSE_DIMS == 1 */
+
const uint x_out = to_reverse.s0 ? width - x_in - 1 : x_in;
const uint y_out = to_reverse.s1 ? height - y_in - 1 : y_in;
const uint z_out = to_reverse.s2 ? depth - z_in - 1 : z_in;
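The new axis handling in the reverse kernel first normalizes each axis value: negative axes are wrapped into [0, RANK), and when USE_INVERTED_AXIS is set the axis is additionally mirrored to account for the inverted dimension order. A scalar sketch of the two select() expressions (illustrative name) follows.

```c
/* Normalize one axis value as the reverse kernel does. */
static int normalize_axis(int axis, int rank, int use_inverted_axis)
{
    if(use_inverted_axis)
    {
        /* select(RANK - 1, -1, axis < 0) - axis */
        return (axis < 0 ? -1 : rank - 1) - axis;
    }
    /* select(axis, axis + RANK, axis < 0) */
    return axis < 0 ? axis + rank : axis;
}
```

For example, with rank 4 an axis of -1 maps to 3 in the plain case and to 0 in the inverted case.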
diff --git a/src/core/CL/cl_kernels/roi_align_layer.cl b/src/core/CL/cl_kernels/common/roi_align_layer.cl
index e0b98e68c9..8cfe5ddcb6 100644
--- a/src/core/CL/cl_kernels/roi_align_layer.cl
+++ b/src/core/CL/cl_kernels/common/roi_align_layer.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -173,7 +173,7 @@ __kernel void roi_align_layer(
const float2 roi_bin_grid = SAMPLING_RATIO;
#else // !defined(SAMPLING_RATIO)
// Note that we subtract EPS_GRID before ceiling. This is to avoid situations where 1.000001 gets ceiled to 2.
- const float2 roi_bin_grid = ceil(bin_size - EPS_GRID);
+ const float2 roi_bin_grid = ceil(bin_size - EPS_GRID);
#endif // defined(SAMPLING_RATIO)
// Move input and output pointer across the fourth dimension
@@ -184,7 +184,7 @@ __kernel void roi_align_layer(
#if defined(NHWC)
__global DATA_TYPE *_output_ptr = (__global DATA_TYPE *)tensor3D_offset(&output, pz, px, py);
#else // !defined(NHWC)
- __global DATA_TYPE *_output_ptr = (__global DATA_TYPE *)tensor3D_offset(&output, px, py, pz);
+ __global DATA_TYPE *_output_ptr = (__global DATA_TYPE *)tensor3D_offset(&output, px, py, pz);
#endif // defined(NHWC)
*_output_ptr = (__global DATA_TYPE)roi_align_1x1(&input,
region_start.x,
diff --git a/src/core/CL/cl_kernels/roi_align_layer_quantized.cl b/src/core/CL/cl_kernels/common/roi_align_layer_quantized.cl
index d5c9a0d9bf..e75dee06f6 100644
--- a/src/core/CL/cl_kernels/roi_align_layer_quantized.cl
+++ b/src/core/CL/cl_kernels/common/roi_align_layer_quantized.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/src/core/CL/cl_kernels/roi_pooling_layer.cl b/src/core/CL/cl_kernels/common/roi_pooling_layer.cl
index ac193e8fb6..6899b952e0 100644
--- a/src/core/CL/cl_kernels/roi_pooling_layer.cl
+++ b/src/core/CL/cl_kernels/common/roi_pooling_layer.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,6 +22,7 @@
* SOFTWARE.
*/
#include "helpers.h"
+#include "helpers_asymm.h"
#if DATA_SIZE == 32
#define VEC_SIZE 4
@@ -29,24 +30,41 @@
#elif DATA_SIZE == 16
#define VEC_SIZE 8
#define VEC_MAX vec8_max
-#else /* DATA_SIZE not equals 32 or 16 */
+#elif DATA_SIZE == 8
+#define VEC_SIZE 16
+#define VEC_MAX vec16_max
+#else /* DATA_SIZE not equals 8, 16, 32 */
#error "Unsupported data size"
#endif /* DATA_SIZE == 32 */
+// Define whether to use max (Quantized datatype) or fmax (Float) functions
+#if defined(OFFSET_OUT) && defined(SCALE_OUT)
+#define MAX(x, y) max(x, y)
+#else // !(defined(OFFSET_OUT) && defined(SCALE_OUT))
+#define MAX(x, y) fmax(x, y)
+#endif // defined(OFFSET_OUT) && defined(SCALE_OUT)
+
inline DATA_TYPE vec4_max(VEC_DATA_TYPE(DATA_TYPE, 4) vec)
{
VEC_DATA_TYPE(DATA_TYPE, 2)
- temp = fmax(vec.lo, vec.hi);
- return fmax(temp.x, temp.y);
+ temp = MAX(vec.lo, vec.hi);
+ return MAX(temp.x, temp.y);
}
inline DATA_TYPE vec8_max(VEC_DATA_TYPE(DATA_TYPE, 8) vec)
{
VEC_DATA_TYPE(DATA_TYPE, 4)
- temp = fmax(vec.lo, vec.hi);
+ temp = MAX(vec.lo, vec.hi);
return vec4_max(temp);
}
+inline DATA_TYPE vec16_max(VEC_DATA_TYPE(DATA_TYPE, 16) vec)
+{
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ temp = MAX(vec.lo, vec.hi);
+ return vec8_max(temp);
+}
+
/** Performs a roi pooling on a single output pixel.
*
* @param[in] input Pointer to input Tensor3D struct.
@@ -69,7 +87,8 @@ inline DATA_TYPE roi_pool_1x1(const Tensor3D *input, int region_start_x, int reg
{
int num_iter = (int)((region_end_x - region_start_x) / VEC_SIZE);
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- curr_max = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(-FLT_MAX);
+ curr_max = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(MIN_VALUE);
+
for(int j = region_start_y; j < region_end_y; ++j)
{
int i = region_start_x;
@@ -77,27 +96,34 @@ inline DATA_TYPE roi_pool_1x1(const Tensor3D *input, int region_start_x, int reg
{
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
val = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)tensor3D_offset(input, i, j, pz));
- curr_max = fmax(val, curr_max);
+ curr_max = MAX(val, curr_max);
}
for(; i < region_end_x; ++i)
{
DATA_TYPE val = *(__global DATA_TYPE *)tensor3D_offset(input, i, j, pz);
- curr_max = fmax(curr_max, val);
+ curr_max = MAX(curr_max, val);
}
}
- return (DATA_TYPE)VEC_MAX(curr_max);
+
+ const DATA_TYPE temp = (DATA_TYPE)VEC_MAX(curr_max);
+
+#if defined(OFFSET_OUT) && defined(SCALE_OUT)
+ return QUANTIZE(temp, OFFSET_OUT, SCALE_OUT, DATA_TYPE, 1);
+#endif /* if quantized, requantize and return */
+
+ return temp;
}
}
/** Performs a roi pooling function.
*
- * @note Datatype must be passed using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types are F16, F32;
+ * @note Datatype must be passed using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types are F16, F32, QASYMM8;
* @note Datasize must be passed using -DDATA_SIZE e.g. -DDATA_SIZE=32;
* @note Input dimensions must be passed using -DMAX_DIM_X, -DMAX_DIM_Y and -DMAX_DIM_Z;
* @note Pooled region dimensions must be passed using -DPOOLED_DIM_X and -DPOOLED_DIM_Y;
* @note Spatial scale must be passed using -DSPATIAL_SCALE;
*
- * @param[in] input_ptr Pointer to the source image. Supported data types: F16, F32
+ * @param[in] input_ptr Pointer to the source image. Supported data types: F16, F32, QASYMM8
* @param[in] input_stride_x Stride of the source image in X dimension (in bytes)
* @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] input_stride_y Stride of the source image in Y dimension (in bytes)
@@ -111,7 +137,7 @@ inline DATA_TYPE roi_pool_1x1(const Tensor3D *input, int region_start_x, int reg
* @param[in] rois_stride_y Stride of the ROIs tensor in Y dimension (in bytes)
* @param[in] rois_step_y Step of the ROIs tensor in Y dimension (in bytes)
* @param[in] rois_offset_first_element_in_bytes The offset of the first element in the ROIs tensor
- * @param[out] output_ptr Pointer to the destination image. Supported data types: F16, F32
+ * @param[out] output_ptr Pointer to the destination image. Supported data types: same as input
* @param[in] output_stride_x Stride of the destination image in X dimension (in bytes)
* @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] output_stride_y Stride of the destination image in Y dimension (in bytes)
@@ -139,9 +165,9 @@ __kernel void roi_pooling_layer(
// Load roi parameters
// roi is laid out as follows { batch_index, x1, y1, x2, y2 }
- const ushort roi_batch = (ushort) * ((__global DATA_TYPE *)offset(&rois, 0, pw));
- const VEC_DATA_TYPE(DATA_TYPE, 4)
- roi = vload4(0, (__global DATA_TYPE *)offset(&rois, 1, pw));
+ const ushort roi_batch = (ushort) * ((__global ushort *)offset(&rois, 0, pw));
+ const VEC_DATA_TYPE(ushort, 4)
+ roi = vload4(0, (__global ushort *)offset(&rois, 1, pw));
const int2 roi_anchor = convert_int2_sat(round(convert_float2(roi.s01) * (float)SPATIAL_SCALE));
const int2 roi_dims = convert_int2_sat(fmax(round(convert_float2(roi.s23 - roi.s01) * (float)SPATIAL_SCALE), 1.f));
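The change set above extends roi_pooling_layer to QASYMM8: the accumulator is seeded with MIN_VALUE instead of a hard-coded -FLT_MAX, lane maxima use integer max (order is preserved on quantized values, so no dequantization is needed inside the loop), and the pooled value is finally re-expressed under the output quantization via OFFSET_OUT/SCALE_OUT. A generic host-side reference for requantizing a quantized maximum, with hypothetical parameter names and not the kernel's QUANTIZE macro:

#include <math.h>
#include <stdint.h>

/* Hypothetical parameters: in_scale/in_offset describe the input quantization,
 * out_scale/out_offset the output quantization. */
static uint8_t requantized_max(const uint8_t *vals, int n,
                               float in_scale, int in_offset,
                               float out_scale, int out_offset)
{
    uint8_t m = 0; /* max of quantized values; order is preserved under quantization */
    for (int i = 0; i < n; ++i)
        if (vals[i] > m) m = vals[i];

    const float real = ((int)m - in_offset) * in_scale; /* dequantize     */
    int q = (int)lrintf(real / out_scale) + out_offset; /* requantize     */
    if (q < 0)   q = 0;
    if (q > 255) q = 255;                               /* saturate to U8 */
    return (uint8_t)q;
}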
diff --git a/src/core/CL/cl_kernels/common/scatter.cl b/src/core/CL/cl_kernels/common/scatter.cl
new file mode 100644
index 0000000000..e3ec9cc98e
--- /dev/null
+++ b/src/core/CL/cl_kernels/common/scatter.cl
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+#include "tile_helpers.h"
+
+// The defines below implement the various reduce operations for our purposes,
+// where a corresponds to the existing value and b to the new value.
+#define ADD_OP(a, b) ((a) + (b))
+#define SUB_OP(a, b) ((a) - (b))
+
+#ifdef IS_FLOAT
+#define MAX_OP(a, b) fmax(a, b)
+#define MIN_OP(a, b) fmin(a, b)
+#else // ifdef IS_FLOAT
+#define MAX_OP(a, b) max(a, b)
+#define MIN_OP(a, b) min(a, b)
+#endif // ifdef IS_FLOAT
+
+#define UPDATE_OP(a, b) (b)
+
+#ifdef SCATTER_MP1D_2D_MPND
+
+/** This kernel performs the scatter operation
+ *
+ * @note Datatype should be given as a compile-time argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
+ * @note Number of indices should be given as a compile-time argument using -DNUM_INDICES, e.g. -DNUM_INDICES=3
+ * @note Index length should be given as a compile-time argument using -DINDEX_LENGTH, e.g. -DINDEX_LENGTH=2
+ * @note Outermost output shapes should be given as a compile-time argument using -DOUT_SHAPE_N_MINUS_X, where
+ * X must be 1,2,3,4,5, e.g. -DOUT_SHAPE_N_MINUS_1=3, ...
+ * @note Number of elements to copy in a row should be given as a compile-time argument using -DN0, e.g. -DN0=4
+ * @note Number of partial elements at the edge to copy in a row should be given as a compile-time argument using
+ * -DPARTIAL_N0, e.g. -DPARTIAL_N0=2
+ * @note Scatter function should be given as a compile-time argument using -DSCATTER_FUNCTION, e.g. -DSCATTER_FUNCTION=ADD
+ * @note If the kernel should skip reading the output tensor, -DSKIP_OUTPUT_READ option should be provided.
+ * @note Kernel name in uppercase letters should be provided as a compile-time argument, e.g. -DSCATTER_MP1D_2D_MPND
+ *
+ * @param[in] updates_ptr Pointer to the updates tensor. Data Types: F32
+ * @param[in] updates_stride_x Stride of the updates tensor in X dimension (in bytes)
+ * @param[in] updates_step_x updates_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] updates_stride_y Stride of the updates tensor in Y dimension (in bytes)
+ * @param[in] updates_step_y updates_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] updates_offset_first_element_in_bytes The offset of the first element in the updates tensor
+ * @param[in] indices_ptr Pointer to the indices tensor. Data Types: S32
+ * @param[in] indices_stride_x Stride of the indices tensor in X dimension (in bytes)
+ * @param[in] indices_step_x indices_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] indices_stride_y Stride of the indices tensor in Y dimension (in bytes)
+ * @param[in] indices_step_y indices_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] indices_offset_first_element_in_bytes The offset of the first element in the indices tensor
+ * @param[out] output_ptr Pointer to the destination tensor. Same as @p upt_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] upt_block_stride Update tensor data block stride in bytes
+ * @param[in] out_block_stride Output tensor data block stride in bytes
+ */
+__kernel void scatter_mp1d_2d_mpnd(
+ IMAGE_DECLARATION(updates),
+ IMAGE_DECLARATION(indices),
+ IMAGE_DECLARATION(output),
+ int upt_block_stride,
+ int out_block_stride
+ )
+{
+ const int out_shape[5] = {OUT_SHAPE_N_MINUS_1, OUT_SHAPE_N_MINUS_2, OUT_SHAPE_N_MINUS_3,
+ OUT_SHAPE_N_MINUS_4, OUT_SHAPE_N_MINUS_5};
+
+ const int x = GET_SPATIAL_IDX(0, N0, PARTIAL_N0); // x-coordinate in the tensor
+ const int y = get_global_id(1); // collapsed y-coordinate (ignoring the outermost dimensions)
+
+ const bool x_cond = (PARTIAL_N0 != 0 && get_global_id(0) == 0);
+
+ uchar *ind_ptr_raw = indices_ptr + indices_offset_first_element_in_bytes;
+ const uchar *out_ptr_raw = output_ptr + output_offset_first_element_in_bytes
+ + x * sizeof(DATA_TYPE) + y * output_stride_y;
+
+ const uchar *upt_ptr_raw = updates_ptr + updates_offset_first_element_in_bytes
+ + x * sizeof(DATA_TYPE) + y * updates_stride_y;
+
+ for(int index_element = 0; index_element < NUM_INDICES; ++index_element)
+ {
+ const int *ind_ptr = (const int *) (ind_ptr_raw);
+
+ // Out of bounds check
+ bool out_of_bounds = false;
+ LOOP_UNROLLING(int, i, 0, 1, INDEX_LENGTH,
+ {
+ if(ind_ptr[i] >= out_shape[i] || ind_ptr[i] < 0)
+ {
+ out_of_bounds = true;
+ }
+ });
+
+ ind_ptr_raw += indices_stride_y;
+
+ if(out_of_bounds)
+ {
+ continue;
+ }
+
+ // Index calculation
+ int index = 0;
+ LOOP_UNROLLING(int, i, 0, 1, INDEX_LENGTH,
+ {
+ index = index * out_shape[i] + ind_ptr[i];
+ });
+
+ DATA_TYPE *out_ptr = (DATA_TYPE *) (out_ptr_raw + index * out_block_stride);
+
+ const DATA_TYPE *upt_ptr = (const DATA_TYPE *) (upt_ptr_raw + index_element * upt_block_stride);
+
+ VEC_DATA_TYPE(DATA_TYPE, N0) data_in0 = VLOAD(N0)(0, (__global DATA_TYPE *) upt_ptr);
+
+#ifdef SKIP_OUTPUT_READ
+ STORE_VECTOR_SELECT(data_in, DATA_TYPE, (__global DATA_TYPE *) out_ptr, N0, PARTIAL_N0, x_cond);
+#else // ifdef SKIP_OUTPUT_READ
+ VEC_DATA_TYPE(DATA_TYPE, N0) data_out0 = VLOAD(N0)(0, (__global DATA_TYPE *) out_ptr);
+ data_out0 = SCATTER_FUNCTION(data_out0, data_in0);
+
+ STORE_VECTOR_SELECT(data_out, DATA_TYPE, (__global DATA_TYPE *) out_ptr, N0, PARTIAL_N0, x_cond);
+#endif // ifdef SKIP_OUTPUT_READ
+ }
+}
+
+#endif // SCATTER_MP1D_2D_MPND
+
+#ifdef SCATTER1D_PARALLEL
+
+// NOTE: This code is non-deterministic and can only be executed with the "update" ScatterFunction
+// This code is currently unused as it requires changes to the existing test suite.
+/** Performs the Scatter1D operation with multiple threads.
+ * Similar to @ref scatter1D()
+ */
+__kernel void scatter1D_parallel(
+ TENSOR4D_DECLARATION(updates),
+ TENSOR4D_DECLARATION(indices),
+ TENSOR4D_DECLARATION(output))
+{
+ // Currently 1D - only iterate through x dimension of indices.
+ const int px = get_global_id(0);
+    const int index_value = *(__global int *)(indices_ptr + indices_offset_first_element_in_bytes + (sizeof(int) * px));
+
+ if(index_value < OUT_SHAPE_X)
+ {
+        const DATA_TYPE update = *(__global DATA_TYPE *)(updates_ptr + updates_offset_first_element_in_bytes + (sizeof(DATA_TYPE) * px));
+        __global uchar *out_addr = output_ptr + output_offset_first_element_in_bytes + (sizeof(DATA_TYPE) * index_value);
+ *(__global DATA_TYPE *)(out_addr) = update;
+ }
+}
+
+#endif // SCATTER1D_PARALLEL
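scatter_mp1d_2d_mpnd walks the index tensor one tuple at a time, rejects out-of-bounds tuples, and flattens each remaining tuple row-major against the outermost output shape before applying SCATTER_FUNCTION to the addressed data block. A minimal host-side sketch of that flattening step, with illustrative names:

#include <stdbool.h>

/* idx: one INDEX_LENGTH-long tuple; out_shape: the outermost output extents in
 * the same order the kernel uses. Returns false for out-of-bounds tuples. */
static bool flatten_index(const int *idx, const int *out_shape,
                          int index_length, int *flat)
{
    int f = 0;
    for (int i = 0; i < index_length; ++i)
    {
        if (idx[i] < 0 || idx[i] >= out_shape[i])
            return false;              /* skipped by the kernel as well */
        f = f * out_shape[i] + idx[i]; /* row-major accumulation        */
    }
    *flat = f; /* scaled by out_block_stride to address the output block */
    return true;
}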
diff --git a/src/core/CL/cl_kernels/select.cl b/src/core/CL/cl_kernels/common/select.cl
index 4d22d5bf07..6fd4bd4ce3 100644
--- a/src/core/CL/cl_kernels/select.cl
+++ b/src/core/CL/cl_kernels/common/select.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -86,7 +86,7 @@ __kernel void select_same_rank(
// Calculate result
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- res0 = select(in_y, in_x, in_c > (SELECT_VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))0);
+ res0 = select(in_y, in_x, CONVERT(in_c > (SELECT_VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))0, SELECT_VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)));
// Boundary-aware store
STORE_VECTOR_SELECT(res, DATA_TYPE, (__global DATA_TYPE *)out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
@@ -152,7 +152,7 @@ __kernel void select_different_rank_2(
// Calculate result
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- res0 = select(in_y, in_x, in_c > (SELECT_VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))0);
+ res0 = select(in_y, in_x, CONVERT(in_c > (SELECT_VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))0, SELECT_VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)));
// Boundary-aware store
STORE_VECTOR_SELECT(res, DATA_TYPE, (__global DATA_TYPE *)out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
@@ -220,7 +220,7 @@ __kernel void select_different_rank_n(
// Calculate result
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- res0 = select(in_y, in_x, in_c > (SELECT_VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))0);
+ res0 = select(in_y, in_x, CONVERT(in_c > (SELECT_VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))0, SELECT_VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)));
// Boundary-aware store
STORE_VECTOR_SELECT(res, DATA_TYPE, (__global DATA_TYPE *)out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
diff --git a/src/core/CL/cl_kernels/slice_ops.cl b/src/core/CL/cl_kernels/common/slice_ops.cl
index dc3ffd91c1..189d414aba 100644
--- a/src/core/CL/cl_kernels/slice_ops.cl
+++ b/src/core/CL/cl_kernels/common/slice_ops.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -28,7 +28,7 @@
* @attention Supported tensor rank: up to 4
*
* @attention Data type can be passed using the -DDATA_TYPE compile flag, e.g. -DDATA_TYPE=float
- * @attention Input and output tensor dephts should be given as a preprocessor arguments using -DSRC_DEPTH=size. and -DDST_DEPTH=size
+ * @attention Output tensor depth should be given as a preprocessor argument using -DDST_DEPTH=size
* @attention Absolute start coordinates for each dimension should be given as preprocessor -DSTART_index=value e.g. -DSTART_0=2
* @attention Strides for each dimension should be given as preprocessor -DSTRIDE_index=value e.g. -DSTRIDE_1=1
*
@@ -58,7 +58,7 @@ __kernel void strided_slice(
TENSOR4D_DECLARATION(output))
{
// Get pixels pointer
- Tensor4D input = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, SRC_DEPTH);
+ Tensor4D input = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input);
Tensor4D output = CONVERT_TO_TENSOR4D_STRUCT(output, DST_DEPTH);
int offset = 0;
diff --git a/src/core/CL/cl_kernels/common/softmax_layer.cl b/src/core/CL/cl_kernels/common/softmax_layer.cl
new file mode 100644
index 0000000000..bfc0995bb8
--- /dev/null
+++ b/src/core/CL/cl_kernels/common/softmax_layer.cl
@@ -0,0 +1,371 @@
+/*
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "helpers.h"
+
+#define MIN_VALUE_float -FLT_MAX
+#define MIN_VALUE_half -HALF_MAX
+#define MIN_VALUE_char CHAR_MIN
+#define MIN_VALUE_uchar 0
+
+#define MIN_VALUE_TYPE_STR(data_type) MIN_VALUE_##data_type
+#define MIN_VALUE_TYPE(data_type) MIN_VALUE_TYPE_STR(data_type)
+#define MIN_VALUE MIN_VALUE_TYPE(DATA_TYPE)
+
+#ifdef SOFTMAX_X
+
+/** 3-pass softmax in the x dimension.
+ *
+ * List of preprocessors:
+ * - DATA_TYPE: the input/output data type.
+ * - TMP_DATA_TYPE: the data type used for computing and temporary tensor storage.
+ * If DATA_TYPE is quantized, TMP_DATA_TYPE is floating-point, otherwise TMP_DATA_TYPE is the same as DATA_TYPE.
+ * - IS_LOG (optional): indicating whether this is log softmax.
+ * - LENGTH: the number of elements along the softmax axis in the input/output tensors.
+ * - BETA: the beta coefficient.
+ * - IS_QUANTIZED (optional): indicating whether the input/output data type is quantized data.
+ * - VEC_SIZE: the size of the vector.
+ *
+ * Additional preprocessors in case IS_QUANTIZED is present:
+ * - SRC_SCALE and SRC_OFFSET: the quantization information of the source tensor.
+ * - DST_SCALE and DST_OFFSET: the quantization information of the destination tensor.
+ *
+ * @param[in] src_ptr Pointer to the source tensor.
+ * @param[in] src_stride_0 Stride in bytes of the source tensor in the dimension corresponding to global ID 0.
+ * @param[in] src_stride_1 Stride in bytes of the source tensor in the dimension corresponding to global ID 1.
+ * @param[in] src_stride_2 Stride in bytes of the source tensor in the dimension corresponding to global ID 2.
+ * @param[in] src_offset_first_element Offset of the first element in the source tensor.
+ * @param[in] dst_ptr Pointer to the destination tensor.
+ * @param[in] dst_stride_0 Stride in bytes of the destination tensor in the dimension corresponding to global ID 0.
+ * @param[in] dst_stride_1 Stride in bytes of the destination tensor in the dimension corresponding to global ID 1.
+ * @param[in] dst_stride_2 Stride in bytes of the destination tensor in the dimension corresponding to global ID 2.
+ * @param[in] dst_offset_first_element Offset of the first element in the destination tensor.
+ * @param[in] tmp_ptr Pointer to the temporary tensor.
+ * @param[in] tmp_stride_0 Stride in bytes of the temporary tensor in the dimension corresponding to global ID 0.
+ * @param[in] tmp_stride_1 Stride in bytes of the temporary tensor in the dimension corresponding to global ID 1.
+ * @param[in] tmp_stride_2 Stride in bytes of the temporary tensor in the dimension corresponding to global ID 2.
+ * @param[in] tmp_offset_first_element Offset of the first element in the temporary tensor.
+ */
+__kernel void softmax_x(
+ __global uchar *src_ptr,
+ uint src_stride_0,
+ uint src_stride_1,
+ uint src_stride_2,
+ uint src_offset_first_element,
+
+ __global uchar *dst_ptr,
+ uint dst_stride_0,
+ uint dst_stride_1,
+ uint dst_stride_2,
+ uint dst_offset_first_element
+
+#ifdef IS_QUANTIZED
+ ,
+ __global uchar *tmp_ptr,
+ uint tmp_stride_0,
+ uint tmp_stride_1,
+ uint tmp_stride_2,
+ uint tmp_offset_first_element
+#endif // IS_QUANTIZED
+)
+{
+ const int dim_0 = get_global_id(0);
+ const int dim_1 = get_global_id(1);
+ const int dim_2 = get_global_id(2);
+
+ src_ptr += src_offset_first_element + dim_2 * src_stride_2 + dim_1 * src_stride_1 + dim_0 * src_stride_0;
+ dst_ptr += dst_offset_first_element + dim_2 * dst_stride_2 + dim_1 * dst_stride_1 + dim_0 * dst_stride_0;
+
+#ifdef IS_QUANTIZED
+ tmp_ptr += tmp_offset_first_element + dim_2 * tmp_stride_2 + dim_1 * tmp_stride_1 + dim_0 * tmp_stride_0;
+#else // IS_QUANTIZED
+ __global uchar *tmp_ptr = dst_ptr;
+#endif // IS_QUANTIZED
+
+ // Calculate max value.
+ DATA_TYPE max_value = MIN_VALUE;
+ int i = 0;
+
+ for (i = 0; i < LENGTH - VEC_SIZE; i += VEC_SIZE)
+ {
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_ptr + i * sizeof(DATA_TYPE)));
+
+ max_value = max(max_value, MAX_REDUCE(data, VEC_SIZE));
+ }
+
+ for (; i < LENGTH; ++i)
+ {
+ DATA_TYPE data = *(__global DATA_TYPE *)(src_ptr + i * sizeof(DATA_TYPE));
+
+ max_value = max(max_value, data);
+ }
+
+ // Regularize the data.
+ TMP_DATA_TYPE sum_value = 0;
+
+#ifdef IS_QUANTIZED
+ TMP_DATA_TYPE max_value_f = (CONVERT(max_value, TMP_DATA_TYPE) - SRC_OFFSET) * SRC_SCALE;
+ TMP_DATA_TYPE regularize_offset = -SRC_OFFSET * SRC_SCALE * (TMP_DATA_TYPE)BETA - max_value_f * (TMP_DATA_TYPE)BETA;
+# define REGULARIZE(x) ((x) * SRC_SCALE * (TMP_DATA_TYPE)BETA + regularize_offset)
+#else // IS_QUANTIZED
+# define REGULARIZE(x) (((x) - max_value) * (TMP_DATA_TYPE)BETA)
+#endif // IS_QUANTIZED
+
+ for (i = 0; i < LENGTH - VEC_SIZE; i += VEC_SIZE)
+ {
+ VEC_DATA_TYPE(TMP_DATA_TYPE, VEC_SIZE) data = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_ptr + i * sizeof(DATA_TYPE))), VEC_DATA_TYPE(TMP_DATA_TYPE, VEC_SIZE));
+
+ data = REGULARIZE(data);
+
+#ifdef IS_LOG
+ sum_value += SUM_REDUCE(exp(data), VEC_SIZE);
+#else // IS_LOG
+ data = exp(data);
+ sum_value += SUM_REDUCE(data, VEC_SIZE);
+#endif // IS_LOG
+
+ VSTORE(VEC_SIZE)(data, 0, (__global TMP_DATA_TYPE *)(tmp_ptr + i * sizeof(TMP_DATA_TYPE)));
+ }
+
+ for (; i < LENGTH; ++i)
+ {
+ TMP_DATA_TYPE data = CONVERT(*(__global DATA_TYPE *)(src_ptr + i * sizeof(DATA_TYPE)), TMP_DATA_TYPE);
+
+ data = REGULARIZE(data);
+
+#ifdef IS_LOG
+ sum_value += exp(data);
+#else // IS_LOG
+ data = exp(data);
+ sum_value += data;
+#endif // IS_LOG
+
+ *(__global TMP_DATA_TYPE *)(tmp_ptr + i * sizeof(TMP_DATA_TYPE)) = data;
+ }
+
+#undef REGULARIZE
+
+ // Normalize the data.
+#ifdef IS_QUANTIZED
+# if IS_LOG
+ TMP_DATA_TYPE norm_offset = -log(sum_value) + DST_OFFSET;
+# define NORMALIZE(SIZE, x) CONVERT_SAT_ROUND((x) / DST_SCALE + norm_offset, VEC_DATA_TYPE(DATA_TYPE, SIZE), rte)
+# else // IS_LOG
+ TMP_DATA_TYPE norm_div = sum_value * DST_SCALE;
+# define NORMALIZE(SIZE, x) CONVERT_SAT(add_sat(CONVERT_SAT_ROUND((x) / norm_div, VEC_DATA_TYPE(int, SIZE), rte), DST_OFFSET), VEC_DATA_TYPE(DATA_TYPE, SIZE))
+# endif // IS_LOG
+#else // IS_QUANTIZED
+# if IS_LOG
+# define NORMALIZE(SIZE, x) ((x) - log(sum_value))
+# else // IS_LOG
+# define NORMALIZE(SIZE, x) ((x) / sum_value)
+# endif // IS_LOG
+#endif // IS_QUANTIZED
+
+ for (i = 0; i < LENGTH - VEC_SIZE; i += VEC_SIZE)
+ {
+ VEC_DATA_TYPE(TMP_DATA_TYPE, VEC_SIZE) data = VLOAD(VEC_SIZE)(0, (__global TMP_DATA_TYPE *)(tmp_ptr + i * sizeof(TMP_DATA_TYPE)));
+
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) result = NORMALIZE(VEC_SIZE, data);
+
+ VSTORE(VEC_SIZE)(result, 0, (__global DATA_TYPE *)(dst_ptr + i * sizeof(DATA_TYPE)));
+ }
+
+ for (; i < LENGTH; ++i)
+ {
+ TMP_DATA_TYPE data = *(__global TMP_DATA_TYPE *)(tmp_ptr + i * sizeof(TMP_DATA_TYPE));
+
+ DATA_TYPE result = NORMALIZE(1, data);
+
+ *(__global DATA_TYPE *)(dst_ptr + i * sizeof(DATA_TYPE)) = result;
+ }
+
+#undef NORMALIZE
+}
+
+#endif // SOFTMAX_X
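softmax_x above is a plain 3-pass scheme: a max reduction, an exponentiation-and-sum pass on values shifted by the max and scaled by BETA, and a final normalization. A single-row float reference of the non-quantized, non-log case, for orientation only:

#include <math.h>
#include <float.h>

/* Minimal single-row reference of the 3-pass softmax (float, non-log),
 * with the beta coefficient applied as in the kernel; purely illustrative. */
static void softmax_row(const float *src, float *dst, int length, float beta)
{
    float max_value = -FLT_MAX;
    for (int i = 0; i < length; ++i) /* pass 1: max reduction */
        if (src[i] > max_value) max_value = src[i];

    float sum_value = 0.0f;
    for (int i = 0; i < length; ++i) /* pass 2: exponentiate and sum */
    {
        dst[i] = expf((src[i] - max_value) * beta);
        sum_value += dst[i];
    }

    for (int i = 0; i < length; ++i) /* pass 3: normalize */
        dst[i] /= sum_value;
}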
+
+#ifdef SOFTMAX_NON_X
+
+/** 3-pass softmax in any dimension higher than the x dimension.
+ *
+ * List of preprocessors:
+ * - DATA_TYPE: the input/output data type.
+ * - TMP_DATA_TYPE: the data type used for computing and temporary tensor storage.
+ * If DATA_TYPE is quantized, TMP_DATA_TYPE is floating-point, otherwise TMP_DATA_TYPE is the same as DATA_TYPE.
+ * - IS_LOG (optional): indicating whether this is log softmax.
+ * - LENGTH: the number of elements along the softmax axis in the input/output tensors.
+ * - BETA: the beta coefficient.
+ * - IS_QUANTIZED (optional): indicating whether the input/output data type is quantized data.
+ * - VEC_SIZE: the size of the vector.
+ * - VEC_SIZE_LEFTOVER: the size of the leftover part.
+ *
+ * Additional preprocessors in case IS_QUANTIZED is present:
+ * - SRC_SCALE and SRC_OFFSET: the quantization information of the source tensor.
+ * - DST_SCALE and DST_OFFSET: the quantization information of the destination tensor.
+ *
+ * @param[in] src_ptr Pointer to the source tensor.
+ * @param[in] src_stride_0 Stride in bytes of the source tensor in the dimension corresponding to global ID 0.
+ * @param[in] src_stride_1 Stride in bytes of the source tensor in the dimension corresponding to global ID 1.
+ * @param[in] src_stride_2 Stride in bytes of the source tensor in the dimension corresponding to global ID 2.
+ * @param[in] src_offset_first_element Offset of the first element in the source tensor.
+ * @param[in] dst_ptr Pointer to the destination tensor.
+ * @param[in] dst_stride_0 Stride in bytes of the destination tensor in the dimension corresponding to global ID 0.
+ * @param[in] dst_stride_1 Stride in bytes of the destination tensor in the dimension corresponding to global ID 1.
+ * @param[in] dst_stride_2 Stride in bytes of the destination tensor in the dimension corresponding to global ID 2.
+ * @param[in] dst_offset_first_element Offset of the first element in the destination tensor.
+ * @param[in] tmp_ptr Pointer to the temporary tensor.
+ * @param[in] tmp_stride_0 Stride in bytes of the temporary tensor in the dimension corresponding to global ID 0.
+ * @param[in] tmp_stride_1 Stride in bytes of the temporary tensor in the dimension corresponding to global ID 1.
+ * @param[in] tmp_stride_2 Stride in bytes of the temporary tensor in the dimension corresponding to global ID 2.
+ * @param[in] tmp_offset_first_element Offset of the first element in the temporary tensor.
+ */
+__kernel void softmax_non_x(
+ __global uchar *src_ptr,
+ uint src_stride_0,
+ uint src_stride_1,
+ uint src_stride_2,
+ uint src_offset_first_element,
+
+ __global uchar *dst_ptr,
+ uint dst_stride_0,
+ uint dst_stride_1,
+ uint dst_stride_2,
+ uint dst_offset_first_element,
+
+ __global uchar *tmp_ptr,
+ uint tmp_stride_0,
+ uint tmp_stride_1,
+ uint tmp_stride_2,
+ uint tmp_offset_first_element,
+
+ uint src_stride_axis,
+ uint dst_stride_axis
+)
+{
+ const int dim_0 = max((int)get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE, 0);
+ const int dim_1 = get_global_id(1);
+ const int dim_2 = get_global_id(2);
+
+ src_ptr += src_offset_first_element + dim_2 * src_stride_2 + dim_1 * src_stride_1 + dim_0 * src_stride_0;
+ dst_ptr += dst_offset_first_element + dim_2 * dst_stride_2 + dim_1 * dst_stride_1 + dim_0 * dst_stride_0;
+ tmp_ptr += tmp_offset_first_element + dim_2 * tmp_stride_2 + dim_1 * tmp_stride_1 + dim_0 * tmp_stride_0;
+
+ // In case of processing quantized data, i.e. DATA_TYPE is smaller than TMP_DATA_TYPE:
+ //
+ // In the first pass (finding max), the quantized data is copied from the input tensor to the temporary tensor.
+ // Dequantization is not needed to find the max value and since dequantization widens the data, we defer it
+    // to the second pass to reduce the memory bandwidth of the first pass.
+ //
+ // In the second pass, it reads the quantized data from the temporary tensor and writes the dequantized data
+ // back to the temporary tensor.
+ //
+    // To avoid dequantized data overwriting the unprocessed quantized data in the temporary tensor,
+ // this extra offset is introduced to store the quantized data at the end of the temporary tensor.
+ //
+    // Note: Another approach is to perform the second pass in reverse order, but for an unexplained reason
+    // it doesn't work on some devices.
+ uint tmp_extra_offset = LENGTH * VEC_SIZE * (sizeof(TMP_DATA_TYPE) - sizeof(DATA_TYPE));
+
+ // Calculate max value and store the input data to the temporary tensor in suitable format.
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) max_value = MIN_VALUE;
+ int i = 0;
+
+ for (i = 0; i < LENGTH; ++i)
+ {
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_ptr + i * src_stride_axis));
+
+ max_value = max(max_value, data);
+
+ VSTORE(VEC_SIZE)(data, 0, (__global DATA_TYPE *)(tmp_ptr + tmp_extra_offset + i * VEC_SIZE * sizeof(DATA_TYPE)));
+ }
+
+ // Regularize the data.
+ VEC_DATA_TYPE(TMP_DATA_TYPE, VEC_SIZE) sum_value = 0;
+
+#ifdef IS_QUANTIZED
+ VEC_DATA_TYPE(TMP_DATA_TYPE, VEC_SIZE) max_value_f = (CONVERT(max_value, VEC_DATA_TYPE(TMP_DATA_TYPE, VEC_SIZE)) - SRC_OFFSET) * SRC_SCALE;
+ VEC_DATA_TYPE(TMP_DATA_TYPE, VEC_SIZE) regularize_offset = -SRC_OFFSET * SRC_SCALE * (TMP_DATA_TYPE)BETA - max_value_f * (TMP_DATA_TYPE)BETA;
+# define REGULARIZE(x) ((x) * SRC_SCALE * (TMP_DATA_TYPE)BETA + regularize_offset)
+#else // IS_QUANTIZED
+# define REGULARIZE(x) (((x) - max_value) * (TMP_DATA_TYPE)BETA)
+#endif // IS_QUANTIZED
+
+ for (i = 0; i < LENGTH; ++i)
+ {
+ VEC_DATA_TYPE(TMP_DATA_TYPE, VEC_SIZE) data = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(tmp_ptr + tmp_extra_offset + i * VEC_SIZE * sizeof(DATA_TYPE))), VEC_DATA_TYPE(TMP_DATA_TYPE, VEC_SIZE));
+
+ data = REGULARIZE(data);
+
+#ifdef IS_LOG
+ sum_value += exp(data);
+#else // IS_LOG
+ data = exp(data);
+ sum_value += data;
+#endif // IS_LOG
+
+ VSTORE(VEC_SIZE)(data, 0, (__global TMP_DATA_TYPE *)(tmp_ptr + i * VEC_SIZE * sizeof(TMP_DATA_TYPE)));
+ }
+
+#undef REGULARIZE
+
+ // Normalize the data.
+#ifdef IS_QUANTIZED
+# if IS_LOG
+ VEC_DATA_TYPE(TMP_DATA_TYPE, VEC_SIZE) norm_offset = -log(sum_value) + DST_OFFSET;
+# define NORMALIZE(x) CONVERT_SAT_ROUND((x) / DST_SCALE + norm_offset, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE), rte)
+# else // IS_LOG
+ VEC_DATA_TYPE(TMP_DATA_TYPE, VEC_SIZE) norm_div = sum_value * DST_SCALE;
+# define NORMALIZE(x) CONVERT_SAT(add_sat(CONVERT_SAT_ROUND((x) / norm_div, VEC_DATA_TYPE(int, VEC_SIZE), rte), DST_OFFSET), VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))
+# endif // IS_LOG
+#else // IS_QUANTIZED
+# if IS_LOG
+# define NORMALIZE(x) ((x) - log(sum_value))
+# else // IS_LOG
+# define NORMALIZE(x) ((x) / sum_value)
+# endif // IS_LOG
+#endif // IS_QUANTIZED
+
+ for (i = 0; i < LENGTH; ++i)
+ {
+ VEC_DATA_TYPE(TMP_DATA_TYPE, VEC_SIZE) data = VLOAD(VEC_SIZE)(0, (__global TMP_DATA_TYPE *)(tmp_ptr + i * VEC_SIZE * sizeof(TMP_DATA_TYPE)));
+
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) result0 = NORMALIZE(data);
+
+ STORE_VECTOR_SELECT(result, DATA_TYPE, dst_ptr + i * dst_stride_axis, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
+ }
+
+#undef NORMALIZE
+}
+
+#endif // SOFTMAX_NON_X
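For quantized data the NORMALIZE macros fold the output quantization into the last pass: in the non-log case each dequantized exponential is divided by sum_value * DST_SCALE, rounded, offset by DST_OFFSET and saturated. A scalar sketch assuming DATA_TYPE is uchar (illustrative only, not the kernel's macro expansion):

#include <math.h>
#include <stdint.h>

/* x is the already dequantized, exponentiated value from the second pass. */
static uint8_t normalize_quantized(float x, float sum_value,
                                   float dst_scale, int dst_offset)
{
    int q = (int)lrintf(x / (sum_value * dst_scale)) + dst_offset;
    if (q < 0)   q = 0;
    if (q > 255) q = 255; /* saturate to the uchar output range */
    return (uint8_t)q;
}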
+
+#undef MIN_VALUE
+#undef MIN_VALUE_TYPE
+#undef MIN_VALUE_TYPE_STR
+
+#undef MIN_VALUE_float
+#undef MIN_VALUE_half
+#undef MIN_VALUE_char
+#undef MIN_VALUE_uchar
diff --git a/src/core/CL/cl_kernels/stack_layer.cl b/src/core/CL/cl_kernels/common/stack_layer.cl
index 438e858df2..2468bf750d 100644
--- a/src/core/CL/cl_kernels/stack_layer.cl
+++ b/src/core/CL/cl_kernels/common/stack_layer.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/src/core/CL/cl_kernels/tile.cl b/src/core/CL/cl_kernels/common/tile.cl
index 79da7fe6b9..4d8f802ea1 100644
--- a/src/core/CL/cl_kernels/tile.cl
+++ b/src/core/CL/cl_kernels/common/tile.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -50,8 +50,8 @@ __kernel void tile(
TENSOR4D_DECLARATION(input),
TENSOR4D_DECLARATION(output))
{
- Tensor4D output = CONVERT_TO_TENSOR4D_STRUCT(output, DST_DEPTH);
- Tensor4D input = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, SRC_DEPTH);
+ Tensor4D output = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output);
+ Tensor4D input = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input);
// For all coordinates but x, each tile copies from the input
const int y = get_global_id(1);
@@ -62,22 +62,18 @@ __kernel void tile(
// If we are loading/storing multiple elements at time, we need to
// not exceed the input boundaries. The last threads need to backtrack
// of OFFSET elements. Those elements cumulates for previous tiles
- const int id = (int)(get_global_id(0));
- int x = id * VEC_SIZE;
- // Shift x based on the previous offsets
- const int tile_number = x / SRC_WIDTH;
- x -= (tile_number) * OFFSET;
- int x_input = x % SRC_WIDTH;
+ const int id = (int)(get_global_id(0));
+ const int multiple_no = id / SRC_WIDTH_TILES;
+ const int tile_no = id % SRC_WIDTH_TILES;
+ const int last_tile = (int)(tile_no == SRC_WIDTH_TILES - 1);
- // Shift x based on being the last tile
- const int last_tile = (int)(x_input + VEC_SIZE > SRC_WIDTH);
- x -= last_tile * OFFSET;
- x_input = x % SRC_WIDTH;
- output.ptr -= (tile_number + last_tile) * OFFSET * output_stride_x;
+ const int x_input = tile_no * VEC_SIZE - last_tile * OFFSET;
+ const int x_output = multiple_no * SRC_WIDTH + x_input;
- // Update the input pointer
- input.ptr = tensor4D_offset(&input, x_input, y % SRC_HEIGHT, z % SRC_DEPTH, batch % SRC_BATCHES);
+ // Update the input and output pointers.
+ input.ptr = tensor4D_offset(&input, x_input, y % SRC_HEIGHT, z % SRC_DEPTH, batch % SRC_BATCHES);
+ output.ptr = tensor4D_offset(&output, x_output, y, z, batch);
// Copy the data
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
@@ -88,8 +84,9 @@ __kernel void tile(
#else // !defined(VEC_SIZE) || !defined(OFFSET)
const int x = get_global_id(0);
- // Update the input pointer
- input.ptr = tensor4D_offset(&input, x % SRC_WIDTH, y % SRC_HEIGHT, z % SRC_DEPTH, batch % SRC_BATCHES);
+ // Update the input and output pointers.
+ input.ptr = tensor4D_offset(&input, x % SRC_WIDTH, y % SRC_HEIGHT, z % SRC_DEPTH, batch % SRC_BATCHES);
+ output.ptr = tensor4D_offset(&output, x, y, z, batch);
*((__global DATA_TYPE *)(output.ptr)) = *((__global DATA_TYPE *)(input.ptr));
#endif // defined(VEC_SIZE) && defined(OFFSET)
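The rewritten vectorized path in tile derives, from the work-item id alone, which repetition of the source row it serves and which VEC_SIZE tile inside that repetition, backtracking the last tile by OFFSET so the vector access stays within SRC_WIDTH. A host-side recomputation of those indices, assuming OFFSET is the last tile's overshoot (presumably SRC_WIDTH_TILES * VEC_SIZE - SRC_WIDTH); a sketch, not the kernel itself:

static void tile_coords(int id, int vec_size, int src_width, int src_width_tiles,
                        int offset, int *x_input, int *x_output)
{
    const int multiple_no = id / src_width_tiles; /* which copy of the source row  */
    const int tile_no     = id % src_width_tiles; /* which tile inside that copy   */
    const int last_tile   = (tile_no == src_width_tiles - 1);

    *x_input  = tile_no * vec_size - last_tile * offset; /* load start in the source    */
    *x_output = multiple_no * src_width + *x_input;      /* store start in the output   */
}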
diff --git a/src/core/CL/cl_kernels/transpose.cl b/src/core/CL/cl_kernels/common/transpose.cl
index 82db2908b5..5b4c68ca10 100644
--- a/src/core/CL/cl_kernels/transpose.cl
+++ b/src/core/CL/cl_kernels/common/transpose.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -124,23 +124,28 @@
* @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source matrix in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] src_offset_first_element_in_bytes The offset of the first element in the source matrix
* @param[out] dst_ptr Pointer to the destination matrix Supported data type: same as src_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
* @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination matrix in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_gx_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
*/
-__kernel void transpose(IMAGE_DECLARATION(src),
- IMAGE_DECLARATION(dst))
+__kernel void transpose(TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst))
{
uint x_offs = max((int)(get_global_id(0) * VEC_SIZE_X - (VEC_SIZE_X - VEC_SIZE_LEFTOVER_X) % VEC_SIZE_X), 0);
uint y_offs = max((int)(get_global_id(1) * VEC_SIZE_Y - (VEC_SIZE_Y - VEC_SIZE_LEFTOVER_Y) % VEC_SIZE_Y), 0);
+ uint z_offs = get_global_id(2);
// Compute addresses
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x_offs * DATA_TYPE_IN_BYTES + y_offs * src_stride_y;
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + y_offs * DATA_TYPE_IN_BYTES + x_offs * dst_stride_y;
+ __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x_offs * DATA_TYPE_IN_BYTES + y_offs * src_stride_y + z_offs * src_stride_z;
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + y_offs * DATA_TYPE_IN_BYTES + x_offs * dst_stride_y + z_offs * dst_stride_z;
// Load the NxM block at (x, y)
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X)
@@ -237,4 +242,4 @@ __kernel void transpose(IMAGE_DECLARATION(src),
VEC_SIZE_LEFTOVER_Y != 0 && get_global_id(1) == 0);
}
-#endif // defined(DATA_TYPE_IN_BYTES) && defined(VEC_SIZE_X) && defined(VEC_SIZE_LEFTOVER_X) && defined(VEC_SIZE_Y) && defined(VEC_SIZE_LEFTOVER_Y) \ No newline at end of file
+#endif // defined(DATA_TYPE_IN_BYTES) && defined(VEC_SIZE_X) && defined(VEC_SIZE_LEFTOVER_X) && defined(VEC_SIZE_Y) && defined(VEC_SIZE_LEFTOVER_Y)
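With the switch from IMAGE_DECLARATION to TENSOR3D_DECLARATION, transpose now processes one z-slice per get_global_id(2), adding the z stride to both the source and destination addresses. A minimal row-major host reference of the per-slice operation (illustrative; the kernel works with byte strides and vectorized blocks):

#include <stddef.h>

static void transpose_slices(const float *src, float *dst,
                             int width, int height, int depth)
{
    for (int z = 0; z < depth; ++z)          /* one independent slice per z   */
        for (int y = 0; y < height; ++y)
            for (int x = 0; x < width; ++x)  /* (x, y) in src maps to (y, x)  */
                dst[(size_t)z * width * height + (size_t)x * height + y] =
                    src[(size_t)z * width * height + (size_t)y * width + x];
}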
diff --git a/src/core/CL/cl_kernels/unpooling_layer.cl b/src/core/CL/cl_kernels/common/unpooling_layer.cl
index 457e9bf8f1..6662dc9360 100644
--- a/src/core/CL/cl_kernels/unpooling_layer.cl
+++ b/src/core/CL/cl_kernels/common/unpooling_layer.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/src/core/CL/cl_kernels/comparisons.cl b/src/core/CL/cl_kernels/comparisons.cl
deleted file mode 100644
index 408846144d..0000000000
--- a/src/core/CL/cl_kernels/comparisons.cl
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers.h"
-
-#define EQUAL(x, y) ((x) == (y))
-#define NOTEQUAL(x, y) ((x) != (y))
-#define GREATER(x, y) ((x) > (y))
-#define GREATEREQUAL(x, y) ((x) >= (y))
-#define LESS(x, y) ((x) < (y))
-#define LESSEQUAL(x, y) ((x) <= (y))
-
-#define DEFINE_KERNEL_STR(name) compare_##name
-#define DEFINE_KERNEL(name) DEFINE_KERNEL_STR(name)
-
-#define DEFINE_KERNEL_QUANTIZED_STR(name) compare_##name##_quantized
-#define DEFINE_KERNEL_QUANTIZED(name) DEFINE_KERNEL_QUANTIZED_STR(name)
-
-#if defined(DATA_TYPE) && defined(VEC_SIZE) && defined(OP) && defined(OP_NAME)
-/** This function compares two tensors.
- *
- * @attention The inputs' data type need to be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
- * @attention Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
- * @attention The comparison operation should be given as a preprocessor argument using -DOP=operation. e.g. -DOP=LESS
- *
- * @param[in] in1_ptr Pointer to the source tensor. Supported data types: All non-quantized data types.
- * @param[in] in1_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] in1_step_x in1_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] in1_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] in1_step_y in1_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] in1_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] in1_step_z in1_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] in1_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] in2_ptr Pointer to the source tensor. Supported data types: same as @p in1_ptr
- * @param[in] in2_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] in2_step_x in2_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] in2_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] in2_step_y in2_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] in2_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] in2_step_z in2_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] in2_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] out_ptr Pointer to the destination tensor. Supported data types: U8
- * @param[in] out_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] out_step_x out_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] out_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] out_step_y out_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] out_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] out_step_z out_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] out_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void DEFINE_KERNEL(OP_NAME)(
- TENSOR3D_DECLARATION(in1),
- TENSOR3D_DECLARATION(in2),
- TENSOR3D_DECLARATION(out))
-{
- // Get pixels pointer
- Tensor3D in1 = CONVERT_TO_TENSOR3D_STRUCT(in1);
- Tensor3D in2 = CONVERT_TO_TENSOR3D_STRUCT(in2);
- Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(out);
-
- // Load values
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- in_a = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)in1.ptr);
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- in_b = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)in2.ptr);
-
- // Calculate and store result
- VSTORE(VEC_SIZE)
- (CONVERT(OP(in_a, in_b), VEC_DATA_TYPE(uchar, VEC_SIZE)), 0, (__global uchar *)out.ptr);
-}
-#endif /* defined(DATA_TYPE) && defined(VEC_SIZE) && defined(OP) && defined(OP_NAME) */
-
-#if defined(OFFSET_IN1) && defined(OFFSET_IN2) && defined(SCALE_IN1) && defined(SCALE_IN2)
-/** This function compares two quantized tensors.
- *
- * @note The inputs' data type need to be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=uchar
- * @note The quantization offset of the first operand must be passed at compile time using -DOFFSET_IN1, i.e. -DOFFSET_IN1=10
- * @note The quantization offset of the second operand must be passed at compile time using -DOFFSET_IN2, i.e. -DOFFSET_IN2=10
- * @note The quantization scale of the first operand must be passed at compile time using -DSCALE_IN1, i.e. -DSCALE_IN1=10
- * @note The quantization scale of the second operand must be passed at compile time using -DSCALE_IN2, i.e. -DSCALE_IN2=10
- *
- * @param[in] in1_ptr Pointer to the source tensor. Supported data types: All quantized data types.
- * @param[in] in1_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] in1_step_x in1_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] in1_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] in1_step_y in1_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] in1_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] in1_step_z in1_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] in1_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] in2_ptr Pointer to the source tensor. Supported data types: same as @p in1_ptr
- * @param[in] in2_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] in2_step_x in2_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] in2_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] in2_step_y in2_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] in2_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] in2_step_z in2_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] in2_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] out_ptr Pointer to the destination tensor. Supported data types: U8
- * @param[in] out_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] out_step_x out_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] out_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] out_step_y out_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] out_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] out_step_z out_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] out_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void DEFINE_KERNEL_QUANTIZED(OP_NAME)(
- TENSOR3D_DECLARATION(in1),
- TENSOR3D_DECLARATION(in2),
- TENSOR3D_DECLARATION(out))
-{
- // Get pixels pointer
- Tensor3D in1 = CONVERT_TO_TENSOR3D_STRUCT(in1);
- Tensor3D in2 = CONVERT_TO_TENSOR3D_STRUCT(in2);
- Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(out);
-
- int16 in_a = CONVERT(vload16(0, (__global DATA_TYPE *)in1.ptr), int16);
- int16 in_b = CONVERT(vload16(0, (__global DATA_TYPE *)in2.ptr), int16);
-
- in_a = in_a - (int16)((int)OFFSET_IN1);
- in_b = in_b - (int16)((int)OFFSET_IN2);
-
- const float16 in1f32 = convert_float16(in_a) * (float16)((float)SCALE_IN1);
- const float16 in2f32 = convert_float16(in_b) * (float16)((float)SCALE_IN2);
- const int16 res = OP(in1f32, in2f32);
-
- // Store result
- vstore16(convert_uchar16(res), 0, (__global uchar *)out.ptr);
-}
-#endif /* defined(OFFSET_IN1) && defined(OFFSET_IN2) && defined(SCALE_IN1) && defined(SCALE_IN2) */ \ No newline at end of file
diff --git a/src/core/CL/cl_kernels/depthwise_convolution.cl b/src/core/CL/cl_kernels/depthwise_convolution.cl
deleted file mode 100644
index 8ce5617858..0000000000
--- a/src/core/CL/cl_kernels/depthwise_convolution.cl
+++ /dev/null
@@ -1,1879 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers.h"
-
-#include "activation_float_helpers.h"
-
-/** Get the pointer position at a certain offset in x and y direction.
- *
- * @param[in] ptr Pointer to the starting position of the buffer
- * @param[in] x Relative X position
- * @param[in] y Relative Y position
- * @param[in] stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] stride_y Stride of the source tensor in Y dimension (in bytes)
- *
- * @return a uchar
- */
-inline __global uchar *ptr_offset(__global uchar *ptr, const int x, const int y, const int stride_x, const int stride_y)
-{
- return ptr + x * stride_x + y * stride_y;
-}
-
-#if(DILATION_X == 1 && DILATION_Y == 1)
-
-#define CONVOLUTION1x3_BIFROST2X1_STRIDE1(acc, src0, weights_row0) \
- ({ \
- acc.s0 = fma(src0.s0, weights_row0.s0, acc.s0); \
- acc.s0 = fma(src0.s1, weights_row0.s1, acc.s0); \
- acc.s0 = fma(src0.s2, weights_row0.s2, acc.s0); \
- acc.s1 = fma(src0.s1, weights_row0.s0, acc.s1); \
- acc.s1 = fma(src0.s2, weights_row0.s1, acc.s1); \
- acc.s1 = fma(src0.s3, weights_row0.s2, acc.s1); \
- })
-
-#define CONVOLUTION1x3_BIFROST4X1_STRIDE1(acc, src0, weights_row0) \
- ({ \
- acc.s0 = fma(src0.s0, weights_row0.s0, acc.s0); \
- acc.s0 = fma(src0.s1, weights_row0.s1, acc.s0); \
- acc.s0 = fma(src0.s2, weights_row0.s2, acc.s0); \
- acc.s1 = fma(src0.s1, weights_row0.s0, acc.s1); \
- acc.s1 = fma(src0.s2, weights_row0.s1, acc.s1); \
- acc.s1 = fma(src0.s3, weights_row0.s2, acc.s1); \
- acc.s2 = fma(src0.s2, weights_row0.s0, acc.s2); \
- acc.s2 = fma(src0.s3, weights_row0.s1, acc.s2); \
- acc.s2 = fma(src0.s4, weights_row0.s2, acc.s2); \
- acc.s3 = fma(src0.s3, weights_row0.s0, acc.s3); \
- acc.s3 = fma(src0.s4, weights_row0.s1, acc.s3); \
- acc.s3 = fma(src0.s5, weights_row0.s2, acc.s3); \
- })
-
-#define CONVOLUTION1x3_BIFROST2X1_STRIDE2(acc, src0, src1, weights_row0) \
- ({ \
- acc.s0 = fma(src0.s0, weights_row0.s0, acc.s0); \
- acc.s0 = fma(src0.s1, weights_row0.s1, acc.s0); \
- acc.s0 = fma(src0.s2, weights_row0.s2, acc.s0); \
- acc.s1 = fma(src0.s2, weights_row0.s0, acc.s1); \
- acc.s1 = fma(src0.s3, weights_row0.s1, acc.s1); \
- acc.s1 = fma(src1.s0, weights_row0.s2, acc.s1); \
- })
-
-#define CONVOLUTION1x3_BIFROST4X1_STRIDE2(acc, src0, src1, weights_row0) \
- ({ \
- acc.s0 = fma(src0.s0, weights_row0.s0, acc.s0); \
- acc.s0 = fma(src0.s1, weights_row0.s1, acc.s0); \
- acc.s0 = fma(src0.s2, weights_row0.s2, acc.s0); \
- acc.s1 = fma(src0.s2, weights_row0.s0, acc.s1); \
- acc.s1 = fma(src0.s3, weights_row0.s1, acc.s1); \
- acc.s1 = fma(src0.s4, weights_row0.s2, acc.s1); \
- acc.s2 = fma(src0.s4, weights_row0.s0, acc.s2); \
- acc.s2 = fma(src0.s5, weights_row0.s1, acc.s2); \
- acc.s2 = fma(src0.s6, weights_row0.s2, acc.s2); \
- acc.s3 = fma(src0.s6, weights_row0.s0, acc.s3); \
- acc.s3 = fma(src0.s7, weights_row0.s1, acc.s3); \
- acc.s3 = fma(src1.s0, weights_row0.s2, acc.s3); \
- })
-
-#else /* DILATION_X==1 && DILATION_Y==1 */
-
-#define CONVOLUTION1x3_BIFROST2X1_STRIDE1(acc, src0_left, src0_mid, src0_right, weights_row0) \
- ({ \
- acc.s0 = fma(src0_left.s0, weights_row0.s0, acc.s0); \
- acc.s0 = fma(src0_mid.s0, weights_row0.s1, acc.s0); \
- acc.s0 = fma(src0_right.s0, weights_row0.s2, acc.s0); \
- acc.s1 = fma(src0_left.s1, weights_row0.s0, acc.s1); \
- acc.s1 = fma(src0_mid.s1, weights_row0.s1, acc.s1); \
- acc.s1 = fma(src0_right.s1, weights_row0.s2, acc.s1); \
- })
-
-#define CONVOLUTION1x3_BIFROST2X1_STRIDE2(acc, src0_left, src0_mid, src0_right, weights_row0) \
- ({ \
- acc.s0 = fma(src0_left.s0, weights_row0.s0, acc.s0); \
- acc.s0 = fma(src0_mid.s0, weights_row0.s1, acc.s0); \
- acc.s0 = fma(src0_right.s0, weights_row0.s2, acc.s0); \
- acc.s1 = fma(src0_left.s2, weights_row0.s0, acc.s1); \
- acc.s1 = fma(src0_mid.s2, weights_row0.s1, acc.s1); \
- acc.s1 = fma(src0_right.s2, weights_row0.s2, acc.s1); \
- })
-
-#define CONVOLUTION1x3_BIFROST4X1_STRIDE1(acc, src0_left, src0_mid, src0_right, weights_row0) \
- ({ \
- acc.s0 = fma(src0_left.s0, weights_row0.s0, acc.s0); \
- acc.s0 = fma(src0_mid.s0, weights_row0.s1, acc.s0); \
- acc.s0 = fma(src0_right.s0, weights_row0.s2, acc.s0); \
- acc.s1 = fma(src0_left.s1, weights_row0.s0, acc.s1); \
- acc.s1 = fma(src0_mid.s1, weights_row0.s1, acc.s1); \
- acc.s1 = fma(src0_right.s1, weights_row0.s2, acc.s1); \
- acc.s2 = fma(src0_left.s2, weights_row0.s0, acc.s2); \
- acc.s2 = fma(src0_mid.s2, weights_row0.s1, acc.s2); \
- acc.s2 = fma(src0_right.s2, weights_row0.s2, acc.s2); \
- acc.s3 = fma(src0_left.s3, weights_row0.s0, acc.s3); \
- acc.s3 = fma(src0_mid.s3, weights_row0.s1, acc.s3); \
- acc.s3 = fma(src0_right.s3, weights_row0.s2, acc.s3); \
- })
-
-#define CONVOLUTION1x3_BIFROST4X1_STRIDE2(acc, src0_left, src0_mid, src0_right, weights_row0) \
- ({ \
- acc.s0 = fma(src0_left.s0, weights_row0.s0, acc.s0); \
- acc.s0 = fma(src0_mid.s0, weights_row0.s1, acc.s0); \
- acc.s0 = fma(src0_right.s0, weights_row0.s2, acc.s0); \
- acc.s1 = fma(src0_left.s2, weights_row0.s0, acc.s1); \
- acc.s1 = fma(src0_mid.s2, weights_row0.s1, acc.s1); \
- acc.s1 = fma(src0_right.s2, weights_row0.s2, acc.s1); \
- acc.s2 = fma(src0_left.s4, weights_row0.s0, acc.s2); \
- acc.s2 = fma(src0_mid.s4, weights_row0.s1, acc.s2); \
- acc.s2 = fma(src0_right.s4, weights_row0.s2, acc.s2); \
- acc.s3 = fma(src0_left.s6, weights_row0.s0, acc.s3); \
- acc.s3 = fma(src0_mid.s6, weights_row0.s1, acc.s3); \
- acc.s3 = fma(src0_right.s6, weights_row0.s2, acc.s3); \
- })
-
-#endif /* DILATION_X==1 && DILATION_Y==1 */
-
-#if defined(DEPTH_MULTIPLIER) && defined(DST_CHANNELS) && defined(IS_F32)
-#if defined(CONV_STRIDE_X)
-
-#if CONV_STRIDE_X == 1
-#define convolution1x3 convolution1x3_stride_1
-#elif CONV_STRIDE_X == 2
-#define convolution1x3 convolution1x3_stride_2
-#elif CONV_STRIDE_X == 3
-#define convolution1x3 convolution1x3_stride_3
-#else /* CONV_STRIDE_X */
-#error "Stride not supported"
-#endif /* CONV_STRIDE_X */
-
-/** Compute a 1D horizontal convolution of size 3 and stride 1 for floating point type.
- *
- * @param[in] left_pixel Pointer to the left pixel.
- * @param[in] left_coeff Weight of the left pixel
- * @param[in] middle_coeff Weight of the middle pixel
- * @param[in] right_coeff Weight of the right pixel
- *
- * @return a float2 containing 2 convoluted values.
- */
-inline float2 convolution1x3_stride_1(__global const uchar *left_pixel,
- const float left_coeff,
- const float middle_coeff,
- const float right_coeff)
-{
-#if(DILATION_X == 1 && DILATION_Y == 1)
- float4 temp = vload4(0, (__global float *)left_pixel);
-
- float2 left = CONVERT(temp.s01, float2);
- float2 middle = CONVERT(temp.s12, float2);
- float2 right = CONVERT(temp.s23, float2);
- return left * (float2)left_coeff + middle * (float2)middle_coeff + right * (float2)right_coeff;
-#else /* DILATION_X==1 && DILATION_Y==1 */
- return vload2(0, (__global float *)left_pixel) * (float2)left_coeff
- + vload2(0, (__global float *)(left_pixel) + DILATION_X) * (float2)middle_coeff
- + vload2(0, (__global float *)(left_pixel) + 2 * DILATION_X) * (float2)right_coeff;
-#endif /* DILATION_X==1 && DILATION_Y==1 */
-}
-
-/** Compute a 1D horizontal convolution of size 3 and stride 2 for floating point type.
- *
- * @param[in] left_pixel Pointer to the left pixel.
- * @param[in] left_coeff Weight of the left pixel
- * @param[in] middle_coeff Weight of the middle pixel
- * @param[in] right_coeff Weight of the right pixel
- *
- * @return a float2 containing 2 convoluted values.
- */
-inline float2 convolution1x3_stride_2(__global const uchar *left_pixel,
- const float left_coeff,
- const float middle_coeff,
- const float right_coeff)
-{
-#if(DILATION_X == 1 && DILATION_Y == 1)
- float4 temp0 = vload4(0, (__global float *)left_pixel);
- float temp1 = *((__global float *)(left_pixel + 4 * sizeof(float)));
-
- float2 left = CONVERT(temp0.s02, float2);
- float2 middle = CONVERT(temp0.s13, float2);
- float2 right = CONVERT((float2)(temp0.s2, temp1), float2);
-
- return left * (float2)left_coeff + middle * (float2)middle_coeff + right * (float2)right_coeff;
-#else /* DILATION_X==1 && DILATION_Y==1 */
- __global float *left_pixel_float = (__global float *)left_pixel;
-
- return vload4(0, left_pixel_float).s02 * (float2)left_coeff
- + vload4(0, left_pixel_float + DILATION_X).s02 * (float2)middle_coeff
- + vload4(0, left_pixel_float + DILATION_X * 2).s02 * (float2)right_coeff;
-
-#endif /* DILATION_X==1 && DILATION_Y==1 */
-}
-
-/** Compute a 1D horizontal convolution of size 3 and stride 3 for floating point type.
- *
- * @param[in] left_pixel Pointer to the left pixel.
- * @param[in] left_coeff Weight of the left pixel
- * @param[in] middle_coeff Weight of the middle pixel
- * @param[in] right_coeff Weight of the right pixel
- *
- * @return a float2 containing 2 convolved values.
- */
-inline float2 convolution1x3_stride_3(__global const uchar *left_pixel,
- const float left_coeff,
- const float middle_coeff,
- const float right_coeff)
-{
-#if(DILATION_X == 1 && DILATION_Y == 1)
- float4 temp0 = vload4(0, (__global float *)left_pixel);
- float2 temp1 = vload2(0, (__global float *)(left_pixel + 4 * sizeof(float)));
-
- float2 left = CONVERT(temp0.s03, float2);
- float2 middle = CONVERT((float2)(temp0.s1, temp1.s0), float2);
- float2 right = CONVERT((float2)(temp0.s2, temp1.s1), float2);
-
- return left * (float2)left_coeff + middle * (float2)middle_coeff + right * (float2)right_coeff;
-#else /* DILATION_X==1 && DILATION_Y==1 */
- __global float *left_pixel_float = (__global float *)left_pixel;
-
- return (float2)(*left_pixel_float, *(left_pixel_float + 3)) * (float2)left_coeff
- + (float2)(*(left_pixel_float + DILATION_X), *(left_pixel_float + DILATION_X + 3)) * (float2)middle_coeff
- + (float2)(*(left_pixel_float + DILATION_X * 2), *(left_pixel_float + DILATION_X * 2 + 3)) * (float2)right_coeff;
-#endif /* DILATION_X==1 && DILATION_Y==1 */
-}
-
-/** Apply a 3x3 convolution matrix to a single channel F32 input image and return the result.
- *
- * Convolution matrix layout:
- *
- * [ mat0, mat1, mat2 ]\n
- * [ mat3, mat4, mat5 ]\n
- * [ mat6, mat7, mat8 ]\n
- *
- * @param[in] src          Pointer to the source pixel data
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] mat0 Coefficient from the convolution matrix
- * @param[in] mat1 Coefficient from the convolution matrix
- * @param[in] mat2 Coefficient from the convolution matrix
- * @param[in] mat3 Coefficient from the convolution matrix
- * @param[in] mat4 Coefficient from the convolution matrix
- * @param[in] mat5 Coefficient from the convolution matrix
- * @param[in] mat6 Coefficient from the convolution matrix
- * @param[in] mat7 Coefficient from the convolution matrix
- * @param[in] mat8 Coefficient from the convolution matrix
- *
- * @return a float2 containing 2 convolved values.
- */
-inline float2 convolution3x3(
- __global const uchar *src,
- unsigned int src_stride_y,
- const float mat0, const float mat1, const float mat2,
- const float mat3, const float mat4, const float mat5,
- const float mat6, const float mat7, const float mat8)
-{
- float2 pixels;
-
- pixels = convolution1x3((src + 0 * DILATION_Y * src_stride_y), mat0, mat1, mat2);
- pixels += convolution1x3((src + 1 * DILATION_Y * src_stride_y), mat3, mat4, mat5);
- pixels += convolution1x3((src + 2 * DILATION_Y * src_stride_y), mat6, mat7, mat8);
-
- return pixels;
-}
-
-/** This OpenCL kernel computes the depthwise convolution 3x3
- *
- * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: F32
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: F32
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: F16/F32
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-__kernel void depthwise_convolution_3x3(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif //defined(HAS_BIAS)
-)
-{
- Image dst = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
-
- float2 pixels = 0.0f;
-
- // Extract channel and linearized batch indices
- const int channel = get_global_id(2) % DST_CHANNELS;
- const int batch = get_global_id(2) / DST_CHANNELS;
- // Load relevant input and weights data (Accounts for the depth multiplier when indexing the input, OFM = IFM * DEPTH_MULTIPLIER)
-
- __global uchar *weights_addr = weights.ptr + get_global_id(0) * weights_step_x + get_global_id(1) * weights_step_y + channel * weights_step_z;
-
- __global uchar *src_addr = src_ptr + get_global_id(0) * src_step_x + get_global_id(1) * src_step_y + get_global_id(2) * src_step_z - batch * (DST_CHANNELS / DEPTH_MULTIPLIER) *
- (DEPTH_MULTIPLIER - 1) * src_step_z - (channel - (channel / DEPTH_MULTIPLIER)) * src_step_z;
-
- // Load the weights
- float3 weights_values0 = vload3(0, (__global float *)(weights_addr + 0 * weights_stride_y));
- float3 weights_values1 = vload3(0, (__global float *)(weights_addr + 1 * weights_stride_y));
- float3 weights_values2 = vload3(0, (__global float *)(weights_addr + 2 * weights_stride_y));
-
- pixels = convolution3x3(src_addr, src_stride_y,
- weights_values0.s0, weights_values0.s1, weights_values0.s2,
- weights_values1.s0, weights_values1.s1, weights_values1.s2,
- weights_values2.s0, weights_values2.s1, weights_values2.s2);
-#if defined(HAS_BIAS)
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-
- float bias = *((__global float *)(vector_offset(&biases, channel)));
-
- pixels += (float2)bias;
-#endif //defined(HAS_BIAS)
-
- vstore2(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels, A_VAL, B_VAL), 0, (__global float *)dst.ptr);
-}
-#endif //defined(CONV_STRIDE_X)
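Each invocation of depthwise_convolution_3x3 stores a single float2, i.e. two consecutive output columns of one output row for one (channel, batch) slice, so a launch roughly covers the geometry sketched below. This is an assumption for illustration only; the library's real window configuration also accounts for steps and border handling, which this ignores:

/* Hypothetical sizing helper: ceil-divide the output width by the two columns
 * each work-item stores, full height in Y, channels * batches in Z. */
static void depthwise_3x3_global_size(size_t out_w, size_t out_h,
                                      size_t channels, size_t batches,
                                      size_t gws[3])
{
    gws[0] = (out_w + 1) / 2;
    gws[1] = out_h;
    gws[2] = channels * batches;
}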
-
-#if(DILATION_X > 1 || DILATION_Y > 1)
-
-/** Perform 3x3 convolution for stride_x=1 and stride_y=1 when DILATION_X>1 or DILATION_Y>1 for F32
- *
- * @param[in] src_addr Pointer to the starting position of where to perform the convolution
- * @param[in] stride_x_bytes   Stride of the source tensor in X dimension (in bytes)
- * @param[in] stride_y_bytes   Stride of the source tensor in Y dimension (in bytes)
- * @param[in] y_offset         Offset in the source tensor from which to start the convolution
- * @param[in] weights_addr     Pointer from where to get the weights
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- */
-inline float2 convolution_3x3_dilation_stridex1_stridey1_bifrost_f32(__global uchar *src_addr, const int stride_x_bytes, const int stride_y_bytes,
- const int y_offset, __global uchar *weights_addr, const int weights_stride_y)
-{
- // Load the weights
- float3 weights_row0 = vload3(0, (__global float *)(weights_addr + 0 * weights_stride_y));
- float3 weights_row1 = vload3(0, (__global float *)(weights_addr + 1 * weights_stride_y));
- float3 weights_row2 = vload3(0, (__global float *)(weights_addr + 2 * weights_stride_y));
-
- float2 pixels0 = 0.0f;
-
- float2 src00_left = vload2(0, (__global float *)ptr_offset(src_addr, 0, y_offset, stride_x_bytes, stride_y_bytes)); // Row0
- float2 src00_mid = vload2(0, (__global float *)ptr_offset(src_addr, DILATION_X, y_offset, stride_x_bytes, stride_y_bytes));
- float2 src00_right = vload2(0, (__global float *)ptr_offset(src_addr, 2 * DILATION_X, y_offset, stride_x_bytes, stride_y_bytes));
-
- float2 src10_left = vload2(0, (__global float *)ptr_offset(src_addr, 0, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes)); // Row1
- float2 src10_mid = vload2(0, (__global float *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes));
- float2 src10_right = vload2(0, (__global float *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes));
-
- float2 src20_left = vload2(0, (__global float *)ptr_offset(src_addr, 0, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); // Row2
- float2 src20_mid = vload2(0, (__global float *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes));
- float2 src20_right = vload2(0, (__global float *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes));
-
- CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels0, src00_left, src00_mid, src00_right, weights_row0);
- CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels0, src10_left, src10_mid, src10_right, weights_row1);
- CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels0, src20_left, src20_mid, src20_right, weights_row2);
-
- return pixels0;
-}
-
-/** Perform 3x3 convolution for stride_x=2 and stride_y=2 when DILATION_X>1 or DILATION_Y>1 for F32
- *
- * @param[in] src_addr Pointer to the starting position of where to perform the convolution
- * @param[in] stride_x_bytes   Stride of the source tensor in X dimension (in bytes)
- * @param[in] stride_y_bytes   Stride of the source tensor in Y dimension (in bytes)
- * @param[in] y_offset         Offset in the source tensor from which to start the convolution
- * @param[in] weights_addr     Pointer from where to get the weights
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- */
-inline float2 convolution_3x3_dilation_stridex2_stridey2_bifrost_f32(__global uchar *src_addr, const int stride_x_bytes, const int stride_y_bytes,
- const int y_offset, __global uchar *weights_addr, const int weights_stride_y)
-{
- // Load the weights
- float3 weights_row0 = vload3(0, (__global float *)(weights_addr + 0 * weights_stride_y));
- float3 weights_row1 = vload3(0, (__global float *)(weights_addr + 1 * weights_stride_y));
- float3 weights_row2 = vload3(0, (__global float *)(weights_addr + 2 * weights_stride_y));
-
- float2 pixels0 = 0.0f;
-
- float3 src00_left = vload3(0, (__global float *)ptr_offset(src_addr, 0, y_offset, stride_x_bytes, stride_y_bytes)); // Row0
- float3 src00_mid = vload3(0, (__global float *)ptr_offset(src_addr, DILATION_X, y_offset, stride_x_bytes, stride_y_bytes));
- float3 src00_right = vload3(0, (__global float *)ptr_offset(src_addr, 2 * DILATION_X, y_offset, stride_x_bytes, stride_y_bytes));
-
- float3 src10_left = vload3(0, (__global float *)ptr_offset(src_addr, 0, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes)); // Row1
- float3 src10_mid = vload3(0, (__global float *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes));
- float3 src10_right = vload3(0, (__global float *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes));
-
- float3 src20_left = vload3(0, (__global float *)ptr_offset(src_addr, 0, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); // Row2
- float3 src20_mid = vload3(0, (__global float *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes));
- float3 src20_right = vload3(0, (__global float *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes));
-
- CONVOLUTION1x3_BIFROST2X1_STRIDE2(pixels0, src00_left, src00_mid, src00_right, weights_row0);
- CONVOLUTION1x3_BIFROST2X1_STRIDE2(pixels0, src10_left, src10_mid, src10_right, weights_row1);
- CONVOLUTION1x3_BIFROST2X1_STRIDE2(pixels0, src20_left, src20_mid, src20_right, weights_row2);
-
- return pixels0;
-}
-
-#endif /* (DILATION_X > 1 || DILATION_Y > 1) */
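The dilation helpers above index the input through ptr_offset(), defined in the helpers included earlier in this file. It is presumably equivalent to the byte-offset computation below; this is an assumption, shown only so the addressing in the loads above is easy to follow:

/* Assumed semantics of ptr_offset(): advance x elements along X and y elements
 * along Y, with both strides expressed in bytes. */
inline __global uchar *ptr_offset_sketch(__global uchar *ptr, const int x, const int y,
                                         const int stride_x_bytes, const int stride_y_bytes)
{
    return ptr + x * stride_x_bytes + y * stride_y_bytes;
}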
-
-/** This OpenCL kernel is optimized for Bifrost architectures and computes the depthwise convolution 3x3 when both
- * stride_x and stride_y are equal to 1
- *
- * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
- * @note If activation function is enabled, the data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float.
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: F32
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: F32
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: F32
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-__kernel void depthwise_convolution_3x3_stridex1_stridey1_bifrost_f32(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif //defined(HAS_BIAS)
-)
-{
- Image dst = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
-
- float2 pixels0 = 0.0f;
- float2 pixels1 = 0.0f;
- float2 pixels2 = 0.0f;
- float2 pixels3 = 0.0f;
-
- // Extract channel and linearized batch indices
- const int channel = get_global_id(2) % DST_CHANNELS;
- const int batch = get_global_id(2) / DST_CHANNELS;
- // Load relevant input and weights data (Accounts for the depth multiplier when indexing the input, OFM = IFM * DEPTH_MULTIPLIER)
- __global uchar *weights_addr = weights.ptr + get_global_id(0) * weights_step_x + get_global_id(1) * weights_step_y + channel * weights_step_z;
- __global uchar *src_addr = src_ptr + get_global_id(0) * src_step_x + get_global_id(1) * src_step_y + get_global_id(2) * src_step_z - batch * (DST_CHANNELS / DEPTH_MULTIPLIER) *
- (DEPTH_MULTIPLIER - 1) * src_step_z - (channel - (channel / DEPTH_MULTIPLIER)) * src_step_z;
-
-#if(DILATION_X == 1 && DILATION_Y == 1)
- // Load the weights
- float3 weights_row0 = vload3(0, (__global float *)(weights_addr + 0 * weights_stride_y));
- float3 weights_row1 = vload3(0, (__global float *)(weights_addr + 1 * weights_stride_y));
- float3 weights_row2 = vload3(0, (__global float *)(weights_addr + 2 * weights_stride_y));
-
- // Note: Since each work-item computes 4x2 elements, we need to load 6 rows from the input tensor
- float4 src00 = vload4(0, (__global float *)(src_addr + 0 * src_stride_y)); // Row0
- float4 src10 = vload4(0, (__global float *)(src_addr + 1 * src_stride_y)); // Row1
- float4 src20 = vload4(0, (__global float *)(src_addr + 2 * src_stride_y)); // Row2
- float4 src30 = vload4(0, (__global float *)(src_addr + 3 * src_stride_y)); // Row3
- float4 src40 = vload4(0, (__global float *)(src_addr + 4 * src_stride_y)); // Row4
- float4 src50 = vload4(0, (__global float *)(src_addr + 5 * src_stride_y)); // Row5
-
- CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels0, src00, weights_row0);
- CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels0, src10, weights_row1);
- CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels0, src20, weights_row2);
- CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels1, src10, weights_row0);
- CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels1, src20, weights_row1);
- CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels1, src30, weights_row2);
- CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels2, src20, weights_row0);
- CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels2, src30, weights_row1);
- CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels2, src40, weights_row2);
- CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels3, src30, weights_row0);
- CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels3, src40, weights_row1);
- CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels3, src50, weights_row2);
-
-#else /* DILATION_X==1 && DILATION_Y==1 */
-
- //3x3 Convolution of elements starting in 0th row
- pixels0 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f32(src_addr, src_stride_x, src_stride_y, 0, weights_addr, weights_stride_y);
- //3x3 Convolution of elements starting in 1st row
- pixels1 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f32(src_addr, src_stride_x, src_stride_y, 1, weights_addr, weights_stride_y);
- //3x3 Convolution of elements starting in 2nd row
- pixels2 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f32(src_addr, src_stride_x, src_stride_y, 2, weights_addr, weights_stride_y);
- //3x3 Convolution of elements starting in 3rd row
- pixels3 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f32(src_addr, src_stride_x, src_stride_y, 3, weights_addr, weights_stride_y);
-
-#endif /* DILATION_X==1 && DILATION_Y==1 */
-
-#ifdef HAS_BIAS
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-
- float bias = *((__global float *)(vector_offset(&biases, channel)));
-
- pixels0 += (float2)bias;
- pixels1 += (float2)bias;
- pixels2 += (float2)bias;
- pixels3 += (float2)bias;
-#endif /* defined(HAS_BIAS) */
-
- vstore2(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels0, A_VAL, B_VAL), 0, (__global float *)(dst.ptr + 0 * dst_stride_y));
- vstore2(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels1, A_VAL, B_VAL), 0, (__global float *)(dst.ptr + 1 * dst_stride_y));
- vstore2(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels2, A_VAL, B_VAL), 0, (__global float *)(dst.ptr + 2 * dst_stride_y));
- vstore2(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels3, A_VAL, B_VAL), 0, (__global float *)(dst.ptr + 3 * dst_stride_y));
-}
-
-/** This OpenCL kernel is optimized for Bifrost architectures and computes the depthwise convolution 3x3 when both
- * stride_x and stride_y are equal to 2
- *
- * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
- * @note If activation function is enabled, the data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float.
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: F32
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: F32
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: F32
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-__kernel void depthwise_convolution_3x3_stridex2_stridey2_bifrost_f32(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif //defined(HAS_BIAS)
-)
-{
- Image dst = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
-
- float2 pixels0 = 0.0f;
- float2 pixels1 = 0.0f;
-
- // Extract channel and linearized batch indices
- const int channel = get_global_id(2) % DST_CHANNELS;
- const int batch = get_global_id(2) / DST_CHANNELS;
- // Load relevant input and weights data (Accounts for the depth multiplier when indexing the input, OFM = IFM * DEPTH_MULTIPLIER)
- __global uchar *weights_addr = weights.ptr + get_global_id(0) * weights_step_x + get_global_id(1) * weights_step_y + channel * weights_step_z;
- __global uchar *src_addr = src_ptr + get_global_id(0) * src_step_x + get_global_id(1) * src_step_y + get_global_id(2) * src_step_z - batch * (DST_CHANNELS / DEPTH_MULTIPLIER) *
- (DEPTH_MULTIPLIER - 1) * src_step_z - (channel - (channel / DEPTH_MULTIPLIER)) * src_step_z;
-
-#if(DILATION_X == 1 && DILATION_Y == 1)
-
- // Load the weights
- float3 weights_row0 = vload3(0, (__global float *)(weights_addr + 0 * weights_stride_y));
- float3 weights_row1 = vload3(0, (__global float *)(weights_addr + 1 * weights_stride_y));
- float3 weights_row2 = vload3(0, (__global float *)(weights_addr + 2 * weights_stride_y));
-
- // Note: Since each work-item computes 2x2 elements, we need to load 5 rows from the input tensor
- float4 src00 = vload4(0, (__global float *)(src_addr + 0 * src_stride_y)); // Row0
- float2 src01 = vload2(2, (__global float *)(src_addr + 0 * src_stride_y)); // Row0
- float4 src10 = vload4(0, (__global float *)(src_addr + 1 * src_stride_y)); // Row1
- float2 src11 = vload2(2, (__global float *)(src_addr + 1 * src_stride_y)); // Row1
- float4 src20 = vload4(0, (__global float *)(src_addr + 2 * src_stride_y)); // Row2
- float2 src21 = vload2(2, (__global float *)(src_addr + 2 * src_stride_y)); // Row2
- float4 src30 = vload4(0, (__global float *)(src_addr + 3 * src_stride_y)); // Row3
- float2 src31 = vload2(2, (__global float *)(src_addr + 3 * src_stride_y)); // Row3
- float4 src40 = vload4(0, (__global float *)(src_addr + 4 * src_stride_y)); // Row4
- float2 src41 = vload2(2, (__global float *)(src_addr + 4 * src_stride_y)); // Row4
-
- CONVOLUTION1x3_BIFROST2X1_STRIDE2(pixels0, src00, src01, weights_row0);
- CONVOLUTION1x3_BIFROST2X1_STRIDE2(pixels0, src10, src11, weights_row1);
- CONVOLUTION1x3_BIFROST2X1_STRIDE2(pixels0, src20, src21, weights_row2);
- CONVOLUTION1x3_BIFROST2X1_STRIDE2(pixels1, src20, src21, weights_row0);
- CONVOLUTION1x3_BIFROST2X1_STRIDE2(pixels1, src30, src31, weights_row1);
- CONVOLUTION1x3_BIFROST2X1_STRIDE2(pixels1, src40, src41, weights_row2);
-
-#else /* DILATION_X==1 && DILATION_Y==1 */
-
- //3x3 Convolution of elements starting in 0th row
- pixels0 = convolution_3x3_dilation_stridex2_stridey2_bifrost_f32(src_addr, src_stride_x, src_stride_y, 0, weights_addr, weights_stride_y);
- //3x3 Convolution of elements starting in 2nd row
- pixels1 = convolution_3x3_dilation_stridex2_stridey2_bifrost_f32(src_addr, src_stride_x, src_stride_y, 2, weights_addr, weights_stride_y);
-#endif /* DILATION_X==1 && DILATION_Y==1 */
-
-#ifdef HAS_BIAS
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-
- float bias = *((__global float *)(vector_offset(&biases, channel)));
-
- pixels0 += (float2)bias;
- pixels1 += (float2)bias;
-#endif /* defined(HAS_BIAS) */
-
- vstore2(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels0, A_VAL, B_VAL), 0, (__global float *)(dst.ptr + 0 * dst_stride_y));
- vstore2(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels1, A_VAL, B_VAL), 0, (__global float *)(dst.ptr + 1 * dst_stride_y));
-}
-
-#endif // defined(DEPTH_MULTIPLIER) && defined(DST_CHANNELS) && defined(IS_F32)
-
-#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(DST_WIDTH)
-/** Reshape the weights for quantized depthwise convolution
- *
- * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type, e.g. -DDATA_TYPE=uint8
- * @note Output width should be given as a preprocessor argument using -DDST_WIDTH=width, e.g. -DDST_WIDTH=128
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=vec_size, e.g., -DVEC_SIZE=4
- * @attention Input's height and width should be 3
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: All
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void depthwise_convolution_reshape_weights(
- TENSOR3D_DECLARATION(src),
- IMAGE_DECLARATION(dst))
-{
- Vector src = CONVERT_TO_VECTOR_STRUCT(src);
- const int x = get_global_id(0);
-
- // Load 3x3xVEC_SIZE weights
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- w0 = VLOAD(VEC_SIZE)(0, src.ptr + 0 * src_stride_y + 0 * src_stride_z);
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- w1 = VLOAD(VEC_SIZE)(0, src.ptr + 1 * src_stride_y + 0 * src_stride_z);
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- w2 = VLOAD(VEC_SIZE)(0, src.ptr + 2 * src_stride_y + 0 * src_stride_z);
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- w3 = VLOAD(VEC_SIZE)(0, src.ptr + 0 * src_stride_y + 1 * src_stride_z);
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- w4 = VLOAD(VEC_SIZE)(0, src.ptr + 1 * src_stride_y + 1 * src_stride_z);
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- w5 = VLOAD(VEC_SIZE)(0, src.ptr + 2 * src_stride_y + 1 * src_stride_z);
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- w6 = VLOAD(VEC_SIZE)(0, src.ptr + 0 * src_stride_y + 2 * src_stride_z);
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- w7 = VLOAD(VEC_SIZE)(0, src.ptr + 1 * src_stride_y + 2 * src_stride_z);
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- w8 = VLOAD(VEC_SIZE)(0, src.ptr + 2 * src_stride_y + 2 * src_stride_z);
-
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * DST_WIDTH * sizeof(DATA_TYPE);
-
-#if defined(TRANSPOSE)
-#if VEC_SIZE != 4
-#error "VEC_SIZE not supported"
-#else // VEC_SIZE != 4
- VSTORE(VEC_SIZE)
- ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w0.s0, w1.s0, w2.s0, w3.s0), 0, dst_addr + 0);
- VSTORE(VEC_SIZE)
- ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w4.s0, w5.s0, w6.s0, w7.s0), 0, dst_addr + 1 * sizeof(DATA_TYPE) * VEC_SIZE);
- VSTORE(VEC_SIZE)
- ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w8.s0, w0.s1, w1.s1, w2.s1), 0, dst_addr + 2 * sizeof(DATA_TYPE) * VEC_SIZE);
- VSTORE(VEC_SIZE)
- ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w3.s1, w4.s1, w5.s1, w6.s1), 0, dst_addr + 3 * sizeof(DATA_TYPE) * VEC_SIZE);
- VSTORE(VEC_SIZE)
- ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w7.s1, w8.s1, w0.s2, w1.s2), 0, dst_addr + 4 * sizeof(DATA_TYPE) * VEC_SIZE);
- VSTORE(VEC_SIZE)
- ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w2.s2, w3.s2, w4.s2, w5.s2), 0, dst_addr + 5 * sizeof(DATA_TYPE) * VEC_SIZE);
- VSTORE(VEC_SIZE)
- ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w6.s2, w7.s2, w8.s2, w0.s3), 0, dst_addr + 6 * sizeof(DATA_TYPE) * VEC_SIZE);
- VSTORE(VEC_SIZE)
- ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w1.s3, w2.s3, w3.s3, w4.s3), 0, dst_addr + 7 * sizeof(DATA_TYPE) * VEC_SIZE);
- VSTORE(VEC_SIZE)
- ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w5.s3, w6.s3, w7.s3, w8.s3), 0, dst_addr + 8 * sizeof(DATA_TYPE) * VEC_SIZE);
-#endif // VEC_SIZE != 4
-#else // !defined(TRANSPOSE)
- VSTORE(VEC_SIZE)
- (w0, 0, dst_addr + 0);
- VSTORE(VEC_SIZE)
- (w1, 0, dst_addr + 1 * sizeof(DATA_TYPE) * VEC_SIZE);
- VSTORE(VEC_SIZE)
- (w2, 0, dst_addr + 2 * sizeof(DATA_TYPE) * VEC_SIZE);
- VSTORE(VEC_SIZE)
- (w3, 0, dst_addr + 3 * sizeof(DATA_TYPE) * VEC_SIZE);
- VSTORE(VEC_SIZE)
- (w4, 0, dst_addr + 4 * sizeof(DATA_TYPE) * VEC_SIZE);
- VSTORE(VEC_SIZE)
- (w5, 0, dst_addr + 5 * sizeof(DATA_TYPE) * VEC_SIZE);
- VSTORE(VEC_SIZE)
- (w6, 0, dst_addr + 6 * sizeof(DATA_TYPE) * VEC_SIZE);
- VSTORE(VEC_SIZE)
- (w7, 0, dst_addr + 7 * sizeof(DATA_TYPE) * VEC_SIZE);
- VSTORE(VEC_SIZE)
- (w8, 0, dst_addr + 8 * sizeof(DATA_TYPE) * VEC_SIZE);
-#endif // defined(TRANSPOSE)
-}
-#endif // defined(VEC_SIZE) && defined(DATA_TYPE) && defined(DST_WIDTH)
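Without -DTRANSPOSE, the reshape kernel above simply lays the nine 3x3 taps out one after another, VEC_SIZE channels per tap, in each destination row. A reference-only sketch of that layout for one channel block, in plain C with element strides and assuming DST_WIDTH >= 9 * VEC_SIZE; it mirrors the w0..w8 load order, where y varies fastest and then z:

/* dst_row[k * vec + c] receives tap k (k = z * 3 + y) of channel c within the block. */
static void reshape_weights_ref(const float *src, size_t stride_y, size_t stride_z,
                                size_t vec, float *dst_row)
{
    for (size_t k = 0; k < 9; ++k)
    {
        const size_t y = k % 3;
        const size_t z = k / 3;
        for (size_t c = 0; c < vec; ++c)
        {
            dst_row[k * vec + c] = src[c + y * stride_y + z * stride_z];
        }
    }
}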
-
-#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(DEPTH_MULTIPLIER) && defined(DST_CHANNELS) && defined(IS_F16)
-#if defined(CONV_STRIDE_X)
-#if CONV_STRIDE_X == 1
-#define convolution1x3_f16 convolution1x3_stride_1_f16
-#elif CONV_STRIDE_X == 2
-#define convolution1x3_f16 convolution1x3_stride_2_f16
-#elif CONV_STRIDE_X == 3
-#define convolution1x3_f16 convolution1x3_stride_3_f16
-#else /* CONV_STRIDE_X */
-#error "Stride not supported"
-#endif /* CONV_STRIDE_X */
-
-#if(DILATION_X > 1 || DILATION_Y > 1)
-
-/** Perform 3x3 convolution for stride_x=1 and stride_y=1 when DILATION_X>1 or DILATION_Y>1 for f16
- *
- * @param[in] src_addr Pointer to the starting position of where to perform the convolution
- * @param[in] stride_x_bytes   Stride of the source tensor in X dimension (in bytes)
- * @param[in] stride_y_bytes   Stride of the source tensor in Y dimension (in bytes)
- * @param[in] y_offset         Offset in the source tensor from which to start the convolution
- * @param[in] weights_addr     Pointer from where to get the weights
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- */
-inline half4 convolution_3x3_dilation_stridex1_stridey1_bifrost_f16(__global uchar *src_addr, const int stride_x_bytes, const int stride_y_bytes,
- const int y_offset, __global uchar *weights_addr, const int weights_stride_y)
-{
- // Load the weights
- half3 weights_row0 = vload3(0, (__global half *)(weights_addr + 0 * weights_stride_y));
- half3 weights_row1 = vload3(0, (__global half *)(weights_addr + 1 * weights_stride_y));
- half3 weights_row2 = vload3(0, (__global half *)(weights_addr + 2 * weights_stride_y));
-
- half4 pixels0 = 0.0f;
-
- half4 src00_left = vload4(0, (__global half *)ptr_offset(src_addr, 0, y_offset, stride_x_bytes, stride_y_bytes)); // Row0
- half4 src00_mid = vload4(0, (__global half *)ptr_offset(src_addr, DILATION_X, y_offset, stride_x_bytes, stride_y_bytes));
- half4 src00_right = vload4(0, (__global half *)ptr_offset(src_addr, 2 * DILATION_X, y_offset, stride_x_bytes, stride_y_bytes));
-
- half4 src10_left = vload4(0, (__global half *)ptr_offset(src_addr, 0, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes)); // Row1
- half4 src10_mid = vload4(0, (__global half *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes));
- half4 src10_right = vload4(0, (__global half *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes));
-
- half4 src20_left = vload4(0, (__global half *)ptr_offset(src_addr, 0, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); // Row2
- half4 src20_mid = vload4(0, (__global half *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes));
- half4 src20_right = vload4(0, (__global half *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes));
-
- CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels0, src00_left, src00_mid, src00_right, weights_row0);
- CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels0, src10_left, src10_mid, src10_right, weights_row1);
- CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels0, src20_left, src20_mid, src20_right, weights_row2);
-
- return pixels0;
-}
-
-/** Perform 3x3 convolution for stride_x=2 and stride_y=2 when DILATION_X>1 or DILATION_Y>1 for F16
- *
- * @param[in] src_addr Pointer to the starting position of where to perform the convolution
- * @param[in] stride_x_bytes   Stride of the source tensor in X dimension (in bytes)
- * @param[in] stride_y_bytes   Stride of the source tensor in Y dimension (in bytes)
- * @param[in] y_offset         Offset in the source tensor from which to start the convolution
- * @param[in] weights_addr     Pointer from where to get the weights
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- */
-inline half4 convolution_3x3_dilation_stridex2_stridey2_bifrost_f16(__global uchar *src_addr, const int stride_x_bytes, const int stride_y_bytes,
- const int y_offset, __global uchar *weights_addr, const int weights_stride_y)
-{
- // Load the weights
- half3 weights_row0 = vload3(0, (__global half *)(weights_addr + 0 * weights_stride_y));
- half3 weights_row1 = vload3(0, (__global half *)(weights_addr + 1 * weights_stride_y));
- half3 weights_row2 = vload3(0, (__global half *)(weights_addr + 2 * weights_stride_y));
-
- half4 pixels0 = 0.0f;
-
- half8 src00_left = vload8(0, (__global half *)ptr_offset(src_addr, 0, y_offset, stride_x_bytes, stride_y_bytes)); // Row0
- half8 src00_mid = vload8(0, (__global half *)ptr_offset(src_addr, DILATION_X, y_offset, stride_x_bytes, stride_y_bytes));
- half8 src00_right = vload8(0, (__global half *)ptr_offset(src_addr, 2 * DILATION_X, y_offset, stride_x_bytes, stride_y_bytes));
-
- half8 src10_left = vload8(0, (__global half *)ptr_offset(src_addr, 0, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes)); // Row1
- half8 src10_mid = vload8(0, (__global half *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes));
- half8 src10_right = vload8(0, (__global half *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes));
-
- half8 src20_left = vload8(0, (__global half *)ptr_offset(src_addr, 0, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); // Row2
- half8 src20_mid = vload8(0, (__global half *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes));
- half8 src20_right = vload8(0, (__global half *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes));
-
- CONVOLUTION1x3_BIFROST4X1_STRIDE2(pixels0, src00_left, src00_mid, src00_right, weights_row0);
- CONVOLUTION1x3_BIFROST4X1_STRIDE2(pixels0, src10_left, src10_mid, src10_right, weights_row1);
- CONVOLUTION1x3_BIFROST4X1_STRIDE2(pixels0, src20_left, src20_mid, src20_right, weights_row2);
-
- return pixels0;
-}
-
-#endif // (DILATION_X > 1 || DILATION_Y > 1)
-
-/** Compute a 1D horizontal convolution of size 3 and stride 1 for 16bit floating point type.
- *
- * @param[in] left_pixel Pointer to the left pixel.
- * @param[in] left_coeff Weight of the left pixel
- * @param[in] middle_coeff Weight of the middle pixel
- * @param[in] right_coeff Weight of the right pixel
- *
- * @return a half4 containing 4 convolved values.
- */
-inline half4 convolution1x3_stride_1_f16(__global const uchar *left_pixel,
- const half left_coeff,
- const half middle_coeff,
- const half right_coeff)
-{
-#if(DILATION_X == 1 && DILATION_Y == 1)
-
- half8 temp = vload8(0, (__global half *)left_pixel);
-
- half4 left = CONVERT(temp.s0123, half4);
- half4 middle = CONVERT(temp.s1234, half4);
- half4 right = CONVERT(temp.s2345, half4);
-
- return left * (half4)left_coeff + middle * (half4)middle_coeff + right * (half4)right_coeff;
-#else /* DILATION_X==1 && DILATION_Y==1 */
- return vload4(0, (__global half *)left_pixel) * (half4)left_coeff
- + vload4(0, (__global half *)(left_pixel) + DILATION_X) * (half4)middle_coeff
- + vload4(0, (__global half *)(left_pixel) + 2 * DILATION_X) * (half4)right_coeff;
-
-#endif /* DILATION_X==1 && DILATION_Y==1 */
-}
-
-/** Compute a 1D horizontal convolution of size 3 and stride 2 for 16bit floating point type.
- *
- * @param[in] left_pixel Pointer to the left pixel.
- * @param[in] left_coeff Weight of the left pixel
- * @param[in] middle_coeff Weight of the middle pixel
- * @param[in] right_coeff Weight of the right pixel
- *
- * @return a half4 containing 4 convolved values.
- */
-inline half4 convolution1x3_stride_2_f16(__global const uchar *left_pixel,
- const half left_coeff,
- const half middle_coeff,
- const half right_coeff)
-{
-#if(DILATION_X == 1 && DILATION_Y == 1)
-
- half8 temp0 = vload8(0, (__global half *)left_pixel);
- half temp1 = *((__global half *)(left_pixel + 8 * sizeof(half)));
-
- half4 left = CONVERT(temp0.s0246, half4);
- half4 middle = CONVERT(temp0.s1357, half4);
- half4 right = CONVERT((half4)(temp0.s246, temp1), half4);
-
- return left * (half4)left_coeff + middle * (half4)middle_coeff + right * (half4)right_coeff;
-#else /* DILATION_X==1 && DILATION_Y==1 */
-
- __global half *left_pixel_float = (__global half *)left_pixel;
-
- return (half4)(*left_pixel_float, *(left_pixel_float + 2), *(left_pixel_float + 4), *(left_pixel_float + 6)) * (half4)left_coeff
- + (half4)(*(left_pixel_float + DILATION_X), *(left_pixel_float + DILATION_X + 2), *(left_pixel_float + DILATION_X + 4), *(left_pixel_float + DILATION_X + 6)) * (half4)middle_coeff
- + (half4)(*(left_pixel_float + DILATION_X * 2), *(left_pixel_float + DILATION_X * 2 + 2), *(left_pixel_float + DILATION_X * 2 + 4), *(left_pixel_float + DILATION_X * 2 + 6)) * (half4)right_coeff;
-
-#endif /* DILATION_X==1 && DILATION_Y==1 */
-}
-
-/** Compute a 1D horizontal convolution of size 3 and stride 3 for 16bit floating point type.
- *
- * @param[in] left_pixel Pointer to the left pixel.
- * @param[in] left_coeff Weight of the left pixel
- * @param[in] middle_coeff Weight of the middle pixel
- * @param[in] right_coeff Weight of the right pixel
- *
- * @return a half4 containing 4 convolved values.
- */
-inline half4 convolution1x3_stride_3_f16(__global const uchar *left_pixel,
- const half left_coeff,
- const half middle_coeff,
- const half right_coeff)
-{
-#if(DILATION_X == 1 && DILATION_Y == 1)
-
- half16 temp0 = vload16(0, (__global half *)left_pixel);
-
- half4 left = CONVERT(temp0.s0369, half4);
- half4 middle = CONVERT(temp0.s147A, half4);
- half4 right = CONVERT(temp0.s258B, half4);
-
- return left * (half4)left_coeff + middle * (half4)middle_coeff + right * (half4)right_coeff;
-#else /* DILATION_X==1 && DILATION_Y==1 */
-
- __global half *left_pixel_float = (__global half *)left_pixel;
-
- return (half4)(*left_pixel_float, *(left_pixel_float + 3), *(left_pixel_float + 6), *(left_pixel_float + 9)) * (half4)left_coeff
- + (half4)(*(left_pixel_float + DILATION_X), *(left_pixel_float + DILATION_X + 3), *(left_pixel_float + DILATION_X + 6), *(left_pixel_float + DILATION_X + 9)) * (half4)middle_coeff
- + (half4)(*(left_pixel_float + DILATION_X * 2), *(left_pixel_float + DILATION_X * 2 + 3), *(left_pixel_float + DILATION_X * 2 + 6), *(left_pixel_float + DILATION_X * 2 + 9)) * (half4)right_coeff;
-
-#endif /* DILATION_X==1 && DILATION_Y==1 */
-}
-
-/** Apply a 3x3 convolution matrix to a single channel F16 input image and return the result.
- *
- * Convolution matrix layout:
- *
- * [ mat0, mat1, mat2 ]\n
- * [ mat3, mat4, mat5 ]\n
- * [ mat6, mat7, mat8 ]\n
- *
- * @param[in] src          Pointer to the source pixel data
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] mat0 Coefficient from the convolution matrix
- * @param[in] mat1 Coefficient from the convolution matrix
- * @param[in] mat2 Coefficient from the convolution matrix
- * @param[in] mat3 Coefficient from the convolution matrix
- * @param[in] mat4 Coefficient from the convolution matrix
- * @param[in] mat5 Coefficient from the convolution matrix
- * @param[in] mat6 Coefficient from the convolution matrix
- * @param[in] mat7 Coefficient from the convolution matrix
- * @param[in] mat8 Coefficient from the convolution matrix
- *
- * @return a half4 containing 4 convolved values.
- */
-inline half4 convolution3x3_f16(
- __global uchar *src, uint src_stride_y,
- const half mat0, const half mat1, const half mat2,
- const half mat3, const half mat4, const half mat5,
- const half mat6, const half mat7, const half mat8)
-{
- half4 pixels;
-
- pixels = convolution1x3_f16(src, mat0, mat1, mat2);
- pixels += convolution1x3_f16(src + DILATION_Y * src_stride_y, mat3, mat4, mat5);
- pixels += convolution1x3_f16(src + DILATION_Y * 2 * src_stride_y, mat6, mat7, mat8);
-
- return pixels;
-}
-
-#if defined(DEPTH_MULTIPLIER)
-
-/** This OpenCL kernel computes the depthwise convolution 3x3
- *
- * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
- * @note If activation function is enabled, the data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=half. Supported data types: half.
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: F16
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-__kernel void depthwise_convolution_3x3_f16(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif //defined(HAS_BIAS)
-)
-{
- Image dst = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
-#if defined(HAS_BIAS)
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-#endif //defined(HAS_BIAS)
-
- // Extract channel and linearized batch indices
- const int channel = get_global_id(2) % DST_CHANNELS;
- const int batch = get_global_id(2) / DST_CHANNELS;
- // Load relevant input and weights data (Accounts for the depth multiplier when indexing the input, OFM = IFM * DEPTH_MULTIPLIER)
- __global uchar *src_addr = src_ptr + get_global_id(0) * src_step_x + get_global_id(1) * src_step_y + get_global_id(2) * src_step_z - batch * (DST_CHANNELS / DEPTH_MULTIPLIER) *
- (DEPTH_MULTIPLIER - 1) * src_step_z - (channel - (channel / DEPTH_MULTIPLIER)) * src_step_z;
- __global uchar *weights_addr = weights.ptr + get_global_id(0) * weights_step_x + get_global_id(1) * weights_step_y + channel * weights_step_z;
-
- uchar3 offset = (uchar3)(0, 1, 2) * (uchar3)weights_stride_y;
- half3 weights_values0 = vload3(0, (__global half *)(weights_addr + offset.s0));
- half3 weights_values1 = vload3(0, (__global half *)(weights_addr + offset.s1));
- half3 weights_values2 = vload3(0, (__global half *)(weights_addr + offset.s2));
-
- half4 pixels = convolution3x3_f16(src_addr, src_stride_y, weights_values0.s0, weights_values0.s1, weights_values0.s2,
- weights_values1.s0, weights_values1.s1, weights_values1.s2,
- weights_values2.s0, weights_values2.s1, weights_values2.s2);
-#if defined(HAS_BIAS)
- pixels += (half4)(*((__global half *)(biases.ptr + channel * biases_stride_x)));
-#endif //defined(HAS_BIAS)
-
- vstore4(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels, A_VAL, B_VAL), 0, (__global half *)dst.ptr);
-}
-#endif // defined(DEPTH_MULTIPLIER)
-#endif // defined(CONV_STRIDE_X)
-
-/** This OpenCL kernel is optimized for Bifrost architectures and computes the 16bit floating point depthwise convolution 3x3
- * when both stride_x and stride_y are equal to 1
- *
- * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
- * @note If activation function is enabled, the data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=half. Supported data types: half.
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: same as @p src_ptr
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-__kernel void depthwise_convolution_3x3_stridex1_stridey1_bifrost_f16(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif //defined(HAS_BIAS)
-)
-{
- Image dst = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
-
- // Extract channel and linearized batch indices
- const int channel = get_global_id(2) % DST_CHANNELS;
- const int batch = get_global_id(2) / DST_CHANNELS;
-
-#ifdef HAS_BIAS
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-
- half bias = *((__global half *)(vector_offset(&biases, channel)));
-#endif /* defined(HAS_BIAS) */
-
- half4 pixels0 = 0.0f;
- half4 pixels1 = 0.0f;
- half4 pixels2 = 0.0f;
- half4 pixels3 = 0.0f;
-
- // Load relevant input and weights data (Accounts for the depth multiplier when indexing the input, OFM = IFM * DEPTH_MULTIPLIER)
- __global uchar *weights_addr = weights.ptr + get_global_id(0) * weights_step_x + get_global_id(1) * weights_step_y + channel * weights_step_z;
- __global uchar *src_addr = src_ptr + get_global_id(0) * src_step_x + get_global_id(1) * src_step_y + get_global_id(2) * src_step_z - batch * (DST_CHANNELS / DEPTH_MULTIPLIER) *
- (DEPTH_MULTIPLIER - 1) * src_step_z - (channel - (channel / DEPTH_MULTIPLIER)) * src_step_z;
-
-#if(DILATION_X == 1 && DILATION_Y == 1)
- // Load the weights
- half3 weights_row0 = vload3(0, (__global half *)(weights_addr + 0 * weights_stride_y));
- half3 weights_row1 = vload3(0, (__global half *)(weights_addr + 1 * weights_stride_y));
- half3 weights_row2 = vload3(0, (__global half *)(weights_addr + 2 * weights_stride_y));
-
- // Note: Since each work-item computes 4x4 elements, we need to load 6 rows from the input tensor
- half8 src00 = vload8(0, (__global half *)(src_addr + 0 * src_stride_y)); // Row0
- half8 src10 = vload8(0, (__global half *)(src_addr + 1 * src_stride_y)); // Row1
- half8 src20 = vload8(0, (__global half *)(src_addr + 2 * src_stride_y)); // Row2
- half8 src30 = vload8(0, (__global half *)(src_addr + 3 * src_stride_y)); // Row3
- half8 src40 = vload8(0, (__global half *)(src_addr + 4 * src_stride_y)); // Row4
- half8 src50 = vload8(0, (__global half *)(src_addr + 5 * src_stride_y)); // Row5
-
- CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels0, src00, weights_row0);
- CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels0, src10, weights_row1);
- CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels0, src20, weights_row2);
- CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels1, src10, weights_row0);
- CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels1, src20, weights_row1);
- CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels1, src30, weights_row2);
- CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels2, src20, weights_row0);
- CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels2, src30, weights_row1);
- CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels2, src40, weights_row2);
- CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels3, src30, weights_row0);
- CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels3, src40, weights_row1);
- CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels3, src50, weights_row2);
-
-#else /* DILATION_X==1 && DILATION_Y==1 */
-
- //3x3 Convolution of elements starting in 0th row
- pixels0 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f16(src_addr, src_stride_x, src_stride_y, 0, weights_addr, weights_stride_y);
- //3x3 Convolution of elements starting in 1st row
- pixels1 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f16(src_addr, src_stride_x, src_stride_y, 1, weights_addr, weights_stride_y);
- //3x3 Convolution of elements starting in 2nd row
- pixels2 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f16(src_addr, src_stride_x, src_stride_y, 2, weights_addr, weights_stride_y);
- //3x3 Convolution of elements starting in 3rd row
- pixels3 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f16(src_addr, src_stride_x, src_stride_y, 3, weights_addr, weights_stride_y);
-
-#endif /* DILATION_X==1 && DILATION_Y==1 */
-
-#ifdef HAS_BIAS
- pixels0 += (half4)bias;
- pixels1 += (half4)bias;
- pixels2 += (half4)bias;
- pixels3 += (half4)bias;
-#endif /* defined(HAS_BIAS) */
-
- vstore4(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels0, A_VAL, B_VAL), 0, (__global half *)(dst.ptr + 0 * dst_stride_y));
- vstore4(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels1, A_VAL, B_VAL), 0, (__global half *)(dst.ptr + 1 * dst_stride_y));
- vstore4(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels2, A_VAL, B_VAL), 0, (__global half *)(dst.ptr + 2 * dst_stride_y));
- vstore4(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels3, A_VAL, B_VAL), 0, (__global half *)(dst.ptr + 3 * dst_stride_y));
-}
-
-/** This OpenCL kernel is optimized for Bifrost architectures and computes the 16bit floating point depthwise convolution 3x3
- * when both stride_x and stride_y are equal to 2
- *
- * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
- * @note If activation function is enabled, the data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=half. Supported data types: half.
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: same as @p src_ptr
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-__kernel void depthwise_convolution_3x3_stridex2_stridey2_bifrost_f16(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif //defined(HAS_BIAS)
-)
-{
- Image dst = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
-
- // Extract channel and linearized batch indices
- const int channel = get_global_id(2) % DST_CHANNELS;
- const int batch = get_global_id(2) / DST_CHANNELS;
-
-#ifdef HAS_BIAS
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-
- half bias = *((__global half *)(vector_offset(&biases, channel)));
-#endif /* defined(HAS_BIAS) */
-
- half4 pixels0 = 0.0f;
- half4 pixels1 = 0.0f;
-
- // Load relevant input and weights data (Accounts for the depth multiplier when indexing the input, OFM = IFM * DEPTH_MULTIPLIER)
- __global uchar *weights_addr = weights.ptr + get_global_id(0) * weights_step_x + get_global_id(1) * weights_step_y + channel * weights_step_z;
- __global uchar *src_addr = src_ptr + get_global_id(0) * src_step_x + get_global_id(1) * src_step_y + get_global_id(2) * src_step_z - batch * (DST_CHANNELS / DEPTH_MULTIPLIER) *
- (DEPTH_MULTIPLIER - 1) * src_step_z - (channel - (channel / DEPTH_MULTIPLIER)) * src_step_z;
-
-#if(DILATION_X == 1 && DILATION_Y == 1)
-
- // Load the weights
- half3 weights_row0 = vload3(0, (__global half *)(weights_addr + 0 * weights_stride_y));
- half3 weights_row1 = vload3(0, (__global half *)(weights_addr + 1 * weights_stride_y));
- half3 weights_row2 = vload3(0, (__global half *)(weights_addr + 2 * weights_stride_y));
-
- // Note: Since each work-item computes 2x4 elements, we need to load 5 rows from the input tensor
- half8 src00 = vload8(0, (__global half *)(src_addr + 0 * src_stride_y)); // Row0
- half2 src01 = vload2(4, (__global half *)(src_addr + 0 * src_stride_y)); // Row0
- half8 src10 = vload8(0, (__global half *)(src_addr + 1 * src_stride_y)); // Row1
- half2 src11 = vload2(4, (__global half *)(src_addr + 1 * src_stride_y)); // Row1
- half8 src20 = vload8(0, (__global half *)(src_addr + 2 * src_stride_y)); // Row2
- half2 src21 = vload2(4, (__global half *)(src_addr + 2 * src_stride_y)); // Row2
- half8 src30 = vload8(0, (__global half *)(src_addr + 3 * src_stride_y)); // Row3
- half2 src31 = vload2(4, (__global half *)(src_addr + 3 * src_stride_y)); // Row3
- half8 src40 = vload8(0, (__global half *)(src_addr + 4 * src_stride_y)); // Row4
- half2 src41 = vload2(4, (__global half *)(src_addr + 4 * src_stride_y)); // Row4
-
- CONVOLUTION1x3_BIFROST4X1_STRIDE2(pixels0, src00, src01, weights_row0);
- CONVOLUTION1x3_BIFROST4X1_STRIDE2(pixels0, src10, src11, weights_row1);
- CONVOLUTION1x3_BIFROST4X1_STRIDE2(pixels0, src20, src21, weights_row2);
- CONVOLUTION1x3_BIFROST4X1_STRIDE2(pixels1, src20, src21, weights_row0);
- CONVOLUTION1x3_BIFROST4X1_STRIDE2(pixels1, src30, src31, weights_row1);
- CONVOLUTION1x3_BIFROST4X1_STRIDE2(pixels1, src40, src41, weights_row2);
-
-#else /* DILATION_X==1 && DILATION_Y==1 */
- //3x3 Convolution of elements starting in 0th row
- pixels0 = convolution_3x3_dilation_stridex2_stridey2_bifrost_f16(src_addr, src_stride_x, src_stride_y, 0, weights_addr, weights_stride_y);
- //3x3 Convolution of elements starting in 2nd row
- pixels1 = convolution_3x3_dilation_stridex2_stridey2_bifrost_f16(src_addr, src_stride_x, src_stride_y, 2, weights_addr, weights_stride_y);
-#endif /* DILATION_X==1 && DILATION_Y==1 */
-
-#ifdef HAS_BIAS
- pixels0 += (half4)bias;
- pixels1 += (half4)bias;
-#endif /* defined(HAS_BIAS) */
-
- vstore4(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels0, A_VAL, B_VAL), 0, (__global half *)(dst.ptr + 0 * dst_stride_y));
- vstore4(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels1, A_VAL, B_VAL), 0, (__global half *)(dst.ptr + 1 * dst_stride_y));
-}
-#endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(DEPTH_MULTIPLIER) && defined(DST_CHANNELS) && defined(IS_F16)
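As a rough illustration of the compile-time contract documented in the @note blocks above, a plausible build-option set for the two Bifrost F16 kernels could look as follows; the macro names come from the preprocessor guard and the kernel documentation, while the concrete values are hypothetical and not taken from the library:

// Hypothetical -D options for depthwise_convolution_3x3_stridex{1,2}_stridey{1,2}_bifrost_f16;
// values are illustrative only.
//   -DARM_COMPUTE_OPENCL_FP16_ENABLED -DIS_F16 -DDATA_TYPE=half
//   -DDEPTH_MULTIPLIER=1 -DDST_CHANNELS=32
//   -DDILATION_X=1 -DDILATION_Y=1
//   -DACTIVATION_TYPE=relu -DA_VAL=0.0f -DB_VAL=0.0f -DVEC_SIZE=4
//   -DHAS_BIAS   (only when a biases vector is passed)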
-
-#if defined(SRC_DIM1) && defined(SRC_DIM2) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(N0) && defined(DATA_TYPE) && defined(DILATION_X) && defined(DILATION_Y) && defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y) && defined(CONV_PAD_LEFT) && defined(CONV_PAD_TOP) && defined(VEC_SIZE_LEFTOVER)
-/** This function computes the depthwise convolution for NHWC data layout. This kernel assumes that the weights tensor is NOT reshaped
- *
- * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
- * @note The number of elements processed must be passed at compile time using -DN0 (e.g. -DN0=2)
- * @note The depth multiplier must be passed at compile time using -DDEPTH_MULTIPLIER (e.g. -DDEPTH_MULTIPLIER=1)
- * @note The first dimension of the input tensor must be passed at compile time using -DSRC_DIM1 (e.g. -DSRC_DIM1=112)
- * @note The second dimension of the input tensor must be passed at compile time using -DSRC_DIM2 (e.g. -DSRC_DIM2=80)
- * @note The kernel width must be passed at compile time using -DKERNEL_WIDTH (e.g. -DKERNEL_WIDTH=5)
- * @note The kernel height must be passed at compile time using -DKERNEL_HEIGHT (e.g. -DKERNEL_HEIGHT=5)
- * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. -DCONV_PAD_TOP=1)
- * @note The convolution pad left must be passed at compile time using -DCONV_PAD_LEFT (e.g. -DCONV_PAD_LEFT=1)
- * @note The convolution stride along the width must be passed at compile time using -DCONV_STRIDE_X (e.g. -DCONV_STRIDE_X=1)
- * @note The convolution stride along the height must be passed at compile time using -DCONV_STRIDE_Y (e.g. -DCONV_STRIDE_Y=1)
- * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER (e.g. -DVEC_SIZE_LEFTOVER=3). It is defined as the remainder of dividing the input's first dimension by VEC_SIZE
- * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: F16/F32
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: same as src_ptr
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-__kernel void dwc_MxN_native_fp_nhwc(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif // defined(HAS_BIAS)
-)
-{
- int x_offs = max((int)(get_global_id(0) * N0 - (N0 - VEC_SIZE_LEFTOVER) % N0), 0) * sizeof(DATA_TYPE);
-
- int x = get_global_id(0); // channels
- int y = get_global_id(1); // spatial coordinate x
-#if defined(DST_DEPTH)
- int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y
- int b = get_global_id(2) / (int)DST_DEPTH; // batch
-#else // defined(DST_DEPTH)
- int z = get_global_id(2); // spatial coordinate y
-#endif // defined(DST_DEPTH)
-
- __global uchar *s_addr = src_ptr + src_offset_first_element_in_bytes + x_offs;
-
- __global uchar *d_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offs * (int)DEPTH_MULTIPLIER + y * dst_stride_y + z * dst_stride_z;
-
- __global uchar *w_addr = weights_ptr + weights_offset_first_element_in_bytes + x_offs * (int)DEPTH_MULTIPLIER;
-
-#if defined(HAS_BIAS)
- __global uchar *b_addr = biases_ptr + biases_offset_first_element_in_bytes + x_offs * (int)DEPTH_MULTIPLIER;
-#endif // defined(HAS_BIAS)
-
-#if defined(DST_DEPTH)
- s_addr += b * src_stride_w;
- d_addr += b * dst_stride_w;
-#endif // defined(DST_DEPTH)
-
- for(int d = 0; d < (int)DEPTH_MULTIPLIER; ++d)
- {
- // Each work-item computes N0x1x1 elements
- VEC_DATA_TYPE(DATA_TYPE, N0)
- res0 = 0;
-
- int x_coord = y * CONV_STRIDE_X - (int)CONV_PAD_LEFT;
- int y_coord = z * CONV_STRIDE_Y - (int)CONV_PAD_TOP;
-
- for(int yk = 0; yk < KERNEL_HEIGHT; ++yk)
- {
- if(y_coord >= 0 && y_coord < SRC_DIM2)
- {
- int x_coord_tmp = x_coord;
-
- for(int xk = 0; xk < KERNEL_WIDTH; ++xk)
- {
- if(x_coord_tmp >= 0 && x_coord_tmp < SRC_DIM1)
- {
- int s_offset = x_coord_tmp * (int)src_stride_y + y_coord * (int)src_stride_z;
- int w_offset = xk * weights_stride_y + yk * weights_stride_z;
-
- // Load input and weights values
- VEC_DATA_TYPE(DATA_TYPE, N0)
- i = VLOAD(N0)(0, (__global DATA_TYPE *)(s_addr + s_offset));
- VEC_DATA_TYPE(DATA_TYPE, N0)
- w = VLOAD(N0)(0, (__global DATA_TYPE *)(w_addr + w_offset));
-
-#if GPU_ARCH == GPU_ARCH_MIDGARD
- res0 += i * w;
-#else // GPU_ARCH == GPU_ARCH_MIDGARD
- res0 = fma(i, w, res0);
-#endif // GPU_ARCH == GPU_ARCH_MIDGARD
- }
- x_coord_tmp += DILATION_X;
- }
- }
- y_coord += DILATION_Y;
- }
-
-#if defined(HAS_BIAS)
- res0 += VLOAD(N0)(0, (__global DATA_TYPE *)(b_addr));
-#endif // defined(HAS_BIAS)
-
- res0 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, N0, res0, A_VAL, B_VAL);
-
- STORE_VECTOR_SELECT(res, DATA_TYPE, d_addr, N0, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
-
- w_addr += sizeof(DATA_TYPE);
- d_addr += sizeof(DATA_TYPE);
-#if defined(HAS_BIAS)
- b_addr += sizeof(DATA_TYPE);
-#endif // defined(HAS_BIAS)
- }
-}
-#endif // defined(SRC_DIM1) && defined(SRC_DIM2) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(N0) && defined(DATA_TYPE) && defined(DILATION_X) && defined(DILATION_Y) && defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y) && defined(CONV_PAD_LEFT) && defined(CONV_PAD_TOP) && defined(VEC_SIZE_LEFTOVER)
-
-#if defined(VEC_SIZE) && defined(SRC_DIM_2) && defined(CONV_PAD_TOP) && defined(CONV_PAD_LEFT) && defined(DATA_TYPE)
-
-#if DATA_TYPE != float && DATA_TYPE != half
-#error "Unsupported data type"
-#endif // DATA_TYPE != float && DATA_TYPE != half
-
-#define VEC_FLOAT VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
-
-#define FILL_ZERO_OUT_OF_BOUND_3(data_type, vec_size, basename, cond) \
- ({ \
- basename##0 = select(basename##0, (VEC_DATA_TYPE(data_type, vec_size))0, (SELECT_VEC_DATA_TYPE(data_type, vec_size))((cond).s0)); \
- basename##1 = select(basename##1, (VEC_DATA_TYPE(data_type, vec_size))0, (SELECT_VEC_DATA_TYPE(data_type, vec_size))((cond).s1)); \
- basename##2 = select(basename##2, (VEC_DATA_TYPE(data_type, vec_size))0, (SELECT_VEC_DATA_TYPE(data_type, vec_size))((cond).s2)); \
- })
-
-#define FILL_ZERO_OUT_OF_BOUND_4(data_type, vec_size, basename, cond) \
- ({ \
- FILL_ZERO_OUT_OF_BOUND_3(data_type, vec_size, basename, cond); \
- basename##3 = select(basename##3, (VEC_DATA_TYPE(data_type, vec_size))0, (SELECT_VEC_DATA_TYPE(data_type, vec_size))((cond).s3)); \
- })
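A minimal sketch (not part of the original file) of how the FILL_ZERO_OUT_OF_BOUND_* macros behave, assuming the helpers header's VEC_DATA_TYPE/SELECT_VEC_DATA_TYPE macros are in scope: each component of the condition is -1 (all bits set) for rows whose unclamped coordinate was clamped, so select() replaces those rows with zeros.

// Hypothetical demonstration kernel; only the middle row is flagged as out of bounds.
__kernel void demo_fill_zero_out_of_bound(__global float4 *out)
{
    float4 values0 = (float4)1.0f;
    float4 values1 = (float4)2.0f;
    float4 values2 = (float4)3.0f;
    int3   cond    = (int3)(0, -1, 0); // pretend only row 1 had to be clamped back in bounds

    FILL_ZERO_OUT_OF_BOUND_3(float, 4, values, cond);

    out[0] = values0; // still (1, 1, 1, 1)
    out[1] = values1; // now   (0, 0, 0, 0)
    out[2] = values2; // still (3, 3, 3, 3)
}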
-
-#if defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y)
-
-/** This function computes the depthwise convolution for NHWC data layout when the stride along the width or height is not 1.
- *
- * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
- * @note The number of elements read per thread must be passed at compile time using -DVEC_SIZE (e.g. -DVEC_SIZE=2)
- * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM_2 (e.g. -DSRC_DIM_2=112)
- * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. -DCONV_PAD_TOP=1)
- * @note The convolution pad left must be passed at compile time using -DCONV_PAD_LEFT (e.g. -DCONV_PAD_LEFT=1)
- * @note The convolution stride along the width must be passed at compile time using -DCONV_STRIDE_X (e.g. -DCONV_STRIDE_X=1)
- * @note The convolution stride along the height must be passed at compile time using -DCONV_STRIDE_Y (e.g. -DCONV_STRIDE_Y=1)
- * @note The dilation_x and dilation_y must be passed at compile time using -DDILATION_X and -DDILATION_Y: e.g. -DDILATION_X=1, -DDILATION_Y=1
- * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note In case of biases, -DHAS_BIAS must be passed at compile time
- * @note If the output tensor has more than three dimensions, its third dimension must be passed at compile time using -DDST_DEPTH (e.g. -DDST_DEPTH=32)
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: F16/F32
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] max_offset Max offset for the input tensor
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: same as src_ptr
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-__kernel void depthwise_convolution_3x3_nhwc(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif /* defined(HAS_BIAS) */
-)
-{
- int x_offset = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - PARTIAL_STORE_N0) % VEC_SIZE), 0) * sizeof(DATA_TYPE);
- int y = get_global_id(1); // spatial coordinate x
-#if defined(DST_DEPTH)
- int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y
- int b = get_global_id(2) / (int)DST_DEPTH; // batch
-#else // defined(DST_DEPTH)
- int z = get_global_id(2); // spatial coordinate y
-#endif // defined(DST_DEPTH)
-
- __global uchar *weights_addr = weights_ptr + weights_offset_first_element_in_bytes + x_offset;
-
-#if defined(DST_DEPTH)
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x_offset + b * src_stride_w;
-#else /* defined(DST_DEPTH) */
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x_offset;
-#endif /* defined(DST_DEPTH) */
-
- int3 src_coord_y = (int3)(y * CONV_STRIDE_X - CONV_PAD_LEFT) + (int3)(0, DILATION_X, 2 * DILATION_X);
- int3 src_coord_z = (int3)(z * CONV_STRIDE_Y - CONV_PAD_TOP) + (int3)(0, DILATION_Y, 2 * DILATION_Y);
-
- int3 src_offset_y = clamp(src_coord_y, (int3)0, (int3)(SRC_DIM_1 - 1));
- int3 src_offset_z = clamp(src_coord_z, (int3)0, (int3)(SRC_DIM_2 - 1));
-
- // Use these vectors to check whether the unclamped load would have been out of bounds
- src_coord_y = (src_offset_y != src_coord_y);
- src_coord_z = (src_offset_z != src_coord_z);
-
- src_offset_y *= (int3)src_stride_y;
- src_offset_z *= (int3)src_stride_z;
-
- // We compute VEC_SIZEx1x1 [C,W,H] elements
- VEC_FLOAT acc0 = 0;
-
- // Load weights
- VEC_FLOAT w0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_y + 0 * weights_stride_z));
- VEC_FLOAT w1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y + 0 * weights_stride_z));
- VEC_FLOAT w2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y + 0 * weights_stride_z));
- VEC_FLOAT w3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_y + 1 * weights_stride_z));
- VEC_FLOAT w4 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y + 1 * weights_stride_z));
- VEC_FLOAT w5 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y + 1 * weights_stride_z));
- VEC_FLOAT w6 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_y + 2 * weights_stride_z));
- VEC_FLOAT w7 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y + 2 * weights_stride_z));
- VEC_FLOAT w8 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y + 2 * weights_stride_z));
-
- // Load input values
- // z == 0
- VEC_FLOAT values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s0 + src_offset_y.s0));
- VEC_FLOAT values1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s0 + src_offset_y.s1));
- VEC_FLOAT values2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s0 + src_offset_y.s2));
-
- FILL_ZERO_OUT_OF_BOUND_3(DATA_TYPE, VEC_SIZE, values, src_coord_y | (int3)src_coord_z.s0);
-
- acc0 = fma(values0, w0, acc0);
- acc0 = fma(values1, w1, acc0);
- acc0 = fma(values2, w2, acc0);
-
- // z == 1
- values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s1 + src_offset_y.s0));
- values1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s1 + src_offset_y.s1));
- values2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s1 + src_offset_y.s2));
-
- FILL_ZERO_OUT_OF_BOUND_3(DATA_TYPE, VEC_SIZE, values, src_coord_y | (int3)src_coord_z.s1);
-
- acc0 = fma(values0, w3, acc0);
- acc0 = fma(values1, w4, acc0);
- acc0 = fma(values2, w5, acc0);
-
- // z == 2
- values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s2 + src_offset_y.s0));
- values1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s2 + src_offset_y.s1));
- values2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s2 + src_offset_y.s2));
-
- FILL_ZERO_OUT_OF_BOUND_3(DATA_TYPE, VEC_SIZE, values, src_coord_y | (int3)src_coord_z.s2);
-
- acc0 = fma(values0, w6, acc0);
- acc0 = fma(values1, w7, acc0);
- acc0 = fma(values2, w8, acc0);
-
-#if defined(HAS_BIAS)
- __global uchar *biases_addr = biases_ptr + biases_offset_first_element_in_bytes + x_offset;
- VEC_FLOAT bias_values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)biases_addr);
- acc0 += bias_values;
-#endif // defined(HAS_BIAS)
-
-#if defined(DST_DEPTH)
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offset + y * dst_step_y + z * dst_step_z + b * dst_stride_w;
-#else /* defined(DST_DEPTH) */
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offset + y * dst_step_y + z * dst_step_z;
-#endif /* defined(DST_DEPTH) */
-
- acc0 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, acc0, A_VAL, B_VAL);
- STORE_VECTOR_SELECT(acc, DATA_TYPE, dst_addr, VEC_SIZE, PARTIAL_STORE_N0, PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0)
-}
-#endif // defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y)
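For reference, a hedged scalar equivalent (a hypothetical helper, not part of the original file) of what the vectorised NHWC kernel above computes for a single output element, assuming channel-fastest [C, W, H] layouts for both source and weights so that the index arithmetic mirrors the stride usage in the kernel:

// Scalar reference for one output element of a 3x3 NHWC depthwise convolution;
// out-of-bounds taps contribute zero, mirroring the clamp-and-mask logic above.
float dwc_3x3_nhwc_reference(const __global float *src, const __global float *weights,
                             int channel, int out_x, int out_y,
                             int src_w, int src_h, int channels,
                             int stride_x, int stride_y, int pad_left, int pad_top,
                             int dilation_x, int dilation_y)
{
    float acc = 0.0f;
    for(int ky = 0; ky < 3; ++ky)
    {
        for(int kx = 0; kx < 3; ++kx)
        {
            const int ix = out_x * stride_x - pad_left + kx * dilation_x;
            const int iy = out_y * stride_y - pad_top + ky * dilation_y;
            if(ix >= 0 && ix < src_w && iy >= 0 && iy < src_h)
            {
                acc = fma(src[(iy * src_w + ix) * channels + channel],
                          weights[(ky * 3 + kx) * channels + channel],
                          acc);
            }
        }
    }
    return acc;
}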
-
-#if defined(NUM_ROWS_PROCESSED) && defined(NUM_PLANES_PROCESSED)
-/** This function computes the depthwise convolution for NHWC data layout when the stride along the width and height is 1.
- *
- * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
- * @note The number of elements read per thread must be passed at compile time using -DVEC_SIZE (e.g. -DVEC_SIZE=2)
- * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM_2 (e.g. -DSRC_DIM_2=112)
- * @note The number of rows processed per thread must be passed at compile time using -DNUM_ROWS_PROCESSED (e.g. -DNUM_ROWS_PROCESSED=2)
- * @note The number of planes processed per thread must be passed at compile time using -DNUM_PLANES_PROCESSED (e.g. -DNUM_PLANES_PROCESSED=2)
- * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. -DCONV_PAD_TOP=1)
- * @note The convolution pad left must be passed at compile time using -DCONV_PAD_LEFT (e.g. -DCONV_PAD_LEFT=1)
- * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size
- * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note The size of the output's second dimension must be passed at compile time using -DDST_DIM_1 (e.g. -DDST_DIM_1=64)
- * @note The size of the output's third dimension must be passed at compile time using -DDST_DIM_2 (e.g. -DDST_DIM_2=32)
- * @note In case of biases, -DHAS_BIAS must be passed at compile time
- * @note If the output tensor has more than three dimensions, its third dimension must be passed at compile time using -DDST_DEPTH (e.g. -DDST_DEPTH=32)
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: F16/F32
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] max_offset Max offset for the input tensor
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: same as src_ptr
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-__kernel void depthwise_convolution_3x3_nhwc_stride1(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif /* defined(HAS_BIAS) */
-)
-{
- int x_offset = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - PARTIAL_STORE_N0) % VEC_SIZE), 0) * sizeof(DATA_TYPE);
- int y = get_global_id(1); // spatial coordinate x
-#if defined(DST_DEPTH)
- int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y
- int b = get_global_id(2) / (int)DST_DEPTH; // batch
-#else // defined(DST_DEPTH)
- int z = get_global_id(2); // spatial coordinate y
-#endif // defined(DST_DEPTH)
-
- __global uchar *weights_addr = weights_ptr + weights_offset_first_element_in_bytes + x_offset;
-
-#if defined(DST_DEPTH)
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x_offset + b * src_stride_w;
-#else /* defined(DST_DEPTH) */
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x_offset;
-#endif /* defined(DST_DEPTH) */
-
- int4 src_coord_y = (int4)(y * NUM_ROWS_PROCESSED - CONV_PAD_LEFT) + V_OFFS4(int);
- int4 src_coord_z = (int4)(z * NUM_PLANES_PROCESSED - CONV_PAD_TOP) + V_OFFS4(int);
-
- int4 src_offset_y = clamp(src_coord_y, (int4)0, (int4)(SRC_DIM_1 - 1));
- int4 src_offset_z = clamp(src_coord_z, (int4)0, (int4)(SRC_DIM_2 - 1));
-
- // Use these vectors to check whether the unclamped load would have been out of bounds
- src_coord_y = (src_offset_y != src_coord_y);
- src_coord_z = (src_offset_z != src_coord_z);
-
- src_offset_y *= (int4)src_stride_y;
- src_offset_z *= (int4)src_stride_z;
-
- // We compute VEC_SIZEx2x2 [C,W,H] elements
- VEC_FLOAT acc0 = 0;
- VEC_FLOAT acc1 = 0;
- VEC_FLOAT acc2 = 0;
- VEC_FLOAT acc3 = 0;
-
- // Load weights
- VEC_FLOAT w0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_y + 0 * weights_stride_z));
- VEC_FLOAT w1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y + 0 * weights_stride_z));
- VEC_FLOAT w2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y + 0 * weights_stride_z));
- VEC_FLOAT w3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_y + 1 * weights_stride_z));
- VEC_FLOAT w4 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y + 1 * weights_stride_z));
- VEC_FLOAT w5 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y + 1 * weights_stride_z));
- VEC_FLOAT w6 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_y + 2 * weights_stride_z));
- VEC_FLOAT w7 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y + 2 * weights_stride_z));
- VEC_FLOAT w8 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y + 2 * weights_stride_z));
-
- // Load input values
- // z == 0
- VEC_FLOAT values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s0 + src_offset_y.s0));
- VEC_FLOAT values1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s0 + src_offset_y.s1));
- VEC_FLOAT values2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s0 + src_offset_y.s2));
- VEC_FLOAT values3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s0 + src_offset_y.s3));
-
- FILL_ZERO_OUT_OF_BOUND_4(DATA_TYPE, VEC_SIZE, values, src_coord_y | (int4)src_coord_z.s0);
-
- acc0 = fma(values0, w0, acc0);
- acc0 = fma(values1, w1, acc0);
- acc0 = fma(values2, w2, acc0);
- acc1 = fma(values1, w0, acc1);
- acc1 = fma(values2, w1, acc1);
- acc1 = fma(values3, w2, acc1);
-
- // z == 1
- values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s1 + src_offset_y.s0));
- values1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s1 + src_offset_y.s1));
- values2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s1 + src_offset_y.s2));
- values3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s1 + src_offset_y.s3));
-
- FILL_ZERO_OUT_OF_BOUND_4(DATA_TYPE, VEC_SIZE, values, src_coord_y | (int4)src_coord_z.s1);
-
- acc0 = fma(values0, w3, acc0);
- acc0 = fma(values1, w4, acc0);
- acc0 = fma(values2, w5, acc0);
- acc1 = fma(values1, w3, acc1);
- acc1 = fma(values2, w4, acc1);
- acc1 = fma(values3, w5, acc1);
-
- acc2 = fma(values0, w0, acc2);
- acc2 = fma(values1, w1, acc2);
- acc2 = fma(values2, w2, acc2);
- acc3 = fma(values1, w0, acc3);
- acc3 = fma(values2, w1, acc3);
- acc3 = fma(values3, w2, acc3);
-
- // z == 2
- values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s2 + src_offset_y.s0));
- values1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s2 + src_offset_y.s1));
- values2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s2 + src_offset_y.s2));
- values3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s2 + src_offset_y.s3));
-
- FILL_ZERO_OUT_OF_BOUND_4(DATA_TYPE, VEC_SIZE, values, src_coord_y | (int4)src_coord_z.s2);
-
- acc0 = fma(values0, w6, acc0);
- acc0 = fma(values1, w7, acc0);
- acc0 = fma(values2, w8, acc0);
- acc1 = fma(values1, w6, acc1);
- acc1 = fma(values2, w7, acc1);
- acc1 = fma(values3, w8, acc1);
-
- acc2 = fma(values0, w3, acc2);
- acc2 = fma(values1, w4, acc2);
- acc2 = fma(values2, w5, acc2);
- acc3 = fma(values1, w3, acc3);
- acc3 = fma(values2, w4, acc3);
- acc3 = fma(values3, w5, acc3);
-
- // z == 3
- values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s3 + src_offset_y.s0));
- values1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s3 + src_offset_y.s1));
- values2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s3 + src_offset_y.s2));
- values3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s3 + src_offset_y.s3));
-
- FILL_ZERO_OUT_OF_BOUND_4(DATA_TYPE, VEC_SIZE, values, src_coord_y | (int4)src_coord_z.s3);
-
- acc2 = fma(values0, w6, acc2);
- acc2 = fma(values1, w7, acc2);
- acc2 = fma(values2, w8, acc2);
- acc3 = fma(values1, w6, acc3);
- acc3 = fma(values2, w7, acc3);
- acc3 = fma(values3, w8, acc3);
-
-#if defined(HAS_BIAS)
- __global uchar *biases_addr = biases_ptr + biases_offset_first_element_in_bytes + x_offset;
-
- VEC_FLOAT bias_values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)biases_addr);
-
- acc0 += bias_values;
- acc1 += bias_values;
- acc2 += bias_values;
- acc3 += bias_values;
-#endif // defined(HAS_BIAS)
-
- int2 dst_offset_y = min((int2)(y * NUM_ROWS_PROCESSED) + V_OFFS2(int), (int2)(DST_DIM_1 - 1)) * (int2)dst_stride_y;
- int dst_coord_z = z * NUM_PLANES_PROCESSED;
-
-#if defined(DST_DEPTH)
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offset + dst_coord_z * dst_stride_z + b * dst_stride_w;
-#else // defined(DST_DEPTH)
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offset + dst_coord_z * dst_stride_z;
-#endif // defined(DST_DEPTH)
-
- /* Store vectors in reverse order along the Y. The Y offsets are calculated so that they are forced to be in bound.
- * If only the first address is in bound, the Y offset of the second address will be brought back and there will be 2 writes in the same location for the same thread.
- * Since the last vector to be written is always the valid one for that location, it overwrites the wrong values.
- */
- values0 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, acc1, A_VAL, B_VAL);
- STORE_VECTOR_SELECT(values, DATA_TYPE, dst_addr + dst_offset_y.s1, VEC_SIZE, PARTIAL_STORE_N0, PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0)
-
- values0 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, acc0, A_VAL, B_VAL);
- STORE_VECTOR_SELECT(values, DATA_TYPE, dst_addr + dst_offset_y.s0, VEC_SIZE, PARTIAL_STORE_N0, PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0)
-
-#if((DST_DIM_2 % NUM_PLANES_PROCESSED) != 0)
- if((dst_coord_z + 1) < DST_DIM_2)
-#endif // ((DST_DIM_2 % NUM_PLANES_PROCESSED) != 0)
- {
- values0 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, acc3, A_VAL, B_VAL);
- STORE_VECTOR_SELECT(values, DATA_TYPE, dst_addr + dst_stride_z + dst_offset_y.s1, VEC_SIZE, PARTIAL_STORE_N0, PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0)
-
- values0 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, acc2, A_VAL, B_VAL);
- STORE_VECTOR_SELECT(values, DATA_TYPE, dst_addr + dst_stride_z + dst_offset_y.s0, VEC_SIZE, PARTIAL_STORE_N0, PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0)
- }
-}
-
-#endif // defined(NUM_ROWS_PROCESSED) && defined(NUM_PLANES_PROCESSED)
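To make the reverse-order store comment in the kernel above concrete, here is a small worked example with hypothetical sizes, assuming V_OFFS2(int) yields (0, 1):

// Assume DST_DIM_1 = 5 and NUM_ROWS_PROCESSED = 2, and consider the last tile with y = 2:
//   dst_offset_y = min((int2)(4, 5), (int2)(DST_DIM_1 - 1)) * dst_stride_y = (int2)(4, 4) * dst_stride_y
// Both lanes alias output row 4. acc1 (the out-of-range row 5) is stored first and acc0
// (the valid row 4 result) is stored last, so the value that remains at row 4 is the correct one.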
-#endif // defined(VEC_SIZE) && defined(SRC_DIM_2) && defined(CONV_PAD_TOP) && defined(CONV_PAD_LEFT) && defined(DATA_TYPE) \ No newline at end of file
diff --git a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl
deleted file mode 100644
index c7fe401f80..0000000000
--- a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl
+++ /dev/null
@@ -1,1790 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "helpers_asymm.h"
-
-#ifndef VEC_SIZE
-#if defined(N0)
-#define VEC_SIZE N0
-#else /* defined(N0) */
-#define VEC_SIZE 8
-#endif /* defined(N0) */
-#endif /* VEC_SIZE */
-
-#if defined(ACTIVATION_TYPE) && defined(CONST_0)
-#include "activation_layer_quant.cl"
-#define ACTIVATION_FUNC(x) PERFORM_ACTIVATION_QUANT(ACTIVATION_TYPE, x)
-#else /* defined(ACTIVATION_TYPE) && defined(CONST_0) */
-#define ACTIVATION_FUNC(x) (x)
-#endif /* defined(ACTIVATION_TYPE) && defined(CONST_0) */
-
-#define VEC_INT VEC_DATA_TYPE(int, VEC_SIZE)
-#define VEC_FLOAT VEC_DATA_TYPE(float, VEC_SIZE)
-#define VEC_SHORT VEC_DATA_TYPE(short, VEC_SIZE)
-
-#if defined(DATA_TYPE) && defined(WEIGHTS_TYPE)
-
-#define VEC_TYPE(size) VEC_DATA_TYPE(DATA_TYPE, size)
-
-#if defined(WEIGHTS_OFFSET) && defined(INPUT_OFFSET) && defined(K_OFFSET) && ((defined(OUTPUT_OFFSET) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT)) || defined(REAL_MULTIPLIER))
-
-#if defined(WEIGHTS_PROMOTED_TYPE)
-#define VEC_WEIGHTS_PROMOTED_TYPE(size) VEC_DATA_TYPE(WEIGHTS_PROMOTED_TYPE, size)
-
-#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
-#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
-#define ARM_DOT(x, y, val) val = arm_dot_acc((x), (y), val);
-#else // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
-#define ARM_DOT(x, y, val) val += arm_dot((x), (y));
-#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
-#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
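A small hedged usage sketch of the ARM_DOT helper defined above (the operand names and values are hypothetical): when the int8 dot-product extensions are available, a three-tap multiply-accumulate collapses into a single dot product by padding the fourth lane with zero.

// int acc = 0;
// uchar4 px = (uchar4)(p0, p1, p2, 0);
// uchar4 wv = (uchar4)(w0, w1, w2, 0);
// ARM_DOT(px, wv, acc); // acc += p0 * w0 + p1 * w1 + p2 * w2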
-
-#if defined(CONV_STRIDE_Y) && defined(CONV_STRIDE_X) && defined(DEPTH_MULTIPLIER) && defined(DST_CHANNELS)
-
-#if CONV_STRIDE_X > 3
-#error "Stride X not supported"
-#endif /* CONV_STRIDE_X > 3 */
-
-#if !defined(IS_DOT8)
-
-#if DILATION_X == 1
-
-#if CONV_STRIDE_X == 1
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- int8 temp0 = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value)), int8); \
- int2 temp1 = CONVERT(vload2(0, (__global DATA_TYPE *)(first_value + 8 * sizeof(DATA_TYPE))), int2); \
- \
- left = CONVERT(temp0.s01234567, int8); \
- middle = CONVERT((int8)(temp0.s1234, temp0.s567, temp1.s0), int8); \
- right = CONVERT((int8)(temp0.s2345, temp0.s67, temp1.s01), int8); \
- })
-#elif CONV_STRIDE_X == 2
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- int16 temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value)), int16); \
- int temp1 = CONVERT(*((__global DATA_TYPE *)(first_value + 16 * sizeof(DATA_TYPE))), int); \
- \
- left = CONVERT(temp0.s02468ace, int8); \
- middle = CONVERT(temp0.s13579bdf, int8); \
- right = CONVERT((int8)(temp0.s2468, temp0.sace, temp1), int8); \
- })
-#else /* CONV_STRIDE_X */
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- int16 temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value)), int16); \
- int8 temp1 = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value + 16 * sizeof(DATA_TYPE))), int8); \
- \
- left = CONVERT((int8)(temp0.s0369, temp0.scf, temp1.s25), int8); \
- middle = CONVERT((int8)(temp0.s147a, temp0.sd, temp1.s036), int8); \
- right = CONVERT((int8)(temp0.s258b, temp0.se, temp1.s147), int8); \
- })
-#endif /* CONV_STRIDE_X */
-
-#else /* DILATION_X == 1 */
-
-#if CONV_STRIDE_X == 1
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- left = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value)), int8); \
- middle = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value + DILATION_X * sizeof(DATA_TYPE))), int8); \
- right = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value + 2 * DILATION_X * sizeof(DATA_TYPE))), int8); \
- })
-#elif CONV_STRIDE_X == 2
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- int16 temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value)), int16); \
- left = CONVERT(temp0.s02468ace, int8); \
- \
- temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value + DILATION_X * sizeof(DATA_TYPE))), int16); \
- middle = CONVERT(temp0.s02468ace, int8); \
- \
- temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value + 2 * DILATION_X * sizeof(DATA_TYPE))), int16); \
- right = CONVERT(temp0.s02468ace, int8); \
- })
-#else /* CONV_STRIDE_X */
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- int16 temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value)), int16); \
- int8 temp1 = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value + 16 * sizeof(DATA_TYPE))), int8); \
- left = CONVERT((int8)(temp0.s0369, temp0.scf, temp1.s25), int8); \
- \
- temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value + DILATION_X * sizeof(DATA_TYPE))), int16); \
- temp1 = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value + (16 + DILATION_X) * sizeof(DATA_TYPE))), int8); \
- middle = CONVERT((int8)(temp0.s0369, temp0.scf, temp1.s25), int8); \
- \
- temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value + 2 * DILATION_X * sizeof(DATA_TYPE))), int16); \
- temp1 = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value + (16 + 2 * DILATION_X) * sizeof(DATA_TYPE))), int8); \
- right = CONVERT((int8)(temp0.s0369, temp0.scf, temp1.s25), int8); \
- })
-
-#endif /* CONV_STRIDE_X */
-#endif /* DILATION_X==1 */
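As a worked example (the pointer p is hypothetical, not taken from the file), GET_VALUES for CONV_STRIDE_X == 1 and DILATION_X == 1 simply produces the three horizontally shifted windows consumed by each 3x1 row tap:

// For ten consecutive DATA_TYPE values p[0..9]:
//   int8 left, middle, right;
//   GET_VALUES(p, left, middle, right);
//   // left   == convert_int8(p[0..7])
//   // middle == convert_int8(p[1..8])
//   // right  == convert_int8(p[2..9])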
-
-/** This function computes the depthwise convolution quantized.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] output_multipliers_ptr Pointer to the output multipliers vector. Supported data types: S32
- * @param[in] output_multipliers_stride_x Stride of the output multipliers vector in X dimension (in bytes)
- * @param[in] output_multipliers_step_x output_multipliers_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_multipliers_offset_first_element_in_bytes The offset of the first element in the output multipliers vector
- * @param[in] output_shifts_ptr Pointer to the output shifts vector. Supported data types: S32
- * @param[in] output_shifts_stride_x Stride of the output shifts vector in X dimension (in bytes)
- * @param[in] output_shifts_step_x output_shifts_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_shifts_offset_first_element_in_bytes The offset of the first element in the output shifts vector
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: S32
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-
-__kernel void dwc_3x3_native_quantized8_nchw(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights),
- VECTOR_DECLARATION(output_multipliers),
- VECTOR_DECLARATION(output_shifts)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif //defined(HAS_BIAS)
-)
-{
- __global uchar *src_addr = src_ptr + get_global_id(0) * src_step_x + get_global_id(1) * src_step_y + get_global_id(2) * src_step_z;
- Image dst = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
- Vector output_multipliers = CONVERT_TO_VECTOR_STRUCT_NO_STEP(output_multipliers);
- Vector output_shifts = CONVERT_TO_VECTOR_STRUCT_NO_STEP(output_shifts);
-
- // Extract channel and linearized batch indices
- const int channel = get_global_id(2) % DST_CHANNELS;
- const int batch = get_global_id(2) / DST_CHANNELS;
-
-#if defined(HAS_BIAS)
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-
- int bias_value = *((__global int *)(vector_offset(&biases, channel)));
-#endif //defined(HAS_BIAS)
-
- // Load relevant input and weights data (Accounts for the depth multiplier when indexing the input, OFM = IFM * DEPTH_MULTIPLIER)
- src_addr -= batch * (DST_CHANNELS / DEPTH_MULTIPLIER) * (DEPTH_MULTIPLIER - 1) * src_step_z + (channel - (channel / DEPTH_MULTIPLIER)) * src_step_z;
- __global uchar *weights_addr = weights.ptr + get_global_id(0) * weights_step_x + get_global_id(1) * weights_step_y + channel * weights_step_z;
-
- VEC_DATA_TYPE(WEIGHTS_TYPE, 3)
- w0 = vload3(0, (__global WEIGHTS_TYPE *)(weights_addr + 0 * weights_stride_y));
- VEC_DATA_TYPE(WEIGHTS_TYPE, 3)
- w1 = vload3(0, (__global WEIGHTS_TYPE *)(weights_addr + 1 * weights_stride_y));
- VEC_DATA_TYPE(WEIGHTS_TYPE, 3)
- w2 = vload3(0, (__global WEIGHTS_TYPE *)(weights_addr + 2 * weights_stride_y));
-
-#if defined(PER_CHANNEL_QUANTIZATION)
- const int output_multiplier = *((__global int *)vector_offset(&output_multipliers, channel));
- const int output_shift = *((__global int *)vector_offset(&output_shifts, channel));
-#endif // defined(PER_CHANNEL_QUANTIZATION)
-
- int8 values0 = 0;
- int8 sum0 = 0;
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- int8 values1 = 0;
- int8 sum1 = 0;
-#endif /* CONV_STRIDE_Y &&DILATION_Y==1 */
-
- // Row0
- int8 left, middle, right;
- GET_VALUES(src_addr + 0 * src_stride_y, left, middle, right);
- values0 += left * (int8)(w0.s0);
- values0 += middle * (int8)(w0.s1);
- values0 += right * (int8)(w0.s2);
-
-#if WEIGHTS_OFFSET != 0
- sum0 += left + middle + right;
-#endif /* WEIGHTS_OFFSET != 0 */
-
- // Row1
- GET_VALUES(src_addr + DILATION_Y * src_stride_y, left, middle, right);
- values0 += left * (int8)(w1.s0);
- values0 += middle * (int8)(w1.s1);
- values0 += right * (int8)(w1.s2);
-
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- values1 += left * (int8)(w0.s0);
- values1 += middle * (int8)(w0.s1);
- values1 += right * (int8)(w0.s2);
-#endif /* CONV_STRIDE_Y && DILATION_Y== 1 */
-
-#if WEIGHTS_OFFSET != 0
- int8 tmp = left + middle + right;
- sum0 += tmp;
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- sum1 += tmp;
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y == 1 */
-#endif /* WEIGHTS_OFFSET != 0 */
-
- // Row2
- GET_VALUES(src_addr + 2 * DILATION_Y * src_stride_y, left, middle, right);
- values0 += left * (int8)(w2.s0);
- values0 += middle * (int8)(w2.s1);
- values0 += right * (int8)(w2.s2);
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- values1 += left * (int8)(w1.s0);
- values1 += middle * (int8)(w1.s1);
- values1 += right * (int8)(w1.s2);
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y == 1 */
-
-#if WEIGHTS_OFFSET != 0
- tmp = left + middle + right;
- sum0 += tmp;
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- sum1 += tmp;
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1 */
-#endif /* WEIGHTS_OFFSET != 0 */
-
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- // Row3
- GET_VALUES(src_addr + 3 * src_stride_y, left, middle, right);
- values1 += left * (int8)(w2.s0);
- values1 += middle * (int8)(w2.s1);
- values1 += right * (int8)(w2.s2);
-
-#if WEIGHTS_OFFSET != 0
- sum1 += left + middle + right;
-#endif /* WEIGHTS_OFFSET != 0 */
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y == 1 */
-
-#if defined(HAS_BIAS)
- values0 += (int8)(bias_value);
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- values1 += (int8)(bias_value);
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y == 1 */
-#endif //defined(HAS_BIAS)
-
-#if WEIGHTS_OFFSET != 0
- values0 += sum0 * (int8)(WEIGHTS_OFFSET);
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- values1 += sum1 * (int8)(WEIGHTS_OFFSET);
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1 */
-#endif /* WEIGHTS_OFFSET != 0 */
-
-#if INPUT_OFFSET != 0
- VEC_WEIGHTS_PROMOTED_TYPE(3)
- tmp_we = CONVERT(w0, VEC_WEIGHTS_PROMOTED_TYPE(3)) + CONVERT(w1, VEC_WEIGHTS_PROMOTED_TYPE(3)) + CONVERT(w2, VEC_WEIGHTS_PROMOTED_TYPE(3));
-
- WEIGHTS_PROMOTED_TYPE sum_weights = tmp_we.s0 + tmp_we.s1 + tmp_we.s2;
- values0 += sum_weights * (int8)(INPUT_OFFSET);
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- values1 += sum_weights * (int8)(INPUT_OFFSET);
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1 */
-#endif /* INPUT_OFFSET != 0 */
-
-#if K_OFFSET != 0
- values0 += (int8)(K_OFFSET);
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- values1 += (int8)(K_OFFSET);
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1*/
-#endif /* K_OFFSET != 0 */
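- // The WEIGHTS_OFFSET, INPUT_OFFSET and K_OFFSET corrections above come from expanding the quantized product:
- //   sum_i (x_i + INPUT_OFFSET) * (w_i + WEIGHTS_OFFSET)
- //     = sum_i x_i * w_i + WEIGHTS_OFFSET * sum_i x_i + INPUT_OFFSET * sum_i w_i + 9 * INPUT_OFFSET * WEIGHTS_OFFSET
- // sum0/sum1 carry the running input sums, sum_weights carries the weight sum, and K_OFFSET is presumably the
- // constant 9 * INPUT_OFFSET * WEIGHTS_OFFSET term provided by the host at compile time.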
-
-#if defined(REAL_MULTIPLIER)
-
- values0 = CONVERT(round(CONVERT(values0, float8) * (float8)REAL_MULTIPLIER), int8);
-
-#else // defined(REAL_MULTIPLIER)
-
-#if defined(PER_CHANNEL_QUANTIZATION)
- int8 res0_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values0, output_multiplier, output_shift, 8);
- int8 res0_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values0, output_multiplier, output_shift, 8);
- values0 = select(res0_shift_lt0, res0_shift_gt0, (int8)(output_shift) >= 0);
-#else // defined(PER_CHANNEL_QUANTIZATION)
-#if OUTPUT_SHIFT < 0
- values0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
-#else // OUTPUT_SHIFT < 0
- values0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
-#endif // OUTPUT_SHIFT < 0
-#endif // defined(PER_CHANNEL_QUANTIZATION)
-
-#endif // defined(REAL_MULTIPLIER)
-
- values0 += (int8)OUTPUT_OFFSET;
- VEC_TYPE(8)
- res0 = CONVERT_SAT(values0, VEC_TYPE(8));
-
- vstore8(ACTIVATION_FUNC(res0), 0, dst.ptr);
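- // Requantization sketch (values purely illustrative): with OUTPUT_MULTIPLIER = 1431655765 (about 2/3 * 2^31)
- // and OUTPUT_SHIFT = 1, an accumulator value of 300 becomes roughly round(300 * 1431655765 / 2^31) >> 1 = 100,
- // i.e. the accumulator is scaled by approximately OUTPUT_MULTIPLIER / 2^31 * 2^-OUTPUT_SHIFT before
- // OUTPUT_OFFSET is added and the result is saturated to the 8-bit range. When REAL_MULTIPLIER is defined,
- // the same scaling is performed directly in floating point instead.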
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
-#if defined(REAL_MULTIPLIER)
-
- values1 = CONVERT(round(CONVERT(values1, float8) * (float8)REAL_MULTIPLIER), int8);
-
-#else // defined(REAL_MULTIPLIER)
-
-#if defined(PER_CHANNEL_QUANTIZATION)
- int8 res1_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values1, output_multiplier, output_shift, 8);
- int8 res1_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values1, output_multiplier, output_shift, 8);
- values1 = select(res1_shift_lt0, res1_shift_gt0, (int8)(output_shift) >= 0);
-#else // defined(PER_CHANNEL_QUANTIZATION)
-#if OUTPUT_SHIFT < 0
- values1 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
-#else // OUTPUT_SHIFT < 0
- values1 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
-#endif // OUTPUT_SHIFT < 0
-#endif // defined(PER_CHANNEL_QUANTIZATION)
-
-#endif // defined(REAL_MULTIPLIER)
-
- values1 += (int8)OUTPUT_OFFSET;
- VEC_TYPE(8)
- res1 = CONVERT_SAT(values1, VEC_TYPE(8));
-
- vstore8(ACTIVATION_FUNC(res1), 0, dst.ptr + dst_stride_y);
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1*/
-}
-
-#else // !defined(IS_DOT8)
-
-#if DILATION_X == 1
-#if CONV_STRIDE_X == 1
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- VEC_TYPE(8) \
- temp0 = vload8(0, (__global DATA_TYPE *)(first_value)); \
- VEC_TYPE(2) \
- temp1 = vload2(0, (__global DATA_TYPE *)(first_value + 8 * sizeof(DATA_TYPE))); \
- \
- left = temp0.s01234567; \
- middle = (VEC_TYPE(8))(temp0.s1234, temp0.s567, temp1.s0); \
- right = (VEC_TYPE(8))(temp0.s2345, temp0.s67, temp1.s01); \
- })
-#elif CONV_STRIDE_X == 2
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- VEC_TYPE(16) \
- temp0 = vload16(0, (__global DATA_TYPE *)(first_value)); \
- DATA_TYPE temp1 = *((__global DATA_TYPE *)(first_value + 16 * sizeof(DATA_TYPE))); \
- \
- left = temp0.s02468ace; \
- middle = temp0.s13579bdf; \
- right = (VEC_TYPE(8))(temp0.s2468, temp0.sace, temp1); \
- })
-#else /* CONV_STRIDE_X */
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- VEC_TYPE(16) \
- temp0 = vload16(0, (__global DATA_TYPE *)(first_value)); \
- VEC_TYPE(8) \
- temp1 = vload8(0, (__global DATA_TYPE *)(first_value + 16 * sizeof(DATA_TYPE))); \
- \
- left = (VEC_TYPE(8))(temp0.s0369, temp0.scf, temp1.s25); \
- middle = (VEC_TYPE(8))(temp0.s147a, temp0.sd, temp1.s036); \
- right = (VEC_TYPE(8))(temp0.s258b, temp0.se, temp1.s147); \
- })
-#endif /* CONV_STRIDE_X */
-#else /*DILATION_X==1*/
-
-#if CONV_STRIDE_X == 1
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- left = vload8(0, (__global DATA_TYPE *)(first_value)); \
- middle = vload8(0, (__global DATA_TYPE *)(first_value + DILATION_X * sizeof(DATA_TYPE))); \
- right = vload8(0, (__global DATA_TYPE *)(first_value + 2 * DILATION_X * sizeof(DATA_TYPE))); \
- })
-#elif CONV_STRIDE_X == 2
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- VEC_TYPE(16) \
- temp0 = vload16(0, (__global DATA_TYPE *)(first_value)); \
- left = temp0.s02468ace; \
- temp0 = vload16(0, (__global DATA_TYPE *)(first_value + DILATION_X * sizeof(DATA_TYPE))); \
- middle = temp0.s02468ace; \
- temp0 = vload16(0, (__global DATA_TYPE *)(first_value + 2 * DILATION_X * sizeof(DATA_TYPE))); \
- right = temp0.s02468ace; \
- })
-#else /* CONV_STRIDE_X */
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- VEC_TYPE(16) \
- temp0 = vload16(0, (__global DATA_TYPE *)(first_value)); \
- VEC_TYPE(8) \
- temp1 = vload8(0, (__global DATA_TYPE *)(first_value + 16 * sizeof(DATA_TYPE))); \
- left = (VEC_TYPE(8))(temp0.s0369, temp0.scf, temp1.s25); \
- \
- temp0 = vload16(0, (__global DATA_TYPE *)(first_value + DILATION_X * sizeof(DATA_TYPE))); \
- temp1 = vload8(0, (__global DATA_TYPE *)(first_value + (16 + DILATION_X) * sizeof(DATA_TYPE))); \
- middle = (VEC_TYPE(8))(temp0.s0369, temp0.scf, temp1.s25); \
- \
- temp0 = vload16(0, (__global DATA_TYPE *)(first_value + 2 * DILATION_X * sizeof(DATA_TYPE))); \
- temp1 = vload8(0, (__global DATA_TYPE *)(first_value + (16 + 2 * DILATION_X) * sizeof(DATA_TYPE))); \
- right = (VEC_TYPE(8))(temp0.s0369, temp0.scf, temp1.s25); \
- })
-
-#endif /* CONV_STRIDE_X */
-#endif /*DILATION_X==1*/
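- // GET_VALUES gathers, for one input row, the three horizontally shifted 8-element vectors (left/middle/right
- // taps) needed to produce 8 output columns. For example, with CONV_STRIDE_X == 2 and DILATION_X == 1, output
- // column i uses the inputs at x = 2*i, 2*i + 1 and 2*i + 2, i.e. left.s[i], middle.s[i] and right.s[i], which
- // is why that variant extracts the even and odd lanes of a 16-element load.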
-/** This function computes the quantized depthwise convolution using the dot product instruction when the data layout is NCHW.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] output_multipliers_ptr Pointer to the output multipliers vector. Supported data types: S32
- * @param[in] output_multipliers_stride_x Stride of the output multipliers vector in X dimension (in bytes)
- * @param[in] output_multipliers_step_x output_multipliers_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_multipliers_offset_first_element_in_bytes The offset of the first element in the output multipliers vector
- * @param[in] output_shifts_ptr Pointer to the output shifts vector. Supported data types: S32
- * @param[in] output_shifts_stride_x Stride of the output shifts vector in X dimension (in bytes)
- * @param[in] output_shifts_step_x output_shifts_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_shifts_offset_first_element_in_bytes The offset of the first element in the output shifts vector
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: S32
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-
-__kernel void dwc_3x3_native_quantized8_dot8_nchw(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights),
- VECTOR_DECLARATION(output_multipliers),
- VECTOR_DECLARATION(output_shifts)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif //defined(HAS_BIAS)
-)
-{
- __global uchar *src_addr = src_ptr + get_global_id(0) * src_step_x + get_global_id(1) * src_step_y + get_global_id(2) * src_step_z;
- Image dst = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
- Vector output_multipliers = CONVERT_TO_VECTOR_STRUCT_NO_STEP(output_multipliers);
- Vector output_shifts = CONVERT_TO_VECTOR_STRUCT_NO_STEP(output_shifts);
-
- // Extract channel and linearized batch indices
- const int channel = get_global_id(2) % DST_CHANNELS;
- const int batch = get_global_id(2) / DST_CHANNELS;
-
-#if defined(HAS_BIAS)
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-
- const int bias_value = *((__global int *)(vector_offset(&biases, channel)));
-#endif //defined(HAS_BIAS)
-
- // Load the relevant input and weights data (accounts for the depth multiplier when indexing the input, OFM = IFM * DEPTH_MULTIPLIER)
- src_addr -= batch * (DST_CHANNELS / DEPTH_MULTIPLIER) * (DEPTH_MULTIPLIER - 1) * src_step_z + (channel - (channel / DEPTH_MULTIPLIER)) * src_step_z;
- __global uchar *weights_addr = weights.ptr + get_global_id(0) * weights_step_x + get_global_id(1) * weights_step_y + channel * weights_step_z;
-
- VEC_TYPE(3)
- w0 = vload3(0, (__global WEIGHTS_TYPE *)(weights_addr + 0 * weights_stride_y));
- VEC_TYPE(3)
- w1 = vload3(0, (__global WEIGHTS_TYPE *)(weights_addr + 1 * weights_stride_y));
- VEC_TYPE(3)
- w2 = vload3(0, (__global WEIGHTS_TYPE *)(weights_addr + 2 * weights_stride_y));
-
- const int output_multiplier = *((__global int *)vector_offset(&output_multipliers, 0));
- const int output_shift = *((__global int *)vector_offset(&output_shifts, 0));
-
- VEC_TYPE(8)
- left0, middle0, right0;
- VEC_TYPE(8)
- left1, middle1, right1;
- VEC_TYPE(8)
- left2, middle2, right2;
-
- int8 values0 = 0;
- int8 sum0 = 0;
-
- GET_VALUES(src_addr + 0 * src_stride_y, left0, middle0, right0);
- GET_VALUES(src_addr + DILATION_Y * src_stride_y, left1, middle1, right1);
- GET_VALUES(src_addr + 2 * DILATION_Y * src_stride_y, left2, middle2, right2);
-
-#if WEIGHTS_OFFSET != 0
- sum0 += convert_int8(left0) + convert_int8(middle0) + convert_int8(right0);
- sum0 += convert_int8(left1) + convert_int8(middle1) + convert_int8(right1);
- sum0 += convert_int8(left2) + convert_int8(middle2) + convert_int8(right2);
-#endif /* WEIGHTS_OFFSET != 0 */
-
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- // If conv_stride_y is equal to 1, we compute two output rows
-
- VEC_TYPE(8)
- left3, middle3, right3;
- int8 values1 = 0;
- int8 sum1 = 0;
-
- GET_VALUES(src_addr + 3 * src_stride_y, left3, middle3, right3);
-
-#if WEIGHTS_OFFSET != 0
- sum1 += convert_int8(left1) + convert_int8(middle1) + convert_int8(right1);
- sum1 += convert_int8(left2) + convert_int8(middle2) + convert_int8(right2);
- sum1 += convert_int8(left3) + convert_int8(middle3) + convert_int8(right3);
-#endif /* WEIGHTS_OFFSET != 0 */
-#endif // CONV_STRIDE_Y == 1 && DILATION_Y==1
-
- ARM_DOT((VEC_TYPE(4))(left0.s0, middle0.s0, right0.s0, left1.s0), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s0);
- ARM_DOT((VEC_TYPE(4))(middle1.s0, right1.s0, left2.s0, middle2.s0), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s0);
- values0.s0 += right2.s0 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left0.s1, middle0.s1, right0.s1, left1.s1), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s1);
- ARM_DOT((VEC_TYPE(4))(middle1.s1, right1.s1, left2.s1, middle2.s1), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s1);
- values0.s1 += right2.s1 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left0.s2, middle0.s2, right0.s2, left1.s2), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s2);
- ARM_DOT((VEC_TYPE(4))(middle1.s2, right1.s2, left2.s2, middle2.s2), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s2);
- values0.s2 += right2.s2 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left0.s3, middle0.s3, right0.s3, left1.s3), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s3);
- ARM_DOT((VEC_TYPE(4))(middle1.s3, right1.s3, left2.s3, middle2.s3), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s3);
- values0.s3 += right2.s3 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left0.s4, middle0.s4, right0.s4, left1.s4), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s4);
- ARM_DOT((VEC_TYPE(4))(middle1.s4, right1.s4, left2.s4, middle2.s4), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s4);
- values0.s4 += right2.s4 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left0.s5, middle0.s5, right0.s5, left1.s5), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s5);
- ARM_DOT((VEC_TYPE(4))(middle1.s5, right1.s5, left2.s5, middle2.s5), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s5);
- values0.s5 += right2.s5 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left0.s6, middle0.s6, right0.s6, left1.s6), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s6);
- ARM_DOT((VEC_TYPE(4))(middle1.s6, right1.s6, left2.s6, middle2.s6), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s6);
- values0.s6 += right2.s6 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left0.s7, middle0.s7, right0.s7, left1.s7), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s7);
- ARM_DOT((VEC_TYPE(4))(middle1.s7, right1.s7, left2.s7, middle2.s7), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s7);
- values0.s7 += right2.s7 * w2.s2;
-
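- // Each output column values0.s0..s7 above accumulates the nine 3x3 taps: the two ARM_DOT calls cover eight
- // taps (rows 0-2, left/middle/right) and the ninth tap (bottom-right input times w2.s2) is added with a
- // plain multiply. The block below repeats the pattern for the second output row when two rows are computed
- // per work-item.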
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- ARM_DOT((VEC_TYPE(4))(left1.s0, middle1.s0, right1.s0, left2.s0), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s0);
- ARM_DOT((VEC_TYPE(4))(middle2.s0, right2.s0, left3.s0, middle3.s0), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s0);
- values1.s0 += right3.s0 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left1.s1, middle1.s1, right1.s1, left2.s1), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s1);
- ARM_DOT((VEC_TYPE(4))(middle2.s1, right2.s1, left3.s1, middle3.s1), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s1);
- values1.s1 += right3.s1 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left1.s2, middle1.s2, right1.s2, left2.s2), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s2);
- ARM_DOT((VEC_TYPE(4))(middle2.s2, right2.s2, left3.s2, middle3.s2), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s2);
- values1.s2 += right3.s2 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left1.s3, middle1.s3, right1.s3, left2.s3), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s3);
- ARM_DOT((VEC_TYPE(4))(middle2.s3, right2.s3, left3.s3, middle3.s3), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s3);
- values1.s3 += right3.s3 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left1.s4, middle1.s4, right1.s4, left2.s4), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s4);
- ARM_DOT((VEC_TYPE(4))(middle2.s4, right2.s4, left3.s4, middle3.s4), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s4);
- values1.s4 += right3.s4 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left1.s5, middle1.s5, right1.s5, left2.s5), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s5);
- ARM_DOT((VEC_TYPE(4))(middle2.s5, right2.s5, left3.s5, middle3.s5), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s5);
- values1.s5 += right3.s5 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left1.s6, middle1.s6, right1.s6, left2.s6), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s6);
- ARM_DOT((VEC_TYPE(4))(middle2.s6, right2.s6, left3.s6, middle3.s6), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s6);
- values1.s6 += right3.s6 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left1.s7, middle1.s7, right1.s7, left2.s7), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s7);
- ARM_DOT((VEC_TYPE(4))(middle2.s7, right2.s7, left3.s7, middle3.s7), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s7);
- values1.s7 += right3.s7 * w2.s2;
-#endif // CONV_STRIDE_Y == 1 && DILATION_Y==1
-
-#if defined(HAS_BIAS)
- values0 += (int8)(bias_value);
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- values1 += (int8)(bias_value);
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1 */
-#endif //defined(HAS_BIAS)
-
-#if WEIGHTS_OFFSET != 0
- values0 += sum0 * (int8)(WEIGHTS_OFFSET);
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- values1 += sum1 * (int8)(WEIGHTS_OFFSET);
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1 */
-#endif /* WEIGHTS_OFFSET != 0 */
-
-#if INPUT_OFFSET != 0
- WEIGHTS_PROMOTED_TYPE sum_weights = 0;
- VEC_WEIGHTS_PROMOTED_TYPE(3)
- tmp_we = CONVERT(w0, VEC_WEIGHTS_PROMOTED_TYPE(3)) + CONVERT(w1, VEC_WEIGHTS_PROMOTED_TYPE(3)) + CONVERT(w2, VEC_WEIGHTS_PROMOTED_TYPE(3));
- sum_weights += tmp_we.s0 + tmp_we.s1 + tmp_we.s2;
- values0 += sum_weights * (int8)(INPUT_OFFSET);
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- values1 += sum_weights * (int8)(INPUT_OFFSET);
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1*/
-#endif /* INPUT_OFFSET != 0 */
-
-#if K_OFFSET != 0
- values0 += (int8)(K_OFFSET);
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- values1 += (int8)(K_OFFSET);
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1*/
-#endif /* K_OFFSET != 0 */
-
-#if defined(REAL_MULTIPLIER)
-
- values0 = CONVERT(round(CONVERT(values0, float8) * (float8)REAL_MULTIPLIER), int8);
-
-#else // defined(REAL_MULTIPLIER)
-
-#if defined(PER_CHANNEL_QUANTIZATION)
- int8 res0_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values0, output_multiplier, output_shift, 8);
- int8 res0_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values0, output_multiplier, output_shift, 8);
- values0 = select(res0_shift_lt0, res0_shift_gt0, (int8)(output_shift) >= 0);
-#else // defined(PER_CHANNEL_QUANTIZATION)
-#if OUTPUT_SHIFT < 0
- values0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
-#else // OUTPUT_SHIFT < 0
- values0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
-#endif // OUTPUT_SHIFT < 0
-#endif // defined(PER_CHANNEL_QUANTIZATION)
-
-#endif // defined(REAL_MULTIPLIER)
-
- values0 += (int8)OUTPUT_OFFSET;
- VEC_TYPE(8)
- res0 = CONVERT_SAT(values0, VEC_TYPE(8));
-
- vstore8(ACTIVATION_FUNC(res0), 0, dst.ptr);
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
-
-#if defined(REAL_MULTIPLIER)
-
- values1 = CONVERT(round(CONVERT(values1, float8) * (float8)REAL_MULTIPLIER), int8);
-
-#else // defined(REAL_MULTIPLIER)
-
-#if defined(PER_CHANNEL_QUANTIZATION)
- int8 res1_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values1, output_multiplier, output_shift, 8);
- int8 res1_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values1, output_multiplier, output_shift, 8);
- values1 = select(res1_shift_lt0, res1_shift_gt0, (int8)(output_shift) >= 0);
-#else // defined(PER_CHANNEL_QUANTIZATION)
-#if OUTPUT_SHIFT < 0
- values1 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
-#else // OUTPUT_SHIFT < 0
- values1 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
-#endif // OUTPUT_SHIFT < 0
-#endif // defined(PER_CHANNEL_QUANTIZATION)
-
-#endif // defined(REAL_MULTIPLIER)
-
- values1 += (int8)OUTPUT_OFFSET;
- VEC_TYPE(8)
- res1 = CONVERT_SAT(values1, VEC_TYPE(8));
-
- vstore8(ACTIVATION_FUNC(res1), 0, dst.ptr + dst_stride_y);
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1*/
-}
-
-#endif // !defined(IS_DOT8)
-
-#endif /* defined(CONV_STRIDE_Y) && defined(CONV_STRIDE_X) && defined(DEPTH_MULTIPLIER) && defined(DST_CHANNELS) */
-
-#if defined(VEC_SIZE) && defined(SRC_DIM_1) && defined(SRC_DIM_2) && defined(CONV_PAD_TOP) && defined(CONV_PAD_LEFT)
-
-#define asymm_mult_by_quant_multiplier_less_than_one(x, y, z) ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(x, y, z, VEC_SIZE)
-
-#define MULTIPLY_ADD(x, y, acc) acc += CONVERT(CONVERT(x, VEC_WEIGHTS_PROMOTED_TYPE(VEC_SIZE)) * CONVERT(y, VEC_WEIGHTS_PROMOTED_TYPE(VEC_SIZE)), VEC_INT)
-
-#if WEIGHTS_OFFSET != 0
-#define MULTIPLY_ADD_ACCUMULATE(x, y, acc, sum) \
- ({ \
- sum += CONVERT(x, VEC_INT); \
- MULTIPLY_ADD(x, y, acc); \
- })
-#else /* WEIGHTS_OFFSET != 0 */
-#define MULTIPLY_ADD_ACCUMULATE(x, y, acc, sum) MULTIPLY_ADD(x, y, acc)
-#endif /* WEIGHTS_OFFSET != 0 */
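- // MULTIPLY_ADD widens the input and weight vectors to the promoted type, multiplies them element-wise and
- // accumulates into acc. MULTIPLY_ADD_ACCUMULATE additionally keeps a running sum of the raw input values in
- // sum when WEIGHTS_OFFSET != 0; that sum is later folded in as WEIGHTS_OFFSET * sum, the cross term of the
- // quantization offset expansion.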
-
-#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
-#define DOT_PRODUCT(acc, val0, val1, val2, val3, val4, val5, val6, val7, val8, w0, w1) \
- ({ \
- ARM_DOT((VEC_TYPE(4))(val0, val1, val2, val3), w0.s0123, acc); \
- ARM_DOT((VEC_TYPE(4))(val4, val5, val6, val7), w0.s4567, acc); \
- acc += val8 * w1; \
- })
-
-#define DOT_PRODUCT_REDUCTION(sum, val0, val1, val2, val3, val4, val5, val6, val7, val8) \
- ({ \
- sum = val0; \
- ARM_DOT((VEC_TYPE(4))(val1, val2, val3, val4), (VEC_TYPE(4))1, sum); \
- ARM_DOT((VEC_TYPE(4))(val5, val6, val7, val8), (VEC_TYPE(4))1, sum); \
- })
-
-#define DOT_PRODUCT_REDUCTION_WEIGHTS(sum, w0, w1) \
- ({ \
- sum = w1; \
- ARM_DOT(w0.s0123, (VEC_TYPE(4))1, sum); \
- ARM_DOT(w0.s4567, (VEC_TYPE(4))1, sum); \
- })
-
-#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
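- // DOT_PRODUCT accumulates a full 3x3 (nine-tap) dot product into acc using two 4-way ARM_DOT instructions
- // plus one scalar multiply-add. DOT_PRODUCT_REDUCTION sums nine input values and DOT_PRODUCT_REDUCTION_WEIGHTS
- // sums the nine weights; these reductions presumably feed the WEIGHTS_OFFSET and INPUT_OFFSET correction
- // terms of the quantized accumulation.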
-
-#if defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y) && VEC_SIZE == 4
-/** This function computes the quantized depthwise convolution for the NHWC data layout when the stride along the width or height is not 1.
- *
- * @note This kernel assumes VEC_SIZE is 4.
- * @note The weights tensor is expected to be reshaped using @ref CLDepthwiseConvolutionLayerReshapeWeightsKernel.
- * @note The number of elements read per thread must be passed at compile time using -DVEC_SIZE (e.g. -DVEC_SIZE=4)
- * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM_2 (e.g. -DSRC_DIM_2=112)
- * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. -DCONV_PAD_TOP=1)
- * @note The convolution pad left must be passed at compile time using -DCONV_PAD_LEFT (e.g. -DCONV_PAD_LEFT=1)
- * @note The convolution stride along the width must be passed at compile time using -DCONV_STRIDE_X (e.g. -DCONV_STRIDE_X=1)
- * @note The convolution stride along the height must be passed at compile time using -DCONV_STRIDE_Y (e.g. -DCONV_STRIDE_Y=1)
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor reshaped. Supported data types: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] output_multipliers_ptr Pointer to the output multipliers vector. Supported data types: S32
- * @param[in] output_multipliers_stride_x Stride of the output multipliers vector in X dimension (in bytes)
- * @param[in] output_multipliers_step_x output_multipliers_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_multipliers_offset_first_element_in_bytes The offset of the first element in the output multipliers vector
- * @param[in] output_shifts_ptr Pointer to the output shifts vector. Supported data types: S32
- * @param[in] output_shifts_stride_x Stride of the output shifts vector in X dimension (in bytes)
- * @param[in] output_shifts_step_x output_shifts_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_shifts_offset_first_element_in_bytes The offset of the first element in the output shifts vector
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: S32
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- * @param[in] max_offset Max offset for the input tensor
- */
-__kernel void dwc_3x3_reshaped_quantized8_nhwc(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst),
- IMAGE_DECLARATION(weights),
- VECTOR_DECLARATION(output_multipliers),
- VECTOR_DECLARATION(output_shifts),
-#if defined(HAS_BIAS)
- VECTOR_DECLARATION(biases),
-#endif /* defined(HAS_BIAS) */
- int max_offset)
-{
- const int x = get_global_id(0); // channels
- const int y = get_global_id(1); // spatial coordinate x
-#if defined(DST_DEPTH)
- int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y
- int b = get_global_id(2) / (int)DST_DEPTH; // batch
-#else // defined(DST_DEPTH)
- int z = get_global_id(2); // spatial coordinate y
-#endif // defined(DST_DEPTH)
-
- __global uchar *weights_addr = weights_ptr + weights_offset_first_element_in_bytes + x * weights_stride_y;
-
-#if defined(DST_DEPTH)
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE + b * src_stride_w;
-#else /* defined(DST_DEPTH) */
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE;
-#endif /* defined(DST_DEPTH) */
-
- int z_coord = 0;
- int4 offset = 0;
- int4 y_coord = ((int4)(y * CONV_STRIDE_X) + (int4)(0, DILATION_X * 1, DILATION_X * 2, DILATION_X * 3)) - (int)CONV_PAD_LEFT;
-
- // Only for y = 0 can the coordinate be negative. If so, we clamp it to SRC_DIM_1
- y_coord.s0 = min((uint)y_coord.s0, (uint)SRC_DIM_1);
- y_coord.s1 = min((uint)y_coord.s1, (uint)SRC_DIM_1);
- y_coord.s2 = min((uint)y_coord.s2, (uint)SRC_DIM_1);
- y_coord.s3 = min((uint)y_coord.s3, (uint)SRC_DIM_1);
-
- int4 y_offset = convert_int4(y_coord * (int)src_stride_y);
-
- // We compute VEC_SIZEx1x1 [C,W,H] elements
- VEC_INT acc = 0, sum = 0;
-
- // Load weights
- VEC_DATA_TYPE(WEIGHTS_TYPE, 16)
- w0_tmp = VLOAD(16)(0, (__global WEIGHTS_TYPE *)(weights_addr));
- VEC_DATA_TYPE(WEIGHTS_TYPE, 16)
- w1_tmp = VLOAD(16)(0, (__global WEIGHTS_TYPE *)(weights_addr + 16));
- VEC_DATA_TYPE(WEIGHTS_TYPE, 4)
- w8 = VLOAD(4)(0, (__global WEIGHTS_TYPE *)(weights_addr + 2 * 16));
-
- VEC_DATA_TYPE(WEIGHTS_TYPE, 4)
- w0 = w0_tmp.s0123;
- VEC_DATA_TYPE(WEIGHTS_TYPE, 4)
- w1 = w0_tmp.s4567;
- VEC_DATA_TYPE(WEIGHTS_TYPE, 4)
- w2 = w0_tmp.s89AB;
- VEC_DATA_TYPE(WEIGHTS_TYPE, 4)
- w3 = w0_tmp.sCDEF;
-
- VEC_DATA_TYPE(WEIGHTS_TYPE, 4)
- w4 = w1_tmp.s0123;
- VEC_DATA_TYPE(WEIGHTS_TYPE, 4)
- w5 = w1_tmp.s4567;
- VEC_DATA_TYPE(WEIGHTS_TYPE, 4)
- w6 = w1_tmp.s89AB;
- VEC_DATA_TYPE(WEIGHTS_TYPE, 4)
- w7 = w1_tmp.sCDEF;
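-
- // The reshaped weights lay out the 3x3 filter as nine consecutive groups of VEC_SIZE (here 4) 8-bit values
- // per channel block, so the 36 values are fetched with two 16-element loads plus one 4-element load and
- // split into w0..w8, one 4-wide weight vector per filter tap.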
-
-#if INPUT_OFFSET != 0
- VEC_INT sum_we = CONVERT(w0, VEC_INT) + CONVERT(w1, VEC_INT) + CONVERT(w2, VEC_INT)
- + CONVERT(w3, VEC_INT) + CONVERT(w4, VEC_INT) + CONVERT(w5, VEC_INT)
- + CONVERT(w6, VEC_INT) + CONVERT(w7, VEC_INT) + CONVERT(w8, VEC_INT);
-#endif /* INPUT_OFFSET != 0 */
-
- // Load input values
- // z == 0
- // Clamp z_coord because, for z = 0, it can be negative
- // z_coord is cast to unsigned int so that a single min() operation handles both bounds
- // A "-1" 32-bit signed value converted to unsigned gives 4294967295
- z_coord = z * (int)CONV_STRIDE_Y - (int)CONV_PAD_TOP;
- z_coord = min((uint)z_coord, (uint)SRC_DIM_2);
- offset = select(y_offset + (int4)(z_coord * src_stride_z), (int4)max_offset, (int4)z_coord < 0 || (int4)z_coord >= SRC_DIM_2);
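- // Worked example of the clamp above: for z = 0 with CONV_PAD_TOP = 1, z_coord starts at -1; (uint)(-1)
- // becomes 4294967295, so min() clamps it to SRC_DIM_2, and the select() then sees z_coord >= SRC_DIM_2 and
- // redirects the three loads below to the safe max_offset location.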
- VEC_TYPE(VEC_SIZE)
- values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0));
- VEC_TYPE(VEC_SIZE)
- values1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1));
- VEC_TYPE(VEC_SIZE)
- values2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2));
-
- // z == 1
- // z_coord can only be negative for z = 0, but it can still fall beyond the input at the bottom edge,
- // so the same clamp and out-of-bounds guard are applied to the offset here as well
- z_coord = z * (int)CONV_STRIDE_Y - (int)CONV_PAD_TOP + DILATION_Y;
- z_coord = min((uint)z_coord, (uint)SRC_DIM_2);
- offset = select(y_offset + (int4)(z_coord * src_stride_z), (int4)max_offset, (int4)z_coord < 0 || (int4)z_coord >= SRC_DIM_2);
- VEC_TYPE(VEC_SIZE)
- values3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0));
- VEC_TYPE(VEC_SIZE)
- values4 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1));
- VEC_TYPE(VEC_SIZE)
- values5 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2));
-
- // z == 2
- // The offset can be out of bounds, so it is replaced with max_offset whenever z_coord falls outside the input
- z_coord = z * (int)CONV_STRIDE_Y - (int)CONV_PAD_TOP + DILATION_Y * 2;
- z_coord = min((uint)z_coord, (uint)SRC_DIM_2);
- offset = select(y_offset + (int4)(z_coord * src_stride_z), (int4)max_offset, (int4)z_coord < 0 || (int4)z_coord >= SRC_DIM_2);
- VEC_TYPE(VEC_SIZE)
- values6 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0));
- VEC_TYPE(VEC_SIZE)
- values7 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1));
- VEC_TYPE(VEC_SIZE)
- values8 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2));
-
- MULTIPLY_ADD_ACCUMULATE(values0, w0, acc, sum);
- MULTIPLY_ADD_ACCUMULATE(values1, w1, acc, sum);
- MULTIPLY_ADD_ACCUMULATE(values2, w2, acc, sum);
-
- MULTIPLY_ADD_ACCUMULATE(values3, w3, acc, sum);
- MULTIPLY_ADD_ACCUMULATE(values4, w4, acc, sum);
- MULTIPLY_ADD_ACCUMULATE(values5, w5, acc, sum);
-
- MULTIPLY_ADD_ACCUMULATE(values6, w6, acc, sum);
- MULTIPLY_ADD_ACCUMULATE(values7, w7, acc, sum);
- MULTIPLY_ADD_ACCUMULATE(values8, w8, acc, sum);
-
-#if defined(HAS_BIAS)
- Vector biases = CONVERT_TO_VECTOR_STRUCT(biases);
- VEC_INT bias_values = VLOAD(VEC_SIZE)(0, (__global int *)biases.ptr);
- acc += bias_values;
-#endif // defined(HAS_BIAS)
-
-#if WEIGHTS_OFFSET != 0
- acc += WEIGHTS_OFFSET * sum;
-#endif /* WEIGHTS_OFFSET != 0 */
-
-#if INPUT_OFFSET != 0
- acc += INPUT_OFFSET * sum_we;
-#endif /* INPUT_OFFSET != 0 */
-
-#if K_OFFSET != 0
- acc += (VEC_INT)K_OFFSET;
-#endif /* K_OFFSET != 0 */
-
-#if defined(REAL_MULTIPLIER)
-
- acc = CONVERT(round(CONVERT(acc, VEC_FLOAT) * (VEC_FLOAT)REAL_MULTIPLIER), VEC_INT);
-
-#else // defined(REAL_MULTIPLIER)
-
-#if defined(PER_CHANNEL_QUANTIZATION)
- Vector output_multipliers = CONVERT_TO_VECTOR_STRUCT(output_multipliers);
- Vector output_shifts = CONVERT_TO_VECTOR_STRUCT(output_shifts);
- VEC_INT output_multiplier = VLOAD(VEC_SIZE)(0, (__global int *)output_multipliers.ptr);
- VEC_INT output_shift = VLOAD(VEC_SIZE)(0, (__global int *)output_shifts.ptr);
-
- VEC_INT res_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc, output_multiplier, output_shift, VEC_SIZE);
- VEC_INT res_shift_gt0 = asymm_mult_by_quant_multiplier_less_than_one(acc, output_multiplier, output_shift);
- acc = select(res_shift_lt0, res_shift_gt0, output_shift >= 0);
-#else // defined(PER_CHANNEL_QUANTIZATION)
-#if OUTPUT_SHIFT < 0
- acc = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, VEC_SIZE);
-#else // OUTPUT_SHIFT < 0
- acc = asymm_mult_by_quant_multiplier_less_than_one(acc, OUTPUT_MULTIPLIER, OUTPUT_SHIFT);
-#endif // OUTPUT_SHIFT < 0
-#endif // defined(PER_CHANNEL_QUANTIZATION)
-
-#endif // defined(REAL_MULTIPLIER)
-
- acc += (VEC_INT)OUTPUT_OFFSET;
-
- VEC_TYPE(VEC_SIZE)
- res = CONVERT_SAT(acc, VEC_TYPE(VEC_SIZE));
-
-#if defined(DST_DEPTH)
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * dst_step_x + y * dst_step_y + z * dst_step_z + b * dst_stride_w;
-#else /* defined(DST_DEPTH) */
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * dst_step_x + y * dst_step_y + z * dst_step_z;
-#endif /* defined(DST_DEPTH) */
-
- VSTORE(VEC_SIZE)
- (ACTIVATION_FUNC(res), 0, (__global DATA_TYPE *)(dst_addr));
-}
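-
- // Illustrative build options for the kernel above (a sketch only; the exact set and the values are chosen
- // by the host-side code and depend on the layer configuration):
- //   -DDATA_TYPE=uchar -DWEIGHTS_TYPE=uchar -DVEC_SIZE=4
- //   -DSRC_DIM_1=112 -DSRC_DIM_2=112 -DCONV_PAD_TOP=1 -DCONV_PAD_LEFT=1
- //   -DCONV_STRIDE_X=2 -DCONV_STRIDE_Y=2 -DDILATION_X=1 -DDILATION_Y=1
- //   -DINPUT_OFFSET=-128 -DWEIGHTS_OFFSET=-128 -DK_OFFSET=147456
- //   -DOUTPUT_OFFSET=0 -DOUTPUT_MULTIPLIER=1431655765 -DOUTPUT_SHIFT=1 -DHAS_BIAS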
-#endif // defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y)
-
-#if defined(NUM_ROWS_PROCESSED) && defined(NUM_PLANES_PROCESSED) && VEC_SIZE == 4
-/** This function computes the quantized depthwise convolution for the NHWC data layout when the stride along the width and height is 1.
- *
- * @note This kernel assumes VEC_SIZE is 4.
- * @note The weights tensor is expected to be reshaped using @ref CLDepthwiseConvolutionLayerReshapeWeightsKernel.
- * @note The number of elements read per thread must be passed at compile time using -DVEC_SIZE (e.g. -DVEC_SIZE=4)
- * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM_2 (e.g. -DSRC_DIM_2=112)
- * @note The number of rows processed per thread must be passed at compile time using -DNUM_ROWS_PROCESSED (i.e. -DNUM_ROWS_PROCESSED=2)
- * @note The number of planes processed per thread must be passed at compile time using -DNUM_PLANES_PROCESSED (i.e. -DNUM_PLANES_PROCESSED=2)
- * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. -DCONV_PAD_TOP=1)
- * @note The convolution pad left must be passed at compile time using -DCONV_PAD_LEFT (e.g. -DCONV_PAD_LEFT=1).
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] output_multipliers_ptr Pointer to the output multipliers vector. Supported data types: S32
- * @param[in] output_multipliers_stride_x Stride of the output multipliers vector in X dimension (in bytes)
- * @param[in] output_multipliers_step_x output_multipliers_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_multipliers_offset_first_element_in_bytes The offset of the first element in the output multipliers vector
- * @param[in] output_shifts_ptr Pointer to the output shifts vector. Supported data types: S32
- * @param[in] output_shifts_stride_x Stride of the output shifts vector in X dimension (in bytes)
- * @param[in] output_shifts_step_x output_shifts_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_shifts_offset_first_element_in_bytes The offset of the first element in the output shifts vector
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: S32
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- * @param[in] max_offset Max offset for the input tensor
- */
-
-__kernel void dwc_3x3_reshaped_quantized8_stride1_nhwc(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst),
- IMAGE_DECLARATION(weights),
- VECTOR_DECLARATION(output_multipliers),
- VECTOR_DECLARATION(output_shifts),
-#if defined(HAS_BIAS)
- VECTOR_DECLARATION(biases),
-#endif /* defined(HAS_BIAS) */
- int max_offset)
-{
- int x = get_global_id(0);
- int y = get_global_id(1);
-#if defined(DST_DEPTH)
- int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y
- int b = get_global_id(2) / (int)DST_DEPTH; // batch
-#else // defined(DST_DEPTH)
- int z = get_global_id(2); // spatial coordinate y
-#endif // defined(DST_DEPTH)
-
- __global uchar *weights_addr = weights_ptr + weights_offset_first_element_in_bytes + x * weights_stride_y;
-
-#if defined(DST_DEPTH)
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE + b * src_stride_w;
-#else /* defined(DST_DEPTH) */
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE;
-#endif /* defined(DST_DEPTH) */
-
- int z_coord = 0;
- int4 offset = 0;
- int4 y_coord = ((int4)(y * NUM_ROWS_PROCESSED) + (int4)(0, 1, 2, 3)) - (int)CONV_PAD_LEFT;
-
- // Only for y = 0 can the coordinate be negative. If so, we clamp it to SRC_DIM_1
- y_coord.s0 = min((uint)y_coord.s0, (uint)SRC_DIM_1);
- y_coord.s1 = min((uint)y_coord.s1, (uint)SRC_DIM_1);
- y_coord.s2 = min((uint)y_coord.s2, (uint)SRC_DIM_1);
- y_coord.s3 = min((uint)y_coord.s3, (uint)SRC_DIM_1);
-
- int4 y_offset = convert_int4(y_coord * (int)src_stride_y);
-
- // We compute 4x2x2 [C,W,H] elements
- VEC_INT acc0 = 0, sum0 = 0;
- VEC_INT acc1 = 0, sum1 = 0;
- VEC_INT acc2 = 0, sum2 = 0;
- VEC_INT acc3 = 0, sum3 = 0;
-
- // Load weights
- VEC_DATA_TYPE(WEIGHTS_TYPE, 16)
- w0_tmp = VLOAD(16)(0, (__global WEIGHTS_TYPE *)(weights_addr));
- VEC_DATA_TYPE(WEIGHTS_TYPE, 16)
- w1_tmp = VLOAD(16)(0, (__global WEIGHTS_TYPE *)(weights_addr + 16));
- VEC_DATA_TYPE(WEIGHTS_TYPE, 4)
- w8 = VLOAD(4)(0, (__global WEIGHTS_TYPE *)(weights_addr + 2 * 16));
-
- VEC_DATA_TYPE(WEIGHTS_TYPE, 4)
- w0 = w0_tmp.s0123;
- VEC_DATA_TYPE(WEIGHTS_TYPE, 4)
- w1 = w0_tmp.s4567;
- VEC_DATA_TYPE(WEIGHTS_TYPE, 4)
- w2 = w0_tmp.s89AB;
- VEC_DATA_TYPE(WEIGHTS_TYPE, 4)
- w3 = w0_tmp.sCDEF;
-
- VEC_DATA_TYPE(WEIGHTS_TYPE, 4)
- w4 = w1_tmp.s0123;
- VEC_DATA_TYPE(WEIGHTS_TYPE, 4)
- w5 = w1_tmp.s4567;
- VEC_DATA_TYPE(WEIGHTS_TYPE, 4)
- w6 = w1_tmp.s89AB;
- VEC_DATA_TYPE(WEIGHTS_TYPE, 4)
- w7 = w1_tmp.sCDEF;
-
-#if INPUT_OFFSET != 0
- VEC_INT sum_we = CONVERT(w0, VEC_INT) + CONVERT(w1, VEC_INT) + CONVERT(w2, VEC_INT)
- + CONVERT(w3, VEC_INT) + CONVERT(w4, VEC_INT) + CONVERT(w5, VEC_INT)
- + CONVERT(w6, VEC_INT) + CONVERT(w7, VEC_INT) + CONVERT(w8, VEC_INT);
-#endif /* INPUT_OFFSET != 0 */
-
- // Load input values
- // z == 0
- // Clamp z_coord because, for z = 0, it can be negative
- // z_coord is cast to unsigned int so that a single min() operation handles both bounds
- // A "-1" 32-bit signed value converted to unsigned gives 4294967295
- z_coord = z * (int)NUM_PLANES_PROCESSED - (int)CONV_PAD_TOP;
- z_coord = min((uint)z_coord, (uint)SRC_DIM_2);
- offset = select(y_offset + (int4)(z_coord * src_stride_z), (int4)max_offset, (int4)z_coord < 0 || (int4)z_coord >= SRC_DIM_2);
- VEC_TYPE(VEC_SIZE)
- values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0));
- VEC_TYPE(VEC_SIZE)
- values1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1));
- VEC_TYPE(VEC_SIZE)
- values2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2));
- VEC_TYPE(VEC_SIZE)
- values3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s3));
-
- // z == 1
- z_coord = z * (int)NUM_PLANES_PROCESSED - (int)CONV_PAD_TOP + 1;
- z_coord = min((uint)z_coord, (uint)SRC_DIM_2);
- offset = select(y_offset + (int4)(z_coord * src_stride_z), (int4)max_offset, (int4)z_coord < 0 || (int4)z_coord >= SRC_DIM_2);
- VEC_TYPE(VEC_SIZE)
- values4 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0));
- VEC_TYPE(VEC_SIZE)
- values5 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1));
- VEC_TYPE(VEC_SIZE)
- values6 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2));
- VEC_TYPE(VEC_SIZE)
- values7 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s3));
-
- // z == 2
- z_coord = z * (int)NUM_PLANES_PROCESSED - (int)CONV_PAD_TOP + 2;
- z_coord = min((uint)z_coord, (uint)SRC_DIM_2);
- offset = select(y_offset + (int4)(z_coord * src_stride_z), (int4)max_offset, (int4)z_coord < 0 || (int4)z_coord >= SRC_DIM_2);
- VEC_TYPE(VEC_SIZE)
- values8 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0));
- VEC_TYPE(VEC_SIZE)
- values9 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1));
- VEC_TYPE(VEC_SIZE)
- values10 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2));
- VEC_TYPE(VEC_SIZE)
- values11 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s3));
-
- // z == 3
- z_coord = z * (int)NUM_PLANES_PROCESSED - (int)CONV_PAD_TOP + 3;
- z_coord = min((uint)z_coord, (uint)SRC_DIM_2);
- offset = select(y_offset + (int4)(z_coord * src_stride_z), (int4)max_offset, (int4)z_coord < 0 || (int4)z_coord >= SRC_DIM_2);
- VEC_TYPE(VEC_SIZE)
- values12 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0));
- VEC_TYPE(VEC_SIZE)
- values13 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1));
- VEC_TYPE(VEC_SIZE)
- values14 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2));
- VEC_TYPE(VEC_SIZE)
- values15 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s3));
-
- MULTIPLY_ADD_ACCUMULATE(values0, w0, acc0, sum0);
- MULTIPLY_ADD_ACCUMULATE(values1, w1, acc0, sum0);
- MULTIPLY_ADD_ACCUMULATE(values2, w2, acc0, sum0);
- MULTIPLY_ADD_ACCUMULATE(values1, w0, acc1, sum1);
- MULTIPLY_ADD_ACCUMULATE(values2, w1, acc1, sum1);
- MULTIPLY_ADD_ACCUMULATE(values3, w2, acc1, sum1);
-
- MULTIPLY_ADD_ACCUMULATE(values4, w3, acc0, sum0);
- MULTIPLY_ADD_ACCUMULATE(values5, w4, acc0, sum0);
- MULTIPLY_ADD_ACCUMULATE(values6, w5, acc0, sum0);
- MULTIPLY_ADD_ACCUMULATE(values5, w3, acc1, sum1);
- MULTIPLY_ADD_ACCUMULATE(values6, w4, acc1, sum1);
- MULTIPLY_ADD_ACCUMULATE(values7, w5, acc1, sum1);
-
- MULTIPLY_ADD_ACCUMULATE(values8, w6, acc0, sum0);
- MULTIPLY_ADD_ACCUMULATE(values9, w7, acc0, sum0);
- MULTIPLY_ADD_ACCUMULATE(values10, w8, acc0, sum0);
- MULTIPLY_ADD_ACCUMULATE(values9, w6, acc1, sum1);
- MULTIPLY_ADD_ACCUMULATE(values10, w7, acc1, sum1);
- MULTIPLY_ADD_ACCUMULATE(values11, w8, acc1, sum1);
-
- MULTIPLY_ADD_ACCUMULATE(values4, w0, acc2, sum2);
- MULTIPLY_ADD_ACCUMULATE(values5, w1, acc2, sum2);
- MULTIPLY_ADD_ACCUMULATE(values6, w2, acc2, sum2);
- MULTIPLY_ADD_ACCUMULATE(values5, w0, acc3, sum3);
- MULTIPLY_ADD_ACCUMULATE(values6, w1, acc3, sum3);
- MULTIPLY_ADD_ACCUMULATE(values7, w2, acc3, sum3);
-
- MULTIPLY_ADD_ACCUMULATE(values8, w3, acc2, sum2);
- MULTIPLY_ADD_ACCUMULATE(values9, w4, acc2, sum2);
- MULTIPLY_ADD_ACCUMULATE(values10, w5, acc2, sum2);
- MULTIPLY_ADD_ACCUMULATE(values9, w3, acc3, sum3);
- MULTIPLY_ADD_ACCUMULATE(values10, w4, acc3, sum3);
- MULTIPLY_ADD_ACCUMULATE(values11, w5, acc3, sum3);
-
- MULTIPLY_ADD_ACCUMULATE(values12, w6, acc2, sum2);
- MULTIPLY_ADD_ACCUMULATE(values13, w7, acc2, sum2);
- MULTIPLY_ADD_ACCUMULATE(values14, w8, acc2, sum2);
- MULTIPLY_ADD_ACCUMULATE(values13, w6, acc3, sum3);
- MULTIPLY_ADD_ACCUMULATE(values14, w7, acc3, sum3);
- MULTIPLY_ADD_ACCUMULATE(values15, w8, acc3, sum3);
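- // The interleaving above produces a 2x2 output tile per VEC_SIZE channels, matching the "4x2x2 [C,W,H]"
- // comment earlier: acc0/acc1 are two adjacent output columns computed from input planes z..z+2
- // (values0..values11), and acc2/acc3 are the same two columns on the next output plane, computed from
- // input planes z+1..z+3 (values4..values15).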
-
-#if defined(HAS_BIAS)
- Vector biases = CONVERT_TO_VECTOR_STRUCT(biases);
-
- VEC_INT bias_values = VLOAD(VEC_SIZE)(0, (__global int *)biases.ptr);
-
- acc0 += bias_values;
- acc1 += bias_values;
- acc2 += bias_values;
- acc3 += bias_values;
-#endif /* defined(HAS_BIAS) */
-
-#if WEIGHTS_OFFSET != 0
- acc0 += WEIGHTS_OFFSET * sum0;
- acc1 += WEIGHTS_OFFSET * sum1;
- acc2 += WEIGHTS_OFFSET * sum2;
- acc3 += WEIGHTS_OFFSET * sum3;
-#endif /* WEIGHTS_OFFSET != 0 */
-
-#if INPUT_OFFSET != 0
- VEC_INT offs = INPUT_OFFSET * sum_we;
-
- acc0 += offs;
- acc1 += offs;
- acc2 += offs;
- acc3 += offs;
-#endif /* INPUT_OFFSET != 0 */
-
-#if K_OFFSET != 0
- acc0 += (VEC_INT)K_OFFSET;
- acc1 += (VEC_INT)K_OFFSET;
- acc2 += (VEC_INT)K_OFFSET;
- acc3 += (VEC_INT)K_OFFSET;
-#endif /* K_OFFSET != 0 */
-
-#if defined(REAL_MULTIPLIER)
-
- acc0 = CONVERT(round(CONVERT(acc0, VEC_FLOAT) * (VEC_FLOAT)REAL_MULTIPLIER), VEC_INT);
- acc1 = CONVERT(round(CONVERT(acc1, VEC_FLOAT) * (VEC_FLOAT)REAL_MULTIPLIER), VEC_INT);
- acc2 = CONVERT(round(CONVERT(acc2, VEC_FLOAT) * (VEC_FLOAT)REAL_MULTIPLIER), VEC_INT);
- acc3 = CONVERT(round(CONVERT(acc3, VEC_FLOAT) * (VEC_FLOAT)REAL_MULTIPLIER), VEC_INT);
-
-#else // defined(REAL_MULTIPLIER)
-
-#if defined(PER_CHANNEL_QUANTIZATION)
- Vector output_multipliers = CONVERT_TO_VECTOR_STRUCT(output_multipliers);
- Vector output_shifts = CONVERT_TO_VECTOR_STRUCT(output_shifts);
- VEC_INT output_multiplier = VLOAD(VEC_SIZE)(0, (__global int *)output_multipliers.ptr);
- VEC_INT output_shift = VLOAD(VEC_SIZE)(0, (__global int *)output_shifts.ptr);
-
- VEC_INT res0_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc0, output_multiplier, output_shift, VEC_SIZE);
- VEC_INT res1_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc1, output_multiplier, output_shift, VEC_SIZE);
- VEC_INT res2_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc2, output_multiplier, output_shift, VEC_SIZE);
- VEC_INT res3_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc3, output_multiplier, output_shift, VEC_SIZE);
- VEC_INT res0_shift_gt0 = asymm_mult_by_quant_multiplier_less_than_one(acc0, output_multiplier, output_shift);
- VEC_INT res1_shift_gt0 = asymm_mult_by_quant_multiplier_less_than_one(acc1, output_multiplier, output_shift);
- VEC_INT res2_shift_gt0 = asymm_mult_by_quant_multiplier_less_than_one(acc2, output_multiplier, output_shift);
- VEC_INT res3_shift_gt0 = asymm_mult_by_quant_multiplier_less_than_one(acc3, output_multiplier, output_shift);
- acc0 = select(res0_shift_lt0, res0_shift_gt0, output_shift >= 0);
- acc1 = select(res1_shift_lt0, res1_shift_gt0, output_shift >= 0);
- acc2 = select(res2_shift_lt0, res2_shift_gt0, output_shift >= 0);
- acc3 = select(res3_shift_lt0, res3_shift_gt0, output_shift >= 0);
-#else // defined(PER_CHANNEL_QUANTIZATION)
-#if OUTPUT_SHIFT < 0
- acc0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, VEC_SIZE);
- acc1 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, VEC_SIZE);
- acc2 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc2, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, VEC_SIZE);
- acc3 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc3, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, VEC_SIZE);
-#else // OUTPUT_SHIFT < 0
- acc0 = asymm_mult_by_quant_multiplier_less_than_one(acc0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT);
- acc1 = asymm_mult_by_quant_multiplier_less_than_one(acc1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT);
- acc2 = asymm_mult_by_quant_multiplier_less_than_one(acc2, OUTPUT_MULTIPLIER, OUTPUT_SHIFT);
- acc3 = asymm_mult_by_quant_multiplier_less_than_one(acc3, OUTPUT_MULTIPLIER, OUTPUT_SHIFT);
-#endif // OUTPUT_SHIFT < 0
-#endif // defined(PER_CHANNEL_QUANTIZATION)
-
-#endif // defined(REAL_MULTIPLIER)
-
- acc0 += (VEC_INT)OUTPUT_OFFSET;
- acc1 += (VEC_INT)OUTPUT_OFFSET;
- acc2 += (VEC_INT)OUTPUT_OFFSET;
- acc3 += (VEC_INT)OUTPUT_OFFSET;
-
- VEC_TYPE(VEC_SIZE)
- res0 = CONVERT_SAT(acc0, VEC_TYPE(VEC_SIZE));
- VEC_TYPE(VEC_SIZE)
- res1 = CONVERT_SAT(acc1, VEC_TYPE(VEC_SIZE));
- VEC_TYPE(VEC_SIZE)
- res2 = CONVERT_SAT(acc2, VEC_TYPE(VEC_SIZE));
- VEC_TYPE(VEC_SIZE)
- res3 = CONVERT_SAT(acc3, VEC_TYPE(VEC_SIZE));
-
-#if defined(DST_DEPTH)
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * dst_step_x + y * dst_step_y + (z * NUM_PLANES_PROCESSED) * dst_step_z + b * dst_stride_w;
-#else /* defined(DST_DEPTH) */
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * dst_step_x + y * dst_step_y + (z * NUM_PLANES_PROCESSED) * dst_step_z;
-#endif /* defined(DST_DEPTH) */
-
- VSTORE(VEC_SIZE)
- (ACTIVATION_FUNC(res0), 0, dst_addr + 0 * dst_stride_y);
- VSTORE(VEC_SIZE)
- (ACTIVATION_FUNC(res1), 0, dst_addr + 1 * dst_stride_y);
-
-#if((DST_DIM_2 % NUM_PLANES_PROCESSED) != 0)
- if((z * NUM_PLANES_PROCESSED + 1) < DST_DIM_2)
-#endif // ((DST_DIM_2 % NUM_PLANES_PROCESSED) != 0)
- {
- VSTORE(VEC_SIZE)
- (ACTIVATION_FUNC(res2), 0, (__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y + 1 * dst_stride_z));
- VSTORE(VEC_SIZE)
- (ACTIVATION_FUNC(res3), 0, (__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y + 1 * dst_stride_z));
- }
-}
-
-#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) && VEC_SIZE == 4
-/** This function computes the quantized depthwise convolution for the NHWC data layout, using the dot product instruction, when the stride along the width and height is 1.
- *
- * @note Per-channel quantization is not supported by this kernel.
- * @note This kernel assumes VEC_SIZE is 4.
- * @note The weights tensor is expected to be reshaped using @ref CLDepthwiseConvolutionLayerReshapeWeightsKernel.
- * @note The number of elements read per thread must be passed at compile time using -DVEC_SIZE (e.g. -DVEC_SIZE=4)
- * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM_2 (e.g. -DSRC_DIM_2=112)
- * @note The number of rows processed per thread must be passed at compile time using -DNUM_ROWS_PROCESSED (i.e. -DNUM_ROWS_PROCESSED=2)
- * @note The number of planes processed per thread must be passed at compile time using -DNUM_PLANES_PROCESSED (i.e. -DNUM_PLANES_PROCESSED=2)
- * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. -DCONV_PAD_TOP=1)
- * @note The convolution pad left must be passed at compile time using -DCONV_PAD_LEFT (e.g. -DCONV_PAD_LEFT=1).
- * @note If REAL_MULTIPLIER is passed at compile time (i.e. -DREAL_MULTIPLIER=1.355f), the final quantization is performed using a floating point multiplication.
- * If not, the quantization will be performed using a fixed point multiplication
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z                                        src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z                                        dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] output_multipliers_ptr Pointer to the output multipliers vector. Supported data types: S32
- * @param[in] output_multipliers_stride_x Stride of the output multipliers vector in X dimension (in bytes)
- * @param[in] output_multipliers_step_x output_multipliers_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_multipliers_offset_first_element_in_bytes The offset of the first element in the output multipliers vector
- * @param[in] output_shifts_ptr Pointer to the output shifts vector. Supported data types: S32
- * @param[in] output_shifts_stride_x Stride of the output shifts vector in X dimension (in bytes)
- * @param[in] output_shifts_step_x output_shifts_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_shifts_offset_first_element_in_bytes The offset of the first element in the output shifts vector
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: S32
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- * @param[in] max_offset The maximum allowed offset for the input tensor
- */
-__kernel void dwc_3x3_reshaped_quantized8_dot8_stride1_nhwc(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst),
- IMAGE_DECLARATION(weights),
- VECTOR_DECLARATION(output_multipliers),
- VECTOR_DECLARATION(output_shifts),
-#if defined(HAS_BIAS)
- VECTOR_DECLARATION(biases),
-#endif // defined(HAS_BIAS)
- int max_offset)
-{
- int x = get_global_id(0);
- int y = get_global_id(1);
-#if defined(DST_DEPTH)
- int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y
- int b = get_global_id(2) / (int)DST_DEPTH; // batch
-#else // defined(DST_DEPTH)
- int z = get_global_id(2); // spatial coordinate y
-#endif // defined(DST_DEPTH)
-
- __global uchar *weights_addr = weights_ptr + weights_offset_first_element_in_bytes + x * weights_stride_y;
-
-#if defined(DST_DEPTH)
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE + b * src_stride_w;
-#else /* defined(DST_DEPTH) */
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE;
-#endif /* defined(DST_DEPTH) */
-
- int z_coord = 0;
- int4 offset = 0;
- int4 y_coord = ((int4)(y * NUM_ROWS_PROCESSED) + (int4)(0, 1, 2, 3)) - (int)CONV_PAD_LEFT;
-
- // Only for y = 0 can the coordinate be negative; in that case it is set to SRC_DIM_1
- y_coord.s0 = min((uint)y_coord.s0, (uint)SRC_DIM_1);
- y_coord.s1 = min((uint)y_coord.s1, (uint)SRC_DIM_1);
- y_coord.s2 = min((uint)y_coord.s2, (uint)SRC_DIM_1);
- y_coord.s3 = min((uint)y_coord.s3, (uint)SRC_DIM_1);
-
- int4 y_offset = convert_int4(y_coord * (int)src_stride_y);
-
- // We compute 4x2x1 [C,W,H] elements
- VEC_INT acc0 = 0;
- VEC_INT acc1 = 0;
- VEC_INT sum0 = 0;
- VEC_INT sum1 = 0;
-
- // Load weights
- VEC_TYPE(16)
- w0 = VLOAD(16)(0, (__global WEIGHTS_TYPE *)(weights_addr));
- VEC_TYPE(16)
- w1 = VLOAD(16)(0, (__global WEIGHTS_TYPE *)(weights_addr + 16));
- VEC_TYPE(4)
- w2 = VLOAD(4)(0, (__global WEIGHTS_TYPE *)(weights_addr + 32));
-
-#if INPUT_OFFSET != 0
- // Initialize the final result with the weights reduction multiplied by INPUT_OFFSET
- DOT_PRODUCT_REDUCTION_WEIGHTS(acc0.s0, w0.s01234567, w0.s8);
- DOT_PRODUCT_REDUCTION_WEIGHTS(acc0.s1, (VEC_TYPE(8))((w0.s9ABC), (w0.sDEF), w1.s0), w1.s1);
- DOT_PRODUCT_REDUCTION_WEIGHTS(acc0.s2, w1.s23456789, w1.sA);
- DOT_PRODUCT_REDUCTION_WEIGHTS(acc0.s3, (VEC_TYPE(8))((w1.sBCD), (w1.sEF), (w2.s012)), w2.s3);
-
- // Multiply the weights reduction with INPUT_OFFSET
- acc0 = INPUT_OFFSET * acc0;
-
- acc1 = acc0;
-#endif // INPUT_OFFSET != 0
-
- // Load input values
- // z == 0
- // Clamp z_coord because for z = 0 it can be negative
- // z_coord is cast to unsigned int in order to use just a min() operation
- // A "-1" 32-bit signed variable converted to unsigned gives 4294967295
- z_coord = z - (int)CONV_PAD_TOP;
- z_coord = min((uint)z_coord, (uint)SRC_DIM_2);
- offset = y_offset + (int4)(z_coord * src_stride_z);
- offset = min(offset, (int4)max_offset);
-
- VEC_TYPE(VEC_SIZE)
- values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0));
- VEC_TYPE(VEC_SIZE)
- values1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1));
- VEC_TYPE(VEC_SIZE)
- values2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2));
- VEC_TYPE(VEC_SIZE)
- values3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s3));
-
- // z == 1
- // z_coord can only be negative for z = 0, so we do not need to clamp it here
- // Moreover z_coord cannot be out-of-bounds for z = 1, so we do not need to clamp the offset
- z_coord = z - (int)CONV_PAD_TOP + 1;
- offset = y_offset + (int4)(z_coord * src_stride_z);
- VEC_TYPE(VEC_SIZE)
- values4 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0));
- VEC_TYPE(VEC_SIZE)
- values5 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1));
- VEC_TYPE(VEC_SIZE)
- values6 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2));
- VEC_TYPE(VEC_SIZE)
- values7 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s3));
-
- // z == 2
- // After z = 1 we can simply add src_stride_z to offset without updating z_coord
- // However offset can be out-of-bounds, so we need to check if it is greater than max_offset
- offset += (int4)src_stride_z;
- offset = min(offset, (int4)max_offset);
- VEC_TYPE(VEC_SIZE)
- values8 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0));
- VEC_TYPE(VEC_SIZE)
- values9 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1));
- VEC_TYPE(VEC_SIZE)
- values10 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2));
- VEC_TYPE(VEC_SIZE)
- values11 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s3));
-
- DOT_PRODUCT_REDUCTION(sum0.s0, values0.s0, values1.s0, values2.s0, values4.s0, values5.s0, values6.s0, values8.s0, values9.s0, values10.s0);
- DOT_PRODUCT_REDUCTION(sum1.s0, values1.s0, values2.s0, values3.s0, values5.s0, values6.s0, values7.s0, values9.s0, values10.s0, values11.s0);
- DOT_PRODUCT(acc0.s0, values0.s0, values1.s0, values2.s0, values4.s0, values5.s0, values6.s0, values8.s0, values9.s0, values10.s0, w0.s01234567, w0.s8);
- DOT_PRODUCT(acc1.s0, values1.s0, values2.s0, values3.s0, values5.s0, values6.s0, values7.s0, values9.s0, values10.s0, values11.s0, w0.s01234567, w0.s8);
-
- DOT_PRODUCT_REDUCTION(sum0.s1, values0.s1, values1.s1, values2.s1, values4.s1, values5.s1, values6.s1, values8.s1, values9.s1, values10.s1);
- DOT_PRODUCT_REDUCTION(sum1.s1, values1.s1, values2.s1, values3.s1, values5.s1, values6.s1, values7.s1, values9.s1, values10.s1, values11.s1);
- DOT_PRODUCT(acc0.s1, values0.s1, values1.s1, values2.s1, values4.s1, values5.s1, values6.s1, values8.s1, values9.s1, values10.s1, (VEC_TYPE(8))((w0.s9ABC), (w0.sDEF), w1.s0), w1.s1);
- DOT_PRODUCT(acc1.s1, values1.s1, values2.s1, values3.s1, values5.s1, values6.s1, values7.s1, values9.s1, values10.s1, values11.s1, (VEC_TYPE(8))((w0.s9ABC), (w0.sDEF), w1.s0), w1.s1);
-
- DOT_PRODUCT_REDUCTION(sum0.s2, values0.s2, values1.s2, values2.s2, values4.s2, values5.s2, values6.s2, values8.s2, values9.s2, values10.s2);
- DOT_PRODUCT_REDUCTION(sum1.s2, values1.s2, values2.s2, values3.s2, values5.s2, values6.s2, values7.s2, values9.s2, values10.s2, values11.s2);
- DOT_PRODUCT(acc0.s2, values0.s2, values1.s2, values2.s2, values4.s2, values5.s2, values6.s2, values8.s2, values9.s2, values10.s2, w1.s23456789, w1.sA);
- DOT_PRODUCT(acc1.s2, values1.s2, values2.s2, values3.s2, values5.s2, values6.s2, values7.s2, values9.s2, values10.s2, values11.s2, w1.s23456789, w1.sA);
-
- DOT_PRODUCT_REDUCTION(sum0.s3, values0.s3, values1.s3, values2.s3, values4.s3, values5.s3, values6.s3, values8.s3, values9.s3, values10.s3);
- DOT_PRODUCT_REDUCTION(sum1.s3, values1.s3, values2.s3, values3.s3, values5.s3, values6.s3, values7.s3, values9.s3, values10.s3, values11.s3);
- DOT_PRODUCT(acc0.s3, values0.s3, values1.s3, values2.s3, values4.s3, values5.s3, values6.s3, values8.s3, values9.s3, values10.s3, (VEC_TYPE(8))((w1.sBCD), (w1.sEF), (w2.s012)), w2.s3);
- DOT_PRODUCT(acc1.s3, values1.s3, values2.s3, values3.s3, values5.s3, values6.s3, values7.s3, values9.s3, values10.s3, values11.s3, (VEC_TYPE(8))((w1.sBCD), (w1.sEF), (w2.s012)), w2.s3);
-
-#if defined(HAS_BIAS)
- Vector biases = CONVERT_TO_VECTOR_STRUCT(biases);
-
- VEC_INT bias_values = VLOAD(VEC_SIZE)(0, (__global int *)biases.ptr);
-
- acc0 += bias_values;
- acc1 += bias_values;
-
-#endif // defined(HAS_BIAS)
-
-#if WEIGHTS_OFFSET != 0
- acc0 += WEIGHTS_OFFSET * sum0;
- acc1 += WEIGHTS_OFFSET * sum1;
-#endif // WEIGHTS_OFFSET != 0
-
-#if K_OFFSET != 0
- acc0 += (VEC_INT)K_OFFSET;
- acc1 += (VEC_INT)K_OFFSET;
-
-#endif // K_OFFSET != 0
-
-#if defined(REAL_MULTIPLIER)
-
- acc0 = CONVERT(round(CONVERT(acc0, VEC_FLOAT) * (VEC_FLOAT)REAL_MULTIPLIER), VEC_INT);
- acc1 = CONVERT(round(CONVERT(acc1, VEC_FLOAT) * (VEC_FLOAT)REAL_MULTIPLIER), VEC_INT);
-
-#else // defined(REAL_MULTIPLIER)
-
-#if OUTPUT_SHIFT < 0
- acc0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, VEC_SIZE);
- acc1 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, VEC_SIZE);
-#else // OUTPUT_SHIFT < 0
- acc0 = asymm_mult_by_quant_multiplier_less_than_one(acc0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT);
- acc1 = asymm_mult_by_quant_multiplier_less_than_one(acc1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT);
-#endif // OUTPUT_SHIFT < 0
-
-#endif // defined(REAL_MULTIPLIER)
- acc0 += (VEC_INT)OUTPUT_OFFSET;
- acc1 += (VEC_INT)OUTPUT_OFFSET;
-
- VEC_TYPE(VEC_SIZE)
- res0 = CONVERT_SAT(acc0, VEC_TYPE(VEC_SIZE));
- VEC_TYPE(VEC_SIZE)
- res1 = CONVERT_SAT(acc1, VEC_TYPE(VEC_SIZE));
-
-#if defined(DST_DEPTH)
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * dst_step_x + y * dst_step_y + z * dst_step_z + b * dst_stride_w;
-#else /* defined(DST_DEPTH) */
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * dst_step_x + y * dst_step_y + z * dst_step_z;
-#endif /* defined(DST_DEPTH) */
-
- VSTORE(VEC_SIZE)
- (ACTIVATION_FUNC(res0), 0, (__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y));
- VSTORE(VEC_SIZE)
- (ACTIVATION_FUNC(res1), 0, (__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y));
-}
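-
-// A minimal, illustrative set of build options for the dot8 kernel above. All values are
-// examples only; in practice they derive from the tensor shapes and quantization info, and
-// further defines required by the enclosing #if guards (e.g. DATA_TYPE, WEIGHTS_TYPE) are also needed:
-//   -DVEC_SIZE=4 -DSRC_DIM_1=32 -DSRC_DIM_2=32
-//   -DCONV_PAD_TOP=1 -DCONV_PAD_LEFT=1 -DNUM_ROWS_PROCESSED=2 -DNUM_PLANES_PROCESSED=2
-//   -DINPUT_OFFSET=-128 -DWEIGHTS_OFFSET=-128 -DK_OFFSET=147456
-//   -DOUTPUT_OFFSET=128 -DOUTPUT_MULTIPLIER=1073741824 -DOUTPUT_SHIFT=-1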
-#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) && VEC_SIZE==4
-
-#endif // defined(NUM_ROWS_PROCESSED) && defined(NUM_PLANES_PROCESSED)
-
-#endif // defined(VEC_SIZE) && defined(SRC_DIM_1) && defined(SRC_DIM_2) && defined(CONV_PAD_TOP) && defined(CONV_PAD_LEFT)
-
-#endif // defined(WEIGHTS_PROMOTED_TYPE)
-
-#endif // defined(WEIGHTS_OFFSET) && defined(INPUT_OFFSET) && defined(K_OFFSET) && ((defined(OUTPUT_OFFSET) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT)) || defined(REAL_MULTIPLIER))
-
-#if defined(SRC_DIM1) && defined(SRC_DIM2) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(N0) && defined(DILATION_X) && defined(DILATION_Y) && defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y) && defined(CONV_PAD_LEFT) && defined(CONV_PAD_TOP) && defined(INPUT_OFFSET) && defined(WEIGHTS_OFFSET) && defined(OUTPUT_OFFSET) && defined(OUTPUT_SHIFT) && defined(OUTPUT_MULTIPLIER) && defined(VEC_SIZE_LEFTOVER)
-/** This function computes the depthwise convolution for NHWC data layout. This kernel assumes that the weights tensor is NOT reshaped
- *
- * @note The number of elements processed must be passed at compile time using -DN0 (e.g. -DN0=2)
- * @note The depth multiplier must be passed at compile time using -DDEPTH_MULTIPLIER (e.g. -DDEPTH_MULTIPLIER=1)
- * @note The first dimension of the input tensor must be passed at compile time using -DSRC_DIM1 (e.g. -DSRC_DIM1=112)
- * @note The second dimension of the input tensor must be passed at compile time using -DSRC_DIM2 (e.g. -DSRC_DIM2=80)
- * @note The kernel width must be passed at compile time using -DKERNEL_WIDTH (e.g. -DKERNEL_WIDTH=5)
- * @note The kernel height must be passed at compile time using -DKERNEL_HEIGHT (e.g. -DKERNEL_HEIGHT=5)
- * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. -DCONV_PAD_TOP=1)
- * @note The convolution pad left must be passed at compile time using -DCONV_PAD_LEFT (e.g. -DCONV_PAD_LEFT=1)
- * @note The convolution stride along the width must be passed at compile time using -DCONV_STRIDE_X (e.g. -DCONV_STRIDE_X=1)
- * @note The convolution stride along the height must be passed at compile time using -DCONV_STRIDE_Y (e.g. -DCONV_STRIDE_Y=1)
- * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER (e.g. -DVEC_SIZE_LEFTOVER=3). It is defined as the remainder of the input's first dimension divided by VEC_SIZE
- * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z                                        src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z                                        dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z                                    weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] output_multipliers_ptr Pointer to the output multipliers vector. Supported data types: S32
- * @param[in] output_multipliers_stride_x Stride of the output multipliers vector in X dimension (in bytes)
- * @param[in] output_multipliers_step_x output_multipliers_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_multipliers_offset_first_element_in_bytes The offset of the first element in the output multipliers vector
- * @param[in] output_shifts_ptr Pointer to the output shifts vector. Supported data types: S32
- * @param[in] output_shifts_stride_x Stride of the output shifts vector in X dimension (in bytes)
- * @param[in] output_shifts_step_x output_shifts_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_shifts_offset_first_element_in_bytes The offset of the first element in the output shifts vector
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: S32
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-__kernel void dwc_MxN_native_quantized8_nhwc(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights),
- VECTOR_DECLARATION(output_multipliers),
- VECTOR_DECLARATION(output_shifts)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif // defined(HAS_BIAS)
-)
-{
- int x_offs = max((int)(get_global_id(0) * N0 - (N0 - VEC_SIZE_LEFTOVER) % N0), 0);
- int y = get_global_id(1); // spatial coordinate x
-#if defined(DST_DEPTH)
- int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y
- int b = get_global_id(2) / (int)DST_DEPTH; // batch
-#else // defined(DST_DEPTH)
- int z = get_global_id(2); // spatial coordinate y
-#endif // defined(DST_DEPTH)
-
- __global uchar *s_addr = src_ptr + src_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE);
-
- __global uchar *d_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) * (int)DEPTH_MULTIPLIER + y * dst_stride_y + z * dst_stride_z;
-
- __global uchar *w_addr = weights_ptr + weights_offset_first_element_in_bytes + x_offs * sizeof(WEIGHTS_TYPE) * (int)DEPTH_MULTIPLIER;
-
-#if defined(HAS_BIAS)
- __global uchar *b_addr = biases_ptr + biases_offset_first_element_in_bytes + x_offs * sizeof(int) * (int)DEPTH_MULTIPLIER;
-#endif // defined(HAS_BIAS)
-
-#if defined(PER_CHANNEL_QUANTIZATION)
- __global uchar *out_mul_addr = output_multipliers_ptr + output_multipliers_offset_first_element_in_bytes + x_offs * sizeof(int) * (int)DEPTH_MULTIPLIER;
- __global uchar *out_shift_addr = output_shifts_ptr + output_shifts_offset_first_element_in_bytes + x_offs * sizeof(int) * (int)DEPTH_MULTIPLIER;
-#endif // defined(PER_CHANNEL_QUANTIZATION)
-
-#if defined(DST_DEPTH)
- s_addr += b * src_stride_w;
- d_addr += b * dst_stride_w;
-#endif // defined(DST_DEPTH)
-
-#if DEPTH_MULTIPLIER > 1
- for(int d = 0; d < (int)DEPTH_MULTIPLIER; ++d)
- {
-#endif // DEPTH_MULTIPLIER > 1
- // Each work-item computes N0x1x1 elements
- VEC_INT res = 0;
-
- int x_coord = y * CONV_STRIDE_X - (int)CONV_PAD_LEFT;
- int y_coord = z * CONV_STRIDE_Y - (int)CONV_PAD_TOP;
-
- for(int yk = 0; yk < KERNEL_HEIGHT; ++yk)
- {
- if(y_coord >= 0 && y_coord < SRC_DIM2)
- {
- int x_coord_tmp = x_coord;
-
- for(int xk = 0; xk < KERNEL_WIDTH; ++xk)
- {
- if(x_coord_tmp >= 0 && x_coord_tmp < SRC_DIM1)
- {
- int s_offset = x_coord_tmp * (int)src_stride_y + y_coord * (int)src_stride_z;
- int w_offset = xk * weights_stride_y + yk * weights_stride_z;
-
- // Load input and weights values
- VEC_INT i = CONVERT(VLOAD(N0)(0, (__global DATA_TYPE *)(s_addr + s_offset)), VEC_INT);
- VEC_INT w = CONVERT(VLOAD(N0)(0, (__global WEIGHTS_TYPE *)(w_addr + w_offset)), VEC_INT);
-
- res += (i + (VEC_INT)INPUT_OFFSET) * (w + (VEC_INT)WEIGHTS_OFFSET);
- }
- x_coord_tmp += DILATION_X;
- }
- }
- y_coord += DILATION_Y;
- }
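-
- // Illustrative scalar view of the accumulation in the loop above (example values only):
- // with INPUT_OFFSET = -128, WEIGHTS_OFFSET = -128, an input value i = 130 and a weight
- // value w = 131, the contribution added to res is (130 + (-128)) * (131 + (-128)) = 2 * 3 = 6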
-
-#if defined(HAS_BIAS)
- VEC_INT bias = VLOAD(N0)(0, (__global int *)(b_addr));
- res += bias;
-#endif // defined(HAS_BIAS)
-
-#if defined(PER_CHANNEL_QUANTIZATION)
- VEC_INT output_multiplier = VLOAD(N0)(0, (__global int *)(out_mul_addr));
- VEC_INT output_shift = VLOAD(N0)(0, (__global int *)(out_shift_addr));
-
- VEC_INT res_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(res, output_multiplier, output_shift, N0);
- VEC_INT res_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(res, output_multiplier, output_shift, N0);
- res = select(res_shift_lt0, res_shift_gt0, (VEC_INT)(output_shift) >= 0);
-#else // defined(PER_CHANNEL_QUANTIZATION)
-#if OUTPUT_SHIFT < 0
- res = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(res, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, N0);
-#else // OUTPUT_SHIFT < 0
- res = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(res, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, N0);
-#endif // OUTPUT_SHIFT < 0
-#endif // defined(PER_CHANNEL_QUANTIZATION)
-
- res += (VEC_INT)OUTPUT_OFFSET;
-
- VEC_TYPE(VEC_SIZE)
- res0 = CONVERT_SAT(res, VEC_TYPE(VEC_SIZE));
- res0 = ACTIVATION_FUNC(res0);
-
- STORE_VECTOR_SELECT(res, DATA_TYPE, d_addr, N0, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
-
-#if DEPTH_MULTIPLIER > 1
- w_addr += sizeof(WEIGHTS_TYPE);
- d_addr += sizeof(DATA_TYPE);
-#if defined(PER_CHANNEL_QUANTIZATION)
- out_mul_addr += sizeof(int);
- out_shift_addr += sizeof(int);
-#endif // defined(PER_CHANNEL_QUANTIZATION)
-#if defined(HAS_BIAS)
- b_addr += sizeof(int);
-#endif // defined(HAS_BIAS)
- }
-#endif // DEPTH_MULTIPLIER > 1
-}
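-
-// A minimal, illustrative set of build options for dwc_MxN_native_quantized8_nhwc. All values
-// are examples only and normally come from the layer configuration; additional defines such as
-// DATA_TYPE and WEIGHTS_TYPE are also required, and ACTIVATION_TYPE may optionally be set as
-// described in the notes above:
-//   -DN0=4 -DDEPTH_MULTIPLIER=1 -DSRC_DIM1=64 -DSRC_DIM2=64
-//   -DKERNEL_WIDTH=3 -DKERNEL_HEIGHT=3 -DCONV_PAD_LEFT=1 -DCONV_PAD_TOP=1
-//   -DCONV_STRIDE_X=1 -DCONV_STRIDE_Y=1 -DDILATION_X=1 -DDILATION_Y=1 -DVEC_SIZE_LEFTOVER=0
-//   -DINPUT_OFFSET=-128 -DWEIGHTS_OFFSET=-128 -DOUTPUT_OFFSET=128 -DOUTPUT_MULTIPLIER=1073741824 -DOUTPUT_SHIFT=-1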
-#endif // defined(SRC_DIM1) && defined(SRC_DIM2) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(N0) && defined(DILATION_X) && defined(DILATION_Y) && defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y) && defined(CONV_PAD_LEFT) && defined(CONV_PAD_TOP) && defined(INPUT_OFFSET) && defined(WEIGHTS_OFFSET) && defined(OUTPUT_OFFSET) && defined(OUTPUT_SHIFT) && defined(OUTPUT_MULTIPLIER) && defined(VEC_SIZE_LEFTOVER)
-#endif // defined(DATA_TYPE) && defined(WEIGHTS_TYPE)
diff --git a/src/core/CL/cl_kernels/dequantization_layer.cl b/src/core/CL/cl_kernels/dequantization_layer.cl
deleted file mode 100644
index 127f67d940..0000000000
--- a/src/core/CL/cl_kernels/dequantization_layer.cl
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers.h"
-
-#if defined(VEC_SIZE) && defined(DATA_TYPE_SRC) && defined(DATA_TYPE_DST) && defined(SCALE) && defined(OFFSET)
-
-/** This performs the dequantization of quantized 8-bit integers to floating point.
- *
- * @note Source datatype should be given as a preprocessor argument using -DDATA_TYPE_SRC=type. e.g. -DDATA_TYPE_SRC=char
- * @note Destination datatype should be given as a preprocessor argument using -DDATA_TYPE_DST=type. e.g. -DDATA_TYPE_DST=float
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
- * @note Quantization scale of input tensor is passed in with -DSCALE=scale.
- * @note Quantization offset of input tensor is passed in with -DOFFSET=offset.
- *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/QSYMM8
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: F16/F32
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  output_stride_z                       Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void dequantization_layer(
- TENSOR3D_DECLARATION(input),
- TENSOR3D_DECLARATION(output))
-{
- // Get pixels pointer
- Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
-
-#if defined(LAST_ACCESSED_X)
- // Check if access on width gets out of bounds
- // If it does shift access vector to access elements within bounds
- const int xi = (int)(get_global_id(0) * VEC_SIZE);
- input.ptr -= max(xi - (int)LAST_ACCESSED_X, 0) * input_stride_x;
- output.ptr -= max(xi - (int)LAST_ACCESSED_X, 0) * output_stride_x;
-
- // Load data
- VEC_DATA_TYPE(int, VEC_SIZE)
- val = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_SRC *)input.ptr), VEC_DATA_TYPE(int, VEC_SIZE));
-
- // Create scale and offset vectors
- const VEC_DATA_TYPE(float, VEC_SIZE)
- vscale = SCALE;
-
- const VEC_DATA_TYPE(int, VEC_SIZE)
- voffset = OFFSET;
-
- // Dequantize
- VEC_DATA_TYPE(float, VEC_SIZE)
- res = vscale * CONVERT((val - voffset), VEC_DATA_TYPE(float, VEC_SIZE));
-
- // Store result
- VSTORE(VEC_SIZE)
- (CONVERT(res, VEC_DATA_TYPE(DATA_TYPE_DST, VEC_SIZE)), 0, (__global DATA_TYPE_DST *)output.ptr);
-#else // !defined(LAST_ACCESSED_X)
- *((__global DATA_TYPE_DST *)(output.ptr)) = (DATA_TYPE_DST)((float)((int)(*((__global DATA_TYPE_SRC *)(input.ptr))) - (int)(OFFSET)) * (float)(SCALE));
-#endif // defined(LAST_ACCESSED_X)
-}
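-
-// Illustrative numeric example of the dequantization above, assuming the example values
-// SCALE=0.5f and OFFSET=10: a quantized input value of 14 dequantizes to (14 - 10) * 0.5f = 2.0f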
-#endif // defined(VEC_SIZE) && defined(DATA_TYPE_SRC) && defined(DATA_TYPE_DST) && defined(SCALE) && defined(OFFSET)
-
-#if defined(VEC_SIZE) && defined(DATA_TYPE_SRC) && defined(DATA_TYPE_DST)
-/** This performs per channel dequantization of 8-bit signed integers to floating point. (NCHW)
- *
- * @note Source datatype should be given as a preprocessor argument using -DDATA_TYPE_SRC=type. e.g. -DDATA_TYPE_SRC=char
- * @note Destination datatype should be given as a preprocessor argument using -DDATA_TYPE_DST=type. e.g. -DDATA_TYPE_DST=float
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
- *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: QSYMM8_PER_CHANNEL
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: F16/F32
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  output_stride_z                       Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] scale Pointer to buffer with the per channel quantized scales
- */
-__kernel void dequantization_layer_per_channel_nchw(
- TENSOR3D_DECLARATION(input),
- TENSOR3D_DECLARATION(output),
- __global float *scale)
-{
- // Get pixels pointer
- Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
-
-#if defined(LAST_ACCESSED_X)
- // Check if access on width gets out of bounds
- // If it does shift access vector to access elements within bounds
- const int xi = (int)(get_global_id(0) * VEC_SIZE);
- input.ptr -= max(xi - (int)LAST_ACCESSED_X, 0) * input_stride_x;
- output.ptr -= max(xi - (int)LAST_ACCESSED_X, 0) * output_stride_x;
-
- // Load data
- VEC_DATA_TYPE(int, VEC_SIZE)
- val = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_SRC *)input.ptr), VEC_DATA_TYPE(int, VEC_SIZE));
-
- // Create scale vectors
- const VEC_DATA_TYPE(float, VEC_SIZE)
- vscale = scale[get_global_id(2)];
-
- // Dequantize
- VEC_DATA_TYPE(float, VEC_SIZE)
- res = vscale * CONVERT((val), VEC_DATA_TYPE(float, VEC_SIZE));
-
- // Store result
- VSTORE(VEC_SIZE)
- (CONVERT(res, VEC_DATA_TYPE(DATA_TYPE_DST, VEC_SIZE)), 0, (__global DATA_TYPE_DST *)output.ptr);
-#else // !defined(LAST_ACCESSED_X)
- *((__global DATA_TYPE_DST *)(output.ptr)) = (DATA_TYPE_DST)((float)((int)(*((__global DATA_TYPE_SRC *)(input.ptr)))) * scale[get_global_id(2)]);
-#endif // defined(LAST_ACCESSED_X)
-}
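-
-// Illustrative numeric example of the symmetric per-channel dequantization above: with a
-// per-channel scale of 0.25f (example value), a quantized input of -8 dequantizes to -8 * 0.25f = -2.0f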
-/** This performs per channel dequantization of 8-bit signed integers to floating point. (NHWC)
- *
- * @note Source datatype should be given as a preprocessor argument using -DDATA_TYPE_SRC=type. e.g. -DDATA_TYPE_SRC=char
- * @note Destination datatype should be given as a preprocessor argument using -DDATA_TYPE_DST=type. e.g. -DDATA_TYPE_DST=float
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
- *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: QSYMM8_PER_CHANNEL
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: F16/F32
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  output_stride_z                       Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] scale Pointer to buffer with the per channel quantized scales
- */
-__kernel void dequantization_layer_per_channel_nhwc(
- TENSOR3D_DECLARATION(input),
- TENSOR3D_DECLARATION(output),
- __global float *scale)
-{
- // Get pixels pointer
- Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
-
-#if defined(LAST_ACCESSED_X)
- // Check if access on width gets out of bounds
- // If it does shift access vector to access elements within bounds
- const int xi = (int)(get_global_id(0) * VEC_SIZE);
- input.ptr -= max(xi - (int)LAST_ACCESSED_X, 0) * input_stride_x;
- output.ptr -= max(xi - (int)LAST_ACCESSED_X, 0) * output_stride_x;
- scale -= max(xi - (int)LAST_ACCESSED_X, 0);
-
- // Load data
- VEC_DATA_TYPE(int, VEC_SIZE)
- val = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_SRC *)input.ptr), VEC_DATA_TYPE(int, VEC_SIZE));
-
- // Create scale vectors
- const VEC_DATA_TYPE(float, VEC_SIZE)
- vscale = VLOAD(VEC_SIZE)(0, &scale[xi]);
-
- // Dequantize
- VEC_DATA_TYPE(float, VEC_SIZE)
- res = vscale * CONVERT((val), VEC_DATA_TYPE(float, VEC_SIZE));
-
- // Store result
- VSTORE(VEC_SIZE)
- (CONVERT(res, VEC_DATA_TYPE(DATA_TYPE_DST, VEC_SIZE)), 0, (__global DATA_TYPE_DST *)output.ptr);
-#else // !defined(LAST_ACCESSED_X)
- *((__global DATA_TYPE_DST *)(output.ptr)) = (DATA_TYPE_DST)((float)((int)(*((__global DATA_TYPE_SRC *)(input.ptr)))) * scale[get_global_id(0)]);
-#endif // defined(LAST_ACCESSED_X)
-}
-#endif // defined(VEC_SIZE) && defined(DATA_TYPE_SRC) && defined(DATA_TYPE_DST)
diff --git a/src/core/CL/cl_kernels/direct_convolution.cl b/src/core/CL/cl_kernels/direct_convolution.cl
deleted file mode 100644
index 5d2a24e740..0000000000
--- a/src/core/CL/cl_kernels/direct_convolution.cl
+++ /dev/null
@@ -1,604 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "gemm_helpers.h"
-#include "helpers_asymm.h"
-#include "repeat.h"
-
-#if defined(IS_QUANTIZED)
-
-#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
-#define ARM_DOT(x, y, val) val = arm_dot_acc((x), (y), (val));
-#elif defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
-#define ARM_DOT(x, y, val) val += arm_dot((x), (y));
-#else // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
-#define ARM_DOT(x, y, val) \
- ({ \
- val += (ACC_DATA_TYPE)x.s0 * (ACC_DATA_TYPE)y.s0; \
- val += (ACC_DATA_TYPE)x.s1 * (ACC_DATA_TYPE)y.s1; \
- val += (ACC_DATA_TYPE)x.s2 * (ACC_DATA_TYPE)y.s2; \
- val += (ACC_DATA_TYPE)x.s3 * (ACC_DATA_TYPE)y.s3; \
- })
-#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
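-
-// Whichever definition is selected above, ARM_DOT(x, y, val) accumulates the 4-way dot product
-// of x and y into val. Illustrative example with uchar4 operands:
-//   ARM_DOT((uchar4)(1, 2, 3, 4), (uchar4)(5, 6, 7, 8), val); // val += 1*5 + 2*6 + 3*7 + 4*8 = 70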
-
-#define ARM_DOT1(a, b, c) \
- ({ \
- ARM_DOT(((VEC_DATA_TYPE(SRC_DATA_TYPE, 4))(a, (VEC_DATA_TYPE(SRC_DATA_TYPE, 3))0)), ((VEC_DATA_TYPE(WEI_DATA_TYPE, 4))(b, (VEC_DATA_TYPE(WEI_DATA_TYPE, 3))0)), c); \
- })
-#define ARM_DOT2(a, b, c) \
- ({ \
- ARM_DOT(((VEC_DATA_TYPE(SRC_DATA_TYPE, 4))(a, (VEC_DATA_TYPE(SRC_DATA_TYPE, 2))0)), ((VEC_DATA_TYPE(WEI_DATA_TYPE, 4))(b, (VEC_DATA_TYPE(WEI_DATA_TYPE, 2))0)), c); \
- })
-#define ARM_DOT3(a, b, c) \
- ({ \
- ARM_DOT(((VEC_DATA_TYPE(SRC_DATA_TYPE, 4))(a, (SRC_DATA_TYPE)0)), ((VEC_DATA_TYPE(WEI_DATA_TYPE, 4))(b, (WEI_DATA_TYPE)0)), c); \
- })
-#define ARM_DOT4(a, b, c) \
- ({ \
- ARM_DOT(a, b, c); \
- })
-#define ARM_DOT8(a, b, c) \
- ({ \
- ARM_DOT4((a.lo), (b.lo), c); \
- ARM_DOT4((a.hi), (b.hi), c); \
- })
-#define ARM_DOT16(a, b, c) \
- ({ \
- ARM_DOT8((a.lo), (b.lo), c); \
- ARM_DOT8((a.hi), (b.hi), c); \
- })
-
-#define ARM_OFFSET1(a, b, c) \
- ({ \
- c += (ACC_DATA_TYPE)a * (ACC_DATA_TYPE)b; \
- })
-#define ARM_OFFSET2(a, b, c) \
- ({ \
- c += (ACC_DATA_TYPE)a.s0 * (ACC_DATA_TYPE)b; \
- c += (ACC_DATA_TYPE)a.s1 * (ACC_DATA_TYPE)b; \
- })
-#define ARM_OFFSET3(a, b, c) \
- ({ \
- ARM_OFFSET2(a, b, c); \
- c += (ACC_DATA_TYPE)a.s2 * (ACC_DATA_TYPE)b; \
- })
-#define ARM_OFFSET4(a, b, c) \
- ({ \
- ARM_OFFSET3(a, b, c); \
- c += (ACC_DATA_TYPE)a.s3 * (ACC_DATA_TYPE)b; \
- })
-#define ARM_OFFSET8(a, b, c) \
- ({ \
- ARM_OFFSET4((a.lo), (b), c); \
- ARM_OFFSET4((a.hi), (b), c); \
- })
-#define ARM_OFFSET16(a, b, c) \
- ({ \
- ARM_OFFSET8((a.lo), (b), c); \
- ARM_OFFSET8((a.hi), (b), c); \
- })
-
-#if N0 == 1
-#define ARM_OFFSET_K0XN0(k0, a, b, a_offset, b_offset, c) \
- ({ \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##0), (a_offset), (c)); \
- })
-#elif N0 == 2 // N0 == 2
-#define ARM_OFFSET_K0XN0(k0, a, b, a_offset, b_offset, c) \
- ({ \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s0)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##0), (a_offset), (c.s0)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s1)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##1), (a_offset), (c.s1)); \
- })
-#elif N0 == 3 // N0 == 3
-#define ARM_OFFSET_K0XN0(k0, a, b, a_offset, b_offset, c) \
- ({ \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s0)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##0), (a_offset), (c.s0)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s1)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##1), (a_offset), (c.s1)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s2)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##2), (a_offset), (c.s2)); \
- })
-#elif N0 == 4 // N0 == 4
-#define ARM_OFFSET_K0XN0(k0, a, b, a_offset, b_offset, c) \
- ({ \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s0)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##0), (a_offset), (c.s0)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s1)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##1), (a_offset), (c.s1)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s2)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##2), (a_offset), (c.s2)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s3)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##3), (a_offset), (c.s3)); \
- })
-#elif N0 == 8 // N0 == 8
-#define ARM_OFFSET_K0XN0(k0, a, b, a_offset, b_offset, c) \
- ({ \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s0)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##0), (a_offset), (c.s0)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s1)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##1), (a_offset), (c.s1)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s2)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##2), (a_offset), (c.s2)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s3)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##3), (a_offset), (c.s3)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s4)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##4), (a_offset), (c.s4)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s5)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##5), (a_offset), (c.s5)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s6)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##6), (a_offset), (c.s6)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s7)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##7), (a_offset), (c.s7)); \
- })
-#elif N0 == 16 // N0 == 16
-#define ARM_OFFSET_K0XN0(k0, a, b, a_offset, b_offset, c) \
- ({ \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s0)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##0), (a_offset), (c.s0)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s1)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##1), (a_offset), (c.s1)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s2)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##2), (a_offset), (c.s2)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s3)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##3), (a_offset), (c.s3)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s4)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##4), (a_offset), (c.s4)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s5)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##5), (a_offset), (c.s5)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s6)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##6), (a_offset), (c.s6)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s7)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##7), (a_offset), (c.s7)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s8)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##8), (a_offset), (c.s8)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.s9)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##9), (a_offset), (c.s9)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.sA)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##A), (a_offset), (c.sA)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.sB)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##B), (a_offset), (c.sB)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.sC)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##C), (a_offset), (c.sC)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.sD)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##D), (a_offset), (c.sD)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.sE)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##E), (a_offset), (c.sE)); \
- CONCAT(ARM_OFFSET, k0) \
- ((a), (b_offset), (c.sF)); \
- CONCAT(ARM_OFFSET, k0) \
- ((b##F), (a_offset), (c.sF)); \
- })
-#else // N0 not supported
-#error "N0 value not supported"
-#endif // N0 conditions
-#else // defined(IS_QUANTIZED)
-
-#define ARM_DOT1(a, b, c) \
- ({ \
- c += (ACC_DATA_TYPE)a * (ACC_DATA_TYPE)b; \
- })
-#define ARM_DOT2(a, b, c) \
- ({ \
- c += (ACC_DATA_TYPE)a.s0 * (ACC_DATA_TYPE)b.s0; \
- c += (ACC_DATA_TYPE)a.s1 * (ACC_DATA_TYPE)b.s1; \
- })
-#define ARM_DOT3(a, b, c) \
- ({ \
- ARM_DOT2(a, b, c); \
- c += (ACC_DATA_TYPE)a.s2 * (ACC_DATA_TYPE)b.s2; \
- })
-#define ARM_DOT4(a, b, c) \
- ({ \
- ARM_DOT3(a, b, c); \
- c += (ACC_DATA_TYPE)a.s3 * (ACC_DATA_TYPE)b.s3; \
- })
-#define ARM_DOT8(a, b, c) \
- ({ \
- ARM_DOT4((a.lo), (b.lo), c); \
- ARM_DOT4((a.hi), (b.hi), c); \
- })
-#define ARM_DOT16(a, b, c) \
- ({ \
- ARM_DOT8((a.lo), (b.lo), c); \
- ARM_DOT8((a.hi), (b.hi), c); \
- })
-#endif // defined(IS_QUANTIZED)
-
-#if N0 == 1
-#define ARM_DOT_K0XN0(k0, a, b, c) \
- ({ \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##0), (c)); \
- })
-#elif N0 == 2 // N0 == 2
-#define ARM_DOT_K0XN0(k0, a, b, c) \
- ({ \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##0), (c.s0)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##1), (c.s1)); \
- })
-#elif N0 == 3 // N0 == 3
-#define ARM_DOT_K0XN0(k0, a, b, c) \
- ({ \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##0), (c.s0)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##1), (c.s1)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##2), (c.s2)); \
- })
-#elif N0 == 4 // N0 == 4
-#define ARM_DOT_K0XN0(k0, a, b, c) \
- ({ \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##0), (c.s0)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##1), (c.s1)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##2), (c.s2)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##3), (c.s3)); \
- })
-#elif N0 == 8 // N0 == 8
-#define ARM_DOT_K0XN0(k0, a, b, c) \
- ({ \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##0), (c.s0)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##1), (c.s1)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##2), (c.s2)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##3), (c.s3)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##4), (c.s4)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##5), (c.s5)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##6), (c.s6)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##7), (c.s7)); \
- })
-#elif N0 == 16 // N0 == 16
-#define ARM_DOT_K0XN0(k0, a, b, c) \
- ({ \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##0), (c.s0)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##1), (c.s1)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##2), (c.s2)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##3), (c.s3)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##4), (c.s4)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##5), (c.s5)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##6), (c.s6)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##7), (c.s7)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##8), (c.s8)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##9), (c.s9)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##A), (c.sA)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##B), (c.sB)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##C), (c.sC)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##D), (c.sD)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##E), (c.sE)); \
- CONCAT(ARM_DOT, k0) \
- ((a), (b##F), (c.sF)); \
- })
-#else // N0 not supported
-#error "N0 value not supported"
-#endif // N0 conditions
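-
-// Sketch of how ARM_DOT_K0XN0 expands, for the example case K0=4 and N0=2: given one K0-wide
-// input block a and weight blocks b0 and b1, the macro is equivalent to
-//   ARM_DOT4(a, b0, c.s0); // c.s0 += dot(a, b0)
-//   ARM_DOT4(a, b1, c.s1); // c.s1 += dot(a, b1)
-// i.e. each of the N0 output channels accumulates one K0-long dot product per invocation.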
-
-/** OpenCL kernel to compute the direct convolution.
- *
- * @note Data layout supported: NHWC
- * @note Data type supported: F32/F16/QASYMM8/QASYMM8_SIGNED
- * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=half)
- * @note The accumulation data type must be passed at compile time using -DACC_DATA_TYPE (e.g. -DACC_DATA_TYPE=half)
- * @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
- * @note The convolution strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y (e.g. -DSTRIDE_X=2, -DSTRIDE_Y=2)
- * @note The spatial dimensions of the weights must be passed at compile time using -DWEI_WIDTH and -DWEI_HEIGHT (e.g. -DWEI_WIDTH=9, -DWEI_HEIGHT=9)
- * @note The spatial dimensions of the source tensor must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT (e.g. -DSRC_WIDTH=96, -DSRC_HEIGHT=64)
- * @note The spatial dimensions of the destination tensor must be passed at compile time using -DDST_WIDTH and -DDST_HEIGHT (e.g. -DDST_WIDTH=96, -DDST_HEIGHT=64)
- * @note The channels of the source tensor must be passed at compile time using -DSRC_CHANNELS (e.g. -DSRC_CHANNELS=64)
- * @note The channels of the destination tensor must be passed at compile time using -DDST_CHANNELS (e.g. -DDST_CHANNELS=64)
- * @note The data type of the source tensor must be passed at compile time using -DSRC_DATA_TYPE (e.g. -DSRC_DATA_TYPE=float)
- * @note The data type of the weights tensor must be passed at compile time using -DWEI_DATA_TYPE (e.g. -DWEI_DATA_TYPE=float)
- * @note The data type of the destination tensor must be passed at compile time using -DDST_DATA_TYPE (e.g. -DDST_DATA_TYPE=float)
- * @note The data type of the accumulators must be passed at compile time using -DACC_DATA_TYPE (e.g. -DACC_DATA_TYPE=float)
- * @note The number of M0 rows (width*height) to process must be passed at compile time using -DM0 (e.g. -DM0=2)
- * @note The number of N0 output channels to process must be passed at compile time using -DN0 (e.g. -DN0=2)
- * @note The number of K0 inner accumulations must be passed at compile time using -DK0 (e.g. -DK0=2)
- * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note Only the following configurations of M0, N0 and K0 are currently supported:
- * - M0 = 1
- * - N0 = 2, 3, 4, 8, 16
- * - K0 = 2, 3, 4, 8, 16
- *
- * @note In case of QASYMM8/QASYMM8_SIGNED, the following extra information must be passed at compile time:
- * - -DIS_QUANTIZED
- * - The destination quantization multiplier e.g. -DDST_MULTIPLIER=1234
- * - The destination quantization shift e.g. -DDST_SHIFT=4
- * - The destination offset e.g. -DDST_OFFSET=4
- * - The source offset e.g. -DSRC_OFFSET=4
- * - The weights offset e.g. -DWEI_OFFSET=4
- * - The quantized zero value e.g. -DZERO_VALUE=4
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data type: F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data type: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] wei_ptr Pointer to the weights tensor. Supported data type: same as @p src_ptr
- * @param[in] wei_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] wei_step_x wei_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] wei_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] wei_step_y wei_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] wei_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] wei_step_z wei_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in]  wei_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] bia_ptr (Optional) Pointer to the bias tensor Supported data type: same as @p src_ptr (if F32/F16) or S32 (if QASYMM8/QASYMM8_SIGNED)
- * @param[in] bia_stride_x (Optional) Stride of the bias tensor in X dimension (in bytes)
- * @param[in] bia_step_x (Optional) bia_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] bia_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
- * @param[in] wei_stride_w Stride of the weights tensor in W dimension (in bytes)
- */
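-
-// A minimal, illustrative set of build options for direct_convolution_nhwc in the F32 case.
-// All values are examples only; the quantized case additionally needs the -DIS_QUANTIZED
-// group of defines listed in the notes above:
-//   -DSRC_DATA_TYPE=float -DWEI_DATA_TYPE=float -DDST_DATA_TYPE=float -DACC_DATA_TYPE=float
-//   -DSRC_WIDTH=64 -DSRC_HEIGHT=64 -DSRC_CHANNELS=32 -DDST_WIDTH=64 -DDST_HEIGHT=64 -DDST_CHANNELS=64
-//   -DWEI_WIDTH=3 -DWEI_HEIGHT=3 -DPAD_LEFT=1 -DPAD_TOP=1 -DSTRIDE_X=1 -DSTRIDE_Y=1
-//   -DM0=1 -DN0=4 -DK0=4 -DPARTIAL_STORE_M0=0 -DPARTIAL_STORE_N0=0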
-__kernel void direct_convolution_nhwc(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(wei),
-#if defined(HAS_BIAS)
- VECTOR_DECLARATION(bia),
-#endif // defined(HAS_BIAS)
- unsigned int wei_stride_w)
-{
-#if M0 != 1
-#error "M0: Only supported 1"
-#endif // M0 != 1
-
- const int cout = max((int)(get_global_id(0) * N0 - (N0 - PARTIAL_STORE_N0) % N0), 0); // output channels
- const int mout = get_global_id(1);                                                    // width x height
- const int zout = get_global_id(2);                                                    // batch index
-
- REPEAT_VAR_INIT_TO_CONST(16, int, zero, 0);
- REPEAT_VAR_INIT_TO_CONST(M0, int, xi, 0);
- REPEAT_VAR_INIT_TO_CONST(M0, int, yi, 0);
-
-#define LINEAR_2_COORDS(i) \
- xi##i = ((mout * M0 + i) % DST_WIDTH) * STRIDE_X; \
- yi##i = ((mout * M0 + i) / DST_WIDTH) * STRIDE_Y; \
- xi##i -= PAD_LEFT; \
- yi##i -= PAD_TOP;
-
- // Convert the linear index to coordinate
- LINEAR_2_COORDS(0);
-
-#undef LINEAR_2_COORDS
-
- uint src_offset = src_offset_first_element_in_bytes + zout * src_stride_y * (SRC_WIDTH * SRC_HEIGHT);
- uint wei_offset = wei_offset_first_element_in_bytes + cout * wei_stride_w;
-
- // Initialize the accumulators
- REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(ACC_DATA_TYPE, N0), c, 0);
-
- for(int i = 0; i < (WEI_WIDTH * WEI_HEIGHT); ++i)
- {
- int xk = i % WEI_WIDTH;
- int yk = i / WEI_WIDTH;
-
- REPEAT_VAR_INIT_TO_CONST(M0, int, mi_valid_row, 0);
- REPEAT_VAR_INIT_TO_CONST(M0, int, mi_mask, 0);
-
- // Calculate the input row to read from source tensor
-#define MI_INIT(i) \
- mi_valid_row##i = max(min(xi##i + xk, SRC_WIDTH - 1), 0) + max(min(yi##i + yk, SRC_HEIGHT - 1), 0) * SRC_WIDTH; \
- mi_mask##i = (xi##i + xk) >= 0 && (xi##i + xk) < SRC_WIDTH && (yi##i + yk) >= 0 && (yi##i + yk) < SRC_HEIGHT;
-
- MI_INIT(0);
-
-#undef MI_INIT
-
- int k = 0;
- for(; k <= (SRC_CHANNELS - K0); k += K0)
- {
- // Load values from src tensor
- LOAD_BLOCK_INDIRECT(M0, K0, SRC_DATA_TYPE, a, src_ptr, src_offset + k * sizeof(SRC_DATA_TYPE), src_stride_y, mi_valid_row, mi_mask);
-
- // Load values from weights tensor
- LOAD_BLOCK(N0, K0, WEI_DATA_TYPE, b, wei_ptr, wei_offset, wei_stride_w, zero);
-
-#if defined(IS_QUANTIZED)
-#define TENSOR_DOT(K0, i) \
- if(mi_mask##i != 0) \
- { \
- ARM_DOT_K0XN0(K0, a##i, b, c##i); \
- ARM_OFFSET_K0XN0(K0, a##i, b, SRC_OFFSET, WEI_OFFSET, c##i); \
- } \
- else \
- { \
- ARM_DOT_K0XN0(K0, ((VEC_DATA_TYPE(SRC_DATA_TYPE, K0))ZERO_VALUE), b, c##i); \
- ARM_OFFSET_K0XN0(K0, ((VEC_DATA_TYPE(SRC_DATA_TYPE, K0))ZERO_VALUE), b, SRC_OFFSET, WEI_OFFSET, c##i); \
- }
-#else // defined(IS_QUANTIZED)
-#define TENSOR_DOT(K0, i) \
- ARM_DOT_K0XN0(K0, a##i, b, c##i);
-#endif // defined(IS_QUANTIZED)
-
- TENSOR_DOT(K0, 0);
-
- wei_offset += K0 * sizeof(WEI_DATA_TYPE);
- }
-
-#if(SRC_CHANNELS % K0) != 0
- // Left-over accumulations
- for(; k < SRC_CHANNELS; ++k)
- {
- // Load values from src tensor
- LOAD_BLOCK_INDIRECT(M0, 1, SRC_DATA_TYPE, a, src_ptr, src_offset + k * sizeof(SRC_DATA_TYPE), src_stride_y, mi_valid_row, mi_mask);
-
- // Load values from weights tensor
- LOAD_BLOCK(N0, 1, WEI_DATA_TYPE, b, wei_ptr, wei_offset, wei_stride_w, zero);
-
- TENSOR_DOT(1, 0);
-
-#undef TENSOR_DOT
-
- wei_offset += sizeof(WEI_DATA_TYPE);
- }
-#endif // (SRC_CHANNELS % K0) != 0
-
- c0 += (SRC_CHANNELS * SRC_OFFSET * WEI_OFFSET);
- }
-
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (cout * sizeof(DST_DATA_TYPE)) + (mout * M0 * dst_stride_y);
-
- // Batched direct convolution
- dst_addr += zout * dst_stride_y * (DST_WIDTH * DST_HEIGHT);
-
-#if defined(HAS_BIAS)
- __global uchar *bias_addr = bia_ptr + bia_offset_first_element_in_bytes + (cout * sizeof(BIA_DATA_TYPE));
-
- LOAD_BLOCK(1, N0, BIA_DATA_TYPE, bias, bias_addr, 0, zero0, zero);
-
- // c = c + bias[broadcasted]
- ADD_BLOCK_BROADCAST(M0, c, bias0);
-#endif // HAS_BIAS
-
-#if defined(IS_QUANTIZED)
-
- REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DST_DATA_TYPE, N0), cq, 0);
-
-#if DST_SHIFT < 0
-#define QUANTIZE(i) \
- c##i = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(c##i, DST_MULTIPLIER, DST_SHIFT, N0); \
- c##i = c##i + DST_OFFSET; \
- cq##i = CONVERT_SAT(c##i, VEC_DATA_TYPE(DST_DATA_TYPE, N0));
-#else // DST_SHIFT < 0
-#define QUANTIZE(i) \
- c##i = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(c##i, DST_MULTIPLIER, DST_SHIFT, N0); \
- c##i = c##i + DST_OFFSET; \
- cq##i = CONVERT_SAT(c##i, VEC_DATA_TYPE(DST_DATA_TYPE, N0));
-#endif // DST_SHIFT < 0
-
- QUANTIZE(0);
-
-#undef QUANTIZE
-
- STORE_VECTOR_SELECT(cq, DST_DATA_TYPE, dst_addr, N0, PARTIAL_STORE_N0, PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0);
-#else // defined(IS_QUANTIZED)
- STORE_VECTOR_SELECT(c, DST_DATA_TYPE, dst_addr, N0, PARTIAL_STORE_N0, PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0);
-#endif // defined(IS_QUANTIZED)
-}
\ No newline at end of file
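
For reference, the index arithmetic in the NHWC kernel above (LINEAR_2_COORDS and MI_INIT) works as follows: each work-item owns one flattened output position mout, converts it to an (x, y) output coordinate, scales by the convolution strides, subtracts the padding, and then, for every filter tap, clamps the read position and keeps a mask so out-of-bounds taps contribute zero. A minimal host-side C sketch of that logic (the constants and the map_tap name are illustrative, not from the library):

    #include <stdio.h>

    /* Illustrative stand-ins for the -D compile-time options of the kernel. */
    enum { DST_WIDTH = 4, SRC_WIDTH = 5, SRC_HEIGHT = 5,
           STRIDE_X = 2, STRIDE_Y = 2, PAD_LEFT = 1, PAD_TOP = 1 };

    /* For flattened output position mout and filter tap (xk, yk), compute the
     * clamped linear input position and whether the tap falls inside the image. */
    static void map_tap(int mout, int xk, int yk, int *valid_row, int *mask)
    {
        const int xi = (mout % DST_WIDTH) * STRIDE_X - PAD_LEFT; /* LINEAR_2_COORDS */
        const int yi = (mout / DST_WIDTH) * STRIDE_Y - PAD_TOP;

        const int x  = xi + xk;                                  /* MI_INIT */
        const int y  = yi + yk;
        const int xc = x < 0 ? 0 : (x > SRC_WIDTH - 1 ? SRC_WIDTH - 1 : x);
        const int yc = y < 0 ? 0 : (y > SRC_HEIGHT - 1 ? SRC_HEIGHT - 1 : y);

        *valid_row = xc + yc * SRC_WIDTH; /* clamped read position */
        *mask      = (x >= 0 && x < SRC_WIDTH && y >= 0 && y < SRC_HEIGHT);
    }

    int main(void)
    {
        int row, mask;
        map_tap(0, 0, 0, &row, &mask); /* top-left output, top-left tap: lands in the padding */
        printf("row=%d mask=%d\n", row, mask);
        return 0;
    }
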
diff --git a/src/core/CL/cl_kernels/direct_convolution1x1.cl b/src/core/CL/cl_kernels/direct_convolution1x1.cl
deleted file mode 100644
index 8ab2d1d4ea..0000000000
--- a/src/core/CL/cl_kernels/direct_convolution1x1.cl
+++ /dev/null
@@ -1,316 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers.h"
-
-#undef CONVERT_SAT
-
-#define ADD_OP(a, b) ((a) + (b))
-#define MUL_OP(a, b) ((a) * (b))
-#define CONVERT_SAT(a, b) ((a))
-
-#if defined(DATA_TYPE) && defined(DATA_SIZE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH)
-
-#if STRIDE_X == 3
-#define INPUT_PIXEL_STR(data_size) extract_input_stride3_##data_size
-#define INPUT_PIXEL(data_size) INPUT_PIXEL_STR(data_size)
-#elif STRIDE_X == 2
-#define INPUT_PIXEL(data_size) extract_input_stride2
-#elif STRIDE_X == 1
-#define INPUT_PIXEL(data_size) extract_input_stride1
-#else /* STRIDE_X not equals 1, 2 or 3 */
-#error "Only support strides 1, 2 and 3"
-#endif /* STRIDE_X == 3 */
-
-/** Extracts a 1D horizontal vector from the input tensor with stride as 1.
- *
- * @param[in] input_pixel Pointer to the first pixel.
- *
- * @return extracted input values.
- */
-inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride1(__global const DATA_TYPE *input_pixel)
-{
- return vload8(0, input_pixel);
-}
-
-/** Extracts a 1D horizontal vector from the input tensor with stride as 2.
- *
- * @param[in] input_pixel Pointer to the first pixel.
- *
- * @return extracted input values.
- */
-inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride2(__global const DATA_TYPE *input_pixel)
-{
- VEC_DATA_TYPE(DATA_TYPE, 16)
- temp = vload16(0, input_pixel);
- return temp.s02468ace;
-}
-
-/** Extracts a 1D horizontal vector from the input tensor with stride as 3 and 32-bit data size.
- *
- * @param[in] input_pixel Pointer to the first pixel.
- *
- * @return extracted input values.
- */
-inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride3_32(__global const DATA_TYPE *input_pixel)
-{
- VEC_DATA_TYPE(DATA_TYPE, 4)
- temp1 = vload4(0, input_pixel);
- VEC_DATA_TYPE(DATA_TYPE, 4)
- temp2 = vload4(0, input_pixel + 6);
- VEC_DATA_TYPE(DATA_TYPE, 4)
- temp3 = vload4(0, input_pixel + 12);
- VEC_DATA_TYPE(DATA_TYPE, 4)
- temp4 = vload4(0, input_pixel + 18);
- return (VEC_DATA_TYPE(DATA_TYPE, 8))(temp1.s03, temp2.s03, temp3.s03, temp4.s03);
-}
-
-/** Extracts a 1D horizontal vector from the input tensor with stride as 3 and 16-bit data size.
- *
- * @param[in] input_pixel Pointer to the first pixel.
- *
- * @return extracted input values.
- */
-inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride3_16(__global const DATA_TYPE *input_pixel)
-{
- VEC_DATA_TYPE(DATA_TYPE, 8)
- temp1 = vload8(0, input_pixel);
- VEC_DATA_TYPE(DATA_TYPE, 8)
- temp2 = vload8(0, input_pixel + 8);
- VEC_DATA_TYPE(DATA_TYPE, 8)
- temp3 = vload8(0, input_pixel + 16);
- return (VEC_DATA_TYPE(DATA_TYPE, 8))(temp1.s036, temp2.s147, temp3.s25);
-}
-
-/** Extracts a 1D horizontal vector from the input tensor with stride as 3 and 8-bit data size.
- *
- * @param[in] input_pixel Pointer to the first pixel.
- *
- * @return extracted input values.
- */
-inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride3_8(__global const DATA_TYPE *input_pixel)
-{
- VEC_DATA_TYPE(DATA_TYPE, 16)
- temp1 = vload16(0, input_pixel);
- VEC_DATA_TYPE(DATA_TYPE, 16)
- temp2 = vload16(0, input_pixel + 12);
- return (VEC_DATA_TYPE(DATA_TYPE, 8))(temp1.s0369, temp2.s0369);
-}
-
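
The extract_input_stride* helpers above exist because the 1x1 kernel consumes 8 output-aligned input samples per call: with stride 1 that is a single contiguous vload8, with stride 2 it is every other lane of a vload16, and with stride 3 it is a few partial loads whose lanes are stitched back together (the variant depends on the element size). A scalar C sketch of the same gather, with the stride as a runtime parameter instead of a compile-time definition:

    #include <stdio.h>

    /* Gather 8 samples spaced 'stride' elements apart: the scalar equivalent of
     * extract_input_stride1/2/3, which use vector loads plus swizzles instead. */
    static void extract_input(const float *row, int stride, float out[8])
    {
        for (int i = 0; i < 8; ++i)
            out[i] = row[i * stride];
    }

    int main(void)
    {
        float row[24], out[8];
        for (int i = 0; i < 24; ++i)
            row[i] = (float)i;

        extract_input(row, 3, out); /* stride-3 gather: 0, 3, 6, ..., 21 */
        for (int i = 0; i < 8; ++i)
            printf("%.0f ", out[i]);
        printf("\n");
        return 0;
    }
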
-/** This kernel performs a direct convolution to convolve the low three dimensions.
- *
- * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
- * @note The data size must be passed at compile time using -DDATA_SIZE e.g. -DDATA_SIZE=32
- * @note The convolution stride x must be passed at compile time using -DSTRIDE_X e.g. -DSTRIDE_X=1
- * @note The third dimensions of the weights tensors must be passed at compile time using -DWEIGHTS_DEPTH
- * @note If biases are used then -DHAS_BIAS has to be passed at compile time
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr Pointer to the biases tensor. Same as @p src_ptr
- * @param[in] biases_stride_x Stride of the biases tensor in X dimension (in bytes)
- * @param[in] biases_step_x biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes The offset of the first element in the biases tensor
- * @param[in] weights_stride_w Stride of the weights tensor in the 4th dimension
- */
-__kernel void direct_convolution1x1(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights),
-#ifdef HAS_BIAS
- VECTOR_DECLARATION(biases),
-#endif /* defined(HAS_BIAS) */
- unsigned int weights_stride_w)
-{
- Image src = CONVERT_TO_IMAGE_STRUCT(src);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
- Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
-
-#ifdef HAS_BIAS
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-#endif /* defined(HAS_BIAS) */
-
- VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 8)
- values = 0;
-
- const uint z_index = get_global_id(2);
-
- weights.ptr += z_index * weights_stride_w;
- for(volatile int d = 0; d < WEIGHTS_DEPTH; ++d)
- {
- DATA_TYPE weight = *(__global DATA_TYPE *)weights.ptr;
- VEC_DATA_TYPE(DATA_TYPE, 8)
- input_pixel = INPUT_PIXEL(DATA_SIZE)((__global DATA_TYPE *)src.ptr);
- values = ADD_OP(values, MUL_OP((VEC_DATA_TYPE(DATA_TYPE, 8))weight, input_pixel));
- src.ptr += src_stride_z;
- weights.ptr += weights_stride_z;
- }
-
-#ifdef HAS_BIAS
- values = ADD_OP(values, (VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 8)) * ((__global DATA_TYPE *)(vector_offset(&biases, z_index))));
-#endif /* defined(HAS_BIAS) */
-
- vstore8(CONVERT_SAT(values, VEC_DATA_TYPE(DATA_TYPE, 8)), 0, (__global DATA_TYPE *)dst.ptr);
-}
-#endif // defined(DATA_TYPE) && defined(DATA_SIZE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH)
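
Stripped of the vectorisation, the kernel above is a dot product along the input depth: for each output position every one of the WEIGHTS_DEPTH input planes contributes one multiply-accumulate, and the bias, when present, is added once after the loop. A plain C sketch of one row of 8 outputs (the array shapes and the function name are illustrative):

    #include <stdio.h>

    enum { WEIGHTS_DEPTH = 3, OUT_W = 8 };

    /* 1x1 convolution of one output row: out[x] = bias + sum_d w[d] * in[d][x]. */
    static void conv1x1_row(float in[WEIGHTS_DEPTH][OUT_W],
                            const float w[WEIGHTS_DEPTH],
                            float bias,
                            float out[OUT_W])
    {
        for (int x = 0; x < OUT_W; ++x)
            out[x] = bias;
        for (int d = 0; d < WEIGHTS_DEPTH; ++d) /* the for(d ...) loop of the kernel */
            for (int x = 0; x < OUT_W; ++x)
                out[x] += w[d] * in[d][x];
    }

    int main(void)
    {
        float in[WEIGHTS_DEPTH][OUT_W], out[OUT_W];
        const float w[WEIGHTS_DEPTH] = { 1.0f, 0.5f, 0.25f };

        for (int d = 0; d < WEIGHTS_DEPTH; ++d)
            for (int x = 0; x < OUT_W; ++x)
                in[d][x] = (float)(x + 1);

        conv1x1_row(in, w, 0.5f, out);
        printf("out[0]=%.2f out[7]=%.2f\n", out[0], out[7]); /* 2.25 and 14.50 */
        return 0;
    }
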
-
-#if defined(WEIGHTS_DEPTH)
-
-#define CONVOLUTION1x1_BIFROST(acc, src, weight_value) \
- ({ \
- acc.s0 = mad(src.s0, weight_value, acc.s0); \
- acc.s1 = mad(src.s1, weight_value, acc.s1); \
- acc.s2 = mad(src.s2, weight_value, acc.s2); \
- acc.s3 = mad(src.s3, weight_value, acc.s3); \
- })
-
-/** An optimized direct convolution 1x1 OpenCL kernel for Bifrost architectures when the data type is F32
- *
- * @note This OpenCL kernel works only with stride_x and stride_y equal to 1
- * @note The third dimensions of the weights tensors must be passed at compile time using -DWEIGHTS_DEPTH
- * @note If biases are used then -DHAS_BIAS has to be passed at compile time
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr Pointer to the biases tensor. Same as @p src_ptr
- * @param[in] biases_stride_x Stride of the biases tensor in X dimension (in bytes)
- * @param[in] biases_step_x biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes The offset of the first element in the biases tensor
- * @param[in] weights_stride_w Stride of the weights tensor in the 4th dimension
- */
-__kernel void direct_convolution1x1_f32_bifrost(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights),
-#ifdef HAS_BIAS
- VECTOR_DECLARATION(biases),
-#endif /* defined(HAS_BIAS) */
- unsigned int weights_stride_w)
-{
- // Get the kernel index
- const int kernel_index = get_global_id(2);
-
- Image src = CONVERT_TO_IMAGE_STRUCT(src);
- Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
-
- float4 acc0 = 0.0f;
- float4 acc1 = 0.0f;
- float4 acc2 = 0.0f;
- float4 acc3 = 0.0f;
-
- __global uchar *weights_addr = (__global uchar *)(weights_ptr + weights_offset_first_element_in_bytes + kernel_index * weights_stride_w);
- __global uchar *src_addr = (__global uchar *)offset(&src, 0, 0);
-
- for(ushort d = 0; d < (ushort)WEIGHTS_DEPTH; ++d)
- {
- // Load the weights
- float weight = *((__global float *)weights_addr);
-
-        // Load values from rows 0 to 3 of the input tensor
- float4 src0 = vload4(0, (__global float *)(src_addr + 0 * src_stride_y));
- float4 src1 = vload4(0, (__global float *)(src_addr + 1 * src_stride_y));
- float4 src2 = vload4(0, (__global float *)(src_addr + 2 * src_stride_y));
- float4 src3 = vload4(0, (__global float *)(src_addr + 3 * src_stride_y));
-
- CONVOLUTION1x1_BIFROST(acc0, src0, weight);
- CONVOLUTION1x1_BIFROST(acc1, src1, weight);
- CONVOLUTION1x1_BIFROST(acc2, src2, weight);
- CONVOLUTION1x1_BIFROST(acc3, src3, weight);
-
- src_addr += src_stride_z;
- weights_addr += weights_stride_z;
- }
-
-#ifdef HAS_BIAS
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-
- float bias = (float) * ((__global float *)(vector_offset(&biases, kernel_index)));
-
- acc0.s0 += bias;
- acc0.s1 += bias;
- acc0.s2 += bias;
- acc0.s3 += bias;
- acc1.s0 += bias;
- acc1.s1 += bias;
- acc1.s2 += bias;
- acc1.s3 += bias;
- acc2.s0 += bias;
- acc2.s1 += bias;
- acc2.s2 += bias;
- acc2.s3 += bias;
- acc3.s0 += bias;
- acc3.s1 += bias;
- acc3.s2 += bias;
- acc3.s3 += bias;
-#endif /* defined(HAS_BIAS) */
-
- vstore4(acc0, 0, (__global float *)(dst.ptr + 0 * dst_stride_y));
- vstore4(acc1, 0, (__global float *)(dst.ptr + 1 * dst_stride_y));
- vstore4(acc2, 0, (__global float *)(dst.ptr + 2 * dst_stride_y));
- vstore4(acc3, 0, (__global float *)(dst.ptr + 3 * dst_stride_y));
-}
-#endif // defined(WEIGHTS_DEPTH)
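
The Bifrost variant above trades wide vector loads for more outputs in flight: each work-item keeps four float4 accumulators (a 4x4 output tile) and feeds them with mad(), so the single weight loaded for a depth slice is reused across four input rows before the pointers advance. A compact C model of that register-blocking idea (tile sizes match the kernel, the function name is illustrative):

    #include <stdio.h>

    enum { DEPTH = 2, TILE_H = 4, TILE_W = 4 };

    /* One 4x4 output tile of a 1x1 convolution: the weight for depth d is read
     * once and multiply-accumulated into all four rows, as in
     * CONVOLUTION1x1_BIFROST applied to acc0..acc3. */
    static void conv1x1_tile(float in[DEPTH][TILE_H][TILE_W],
                             const float w[DEPTH],
                             float acc[TILE_H][TILE_W])
    {
        for (int y = 0; y < TILE_H; ++y)
            for (int x = 0; x < TILE_W; ++x)
                acc[y][x] = 0.0f;

        for (int d = 0; d < DEPTH; ++d)
        {
            const float weight = w[d];                 /* loaded once per depth slice */
            for (int y = 0; y < TILE_H; ++y)           /* reused across 4 rows...     */
                for (int x = 0; x < TILE_W; ++x)       /* ...and 4 columns            */
                    acc[y][x] += weight * in[d][y][x]; /* the mad() of the kernel     */
        }
    }

    int main(void)
    {
        float in[DEPTH][TILE_H][TILE_W], acc[TILE_H][TILE_W];
        const float w[DEPTH] = { 2.0f, 3.0f };

        for (int d = 0; d < DEPTH; ++d)
            for (int y = 0; y < TILE_H; ++y)
                for (int x = 0; x < TILE_W; ++x)
                    in[d][y][x] = 1.0f;

        conv1x1_tile(in, w, acc);
        printf("acc[0][0]=%.1f\n", acc[0][0]); /* 2*1 + 3*1 = 5.0 */
        return 0;
    }
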
diff --git a/src/core/CL/cl_kernels/direct_convolution3x3.cl b/src/core/CL/cl_kernels/direct_convolution3x3.cl
deleted file mode 100644
index 811df053c4..0000000000
--- a/src/core/CL/cl_kernels/direct_convolution3x3.cl
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers.h"
-
-#undef CONVERT_SAT
-
-#define ADD_OP(a, b) ((a) + (b))
-#define MUL_OP(a, b) ((a) * (b))
-#define CONVERT_SAT(a, b) ((a))
-
-#if defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH)
-
-#if STRIDE_X == 1
-#define CONVOLUTION1x3(acc, src_row_ptr, weights_row_ptr) CONVOLUTION1x3_STRIDE1(acc, src_row_ptr, weights_row_ptr)
-#elif STRIDE_X == 2 /* STRIDE_X == 1 */
-#define CONVOLUTION1x3(acc, src_row_ptr, weights_row_ptr) CONVOLUTION1x3_STRIDE2(acc, src_row_ptr, weights_row_ptr)
-#else /* STRIDE_X not equals 1 or 2 */
-#error "STRIDE_X larger than 2 is not supported"
-#endif /* STRIDE_X == 2 */
-
-#define CONVOLUTION1x3_STRIDE1(acc, src_row_ptr, weights_row_ptr) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, 3) \
- weights_values0 = vload3(0, weights_row_ptr); \
- VEC_DATA_TYPE(DATA_TYPE, 8) \
- src0 = vload8(0, src_row_ptr); \
- VEC_DATA_TYPE(DATA_TYPE, 2) \
- src1 = vload2(0, src_row_ptr + 8); \
- \
- acc = ADD_OP(acc, MUL_OP(src0, (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s0)); \
- acc = ADD_OP(acc, MUL_OP((VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s1234, src0.s567, src1.s0), (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s1)); \
- acc = ADD_OP(acc, MUL_OP((VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s234, src0.s567, src1.s01), (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s2)); \
- })
-
-#define CONVOLUTION1x3_STRIDE2(acc, src_row_ptr, weights_row_ptr) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, 3) \
- weights_values0 = vload3(0, weights_row_ptr); \
- VEC_DATA_TYPE(DATA_TYPE, 16) \
- src0 = vload16(0, src_row_ptr); \
- DATA_TYPE src1 = *(src_row_ptr + 16); \
- \
- acc = ADD_OP(acc, MUL_OP(src0.even, (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s0)); \
- acc = ADD_OP(acc, MUL_OP((VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s1357, src0.s9BDF), (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s1)); \
- acc = ADD_OP(acc, MUL_OP((VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s2468, src0.sACE, src1), (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s2)); \
- })
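
The CONVOLUTION1x3_STRIDE1/STRIDE2 macros above each compute 8 horizontally adjacent outputs at once: the stride-1 form loads 10 consecutive inputs (a vload8 plus a vload2) and builds the three shifted views a 3-tap filter needs, while the stride-2 form does the same on every other lane of a vload16. The stride-1 case in scalar C (the function name is illustrative):

    #include <stdio.h>

    /* 3-tap horizontal convolution producing n outputs from n + 2 inputs,
     * roughly the scalar form of CONVOLUTION1x3_STRIDE1. */
    static void conv1x3_row(const float *src, const float w[3], float *acc, int n)
    {
        for (int x = 0; x < n; ++x)
            acc[x] += src[x] * w[0] + src[x + 1] * w[1] + src[x + 2] * w[2];
    }

    int main(void)
    {
        float src[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
        float w[3]    = { 1.0f, 1.0f, 1.0f };
        float acc[8]  = { 0 };

        conv1x3_row(src, w, acc, 8); /* 8 outputs from 10 inputs */
        printf("acc[0]=%.0f acc[7]=%.0f\n", acc[0], acc[7]); /* 3 and 24 */
        return 0;
    }
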
-
-/** This kernel performs a direct convolution to convolve the low three dimensions.
- *
- * @note This OpenCL kernel works with stride_x = 1 and 2
- * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
- * @note The third dimensions of the weights tensors must be passed at compile time using -DWEIGHTS_DEPTH
- * @note If biases are used then -DHAS_BIAS has to be passed at compile time
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr Pointer to the biases tensor. Same as @p src_ptr
- * @param[in] biases_stride_x Stride of the biases tensor in X dimension (in bytes)
- * @param[in] biases_step_x biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes The offset of the first element in the biases tensor
- * @param[in] weights_stride_w Stride of the weights tensor in the 4th dimension
- */
-__kernel void direct_convolution3x3(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights),
-#ifdef HAS_BIAS
- VECTOR_DECLARATION(biases),
-#endif /* defined(HAS_BIAS) */
- unsigned int weights_stride_w)
-{
- Image src = CONVERT_TO_IMAGE_STRUCT(src);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
- Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
-
- VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 8)
- values0 = 0;
-
- __global uchar *weights_addr = (__global uchar *)tensor3D_offset(&weights, 0, 0, 0);
- __global uchar *src_addr = (__global uchar *)offset(&src, 0, 0);
-
- const int kernel_index = get_global_id(2);
- weights_addr += kernel_index * weights_stride_w;
-
- for(volatile int d = 0; d < WEIGHTS_DEPTH; ++d)
- {
- CONVOLUTION1x3(values0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_y));
- CONVOLUTION1x3(values0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y));
- CONVOLUTION1x3(values0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y));
-
- src_addr += src_stride_z;
- weights_addr += weights_stride_z;
- }
-
-#ifdef HAS_BIAS
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-
- values0 = ADD_OP(values0, (VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 8)) * ((__global DATA_TYPE *)(vector_offset(&biases, kernel_index))));
-#endif /* defined(HAS_BIAS) */
-
- vstore8(CONVERT_SAT(values0, VEC_DATA_TYPE(DATA_TYPE, 8)), 0, (__global DATA_TYPE *)dst.ptr);
-}
-#endif //defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH)
-
-#if defined(WEIGHTS_DEPTH)
-
-#define CONVOLUTION1x3_BIFROST(acc, src0, src1, weights_row0) \
- ({ \
- acc.s0 = mad(src0.s0, weights_row0.s0, acc.s0); \
- acc.s1 = mad(src0.s1, weights_row0.s0, acc.s1); \
- acc.s2 = mad(src0.s2, weights_row0.s0, acc.s2); \
- acc.s3 = mad(src0.s3, weights_row0.s0, acc.s3); \
- acc.s0 = mad(src0.s1, weights_row0.s1, acc.s0); \
- acc.s1 = mad(src0.s2, weights_row0.s1, acc.s1); \
- acc.s2 = mad(src0.s3, weights_row0.s1, acc.s2); \
- acc.s3 = mad(src1.s0, weights_row0.s1, acc.s3); \
- acc.s0 = mad(src0.s2, weights_row0.s2, acc.s0); \
- acc.s1 = mad(src0.s3, weights_row0.s2, acc.s1); \
- acc.s2 = mad(src1.s0, weights_row0.s2, acc.s2); \
- acc.s3 = mad(src1.s1, weights_row0.s2, acc.s3); \
- })
-
-/** An optimized direct convolution 3x3 OpenCL kernel for Bifrost architectures when the data type is F32
- *
- * @note This OpenCL kernel works only with stride_x and stride_y equal to 1
- * @note The third dimensions of the weights tensors must be passed at compile time using -DWEIGHTS_DEPTH
- * @note If biases are used then -DHAS_BIAS has to be passed at compile time
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr Pointer to the biases tensor. Same as @p src_ptr
- * @param[in] biases_stride_x Stride of the biases tensor in X dimension (in bytes)
- * @param[in] biases_step_x biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes The offset of the first element in the biases tensor
- * @param[in] weights_stride_w Stride of the weights tensor in the 4th dimension
- */
-__kernel void direct_convolution3x3_f32_bifrost(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights),
-#ifdef HAS_BIAS
- VECTOR_DECLARATION(biases),
-#endif /* defined(HAS_BIAS) */
- unsigned int weights_stride_w)
-{
- // Get the kernel index
- const int kernel_index = get_global_id(2);
-
- Image src = CONVERT_TO_IMAGE_STRUCT(src);
- Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
-
- float4 values0 = 0;
- float4 values1 = 0;
- float4 values2 = 0;
-
- __global uchar *weights_addr = (__global uchar *)(weights_ptr + weights_offset_first_element_in_bytes + kernel_index * weights_stride_w);
- __global uchar *src_addr = (__global uchar *)offset(&src, 0, 0);
-
- // Note: Since each work-item computes 4x3 elements, we need to load 5 rows from the input tensor
-
- for(ushort d = 0; d < (ushort)WEIGHTS_DEPTH; ++d)
- {
- // Load the weights
- float3 weights_row0 = vload3(0, (__global float *)(weights_addr + 0 * weights_stride_y));
- float3 weights_row1 = vload3(0, (__global float *)(weights_addr + 1 * weights_stride_y));
- float3 weights_row2 = vload3(0, (__global float *)(weights_addr + 2 * weights_stride_y));
- float4 src0;
- float2 src1;
-
- // Load values from row0 of input tensor
- src0 = vload4(0, (__global float *)(src_addr + 0 * src_stride_y));
- src1 = vload2(0, (__global float *)(src_addr + 0 * src_stride_y) + 4);
-
- CONVOLUTION1x3_BIFROST(values0, src0, src1, weights_row0);
-
- // Load values from row1 of input tensor
- src0 = vload4(0, (__global float *)(src_addr + 1 * src_stride_y));
- src1 = vload2(0, (__global float *)(src_addr + 1 * src_stride_y) + 4);
-
- // Accumulate
- CONVOLUTION1x3_BIFROST(values0, src0, src1, weights_row1);
- CONVOLUTION1x3_BIFROST(values1, src0, src1, weights_row0);
-
- // Load values from row2 of input tensor
- src0 = vload4(0, (__global float *)(src_addr + 2 * src_stride_y));
- src1 = vload2(0, (__global float *)(src_addr + 2 * src_stride_y) + 4);
-
- // Accumulate
- CONVOLUTION1x3_BIFROST(values0, src0, src1, weights_row2);
- CONVOLUTION1x3_BIFROST(values1, src0, src1, weights_row1);
- CONVOLUTION1x3_BIFROST(values2, src0, src1, weights_row0);
-
- // Load values from row3 of input tensor
- src0 = vload4(0, (__global float *)(src_addr + 3 * src_stride_y));
- src1 = vload2(0, (__global float *)(src_addr + 3 * src_stride_y) + 4);
-
- // Accumulate
- CONVOLUTION1x3_BIFROST(values1, src0, src1, weights_row2);
- CONVOLUTION1x3_BIFROST(values2, src0, src1, weights_row1);
-
-        // Load values from row4 of input tensor
- src0 = vload4(0, (__global float *)(src_addr + 4 * src_stride_y));
- src1 = vload2(0, (__global float *)(src_addr + 4 * src_stride_y) + 4);
-
- // Accumulate
- CONVOLUTION1x3_BIFROST(values2, src0, src1, weights_row2);
-
- src_addr += src_stride_z;
- weights_addr += weights_stride_z;
- }
-
-#ifdef HAS_BIAS
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-
- float bias = (float) * ((__global float *)(vector_offset(&biases, kernel_index)));
-
- values0 += (float4)bias;
- values1 += (float4)bias;
- values2 += (float4)bias;
-#endif /* defined(HAS_BIAS) */
-
- vstore4(values0, 0, (__global float *)(dst.ptr + 0 * dst_stride_y));
- vstore4(values1, 0, (__global float *)(dst.ptr + 1 * dst_stride_y));
- vstore4(values2, 0, (__global float *)(dst.ptr + 2 * dst_stride_y));
-}
-#endif // defined(WEIGHTS_DEPTH)
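
The 3x3 Bifrost kernel above computes three output rows per work-item precisely so that input rows can be reused: five input rows are loaded per depth slice and, except at the borders of the window, each loaded row is accumulated into up to three output rows against a different weight row (output row r receives input rows r, r+1 and r+2 with weight rows 0, 1 and 2). That reuse pattern, spelled out in C with illustrative helper names:

    #include <stdio.h>

    enum { OUT_ROWS = 3, IN_ROWS = 5, W = 4 };

    /* acc += 3-tap convolution of one source row: one CONVOLUTION1x3_BIFROST call. */
    static void acc_row(float acc[W], const float src[W + 2], const float wk[3])
    {
        for (int x = 0; x < W; ++x)
            acc[x] += src[x] * wk[0] + src[x + 1] * wk[1] + src[x + 2] * wk[2];
    }

    /* One depth slice of the 3x3 convolution: input row r feeds output rows
     * r, r-1 and r-2 (where they exist) against weight rows 0, 1 and 2. */
    static void conv3x3_slice(float src[IN_ROWS][W + 2],
                              float wrow[3][3],
                              float acc[OUT_ROWS][W])
    {
        for (int r = 0; r < IN_ROWS; ++r)
            for (int k = 0; k < 3; ++k)
            {
                const int out = r - k; /* which output row this (input row, weight row) pair feeds */
                if (out >= 0 && out < OUT_ROWS)
                    acc_row(acc[out], src[r], wrow[k]);
            }
    }

    int main(void)
    {
        float src[IN_ROWS][W + 2] = { { 0 } };
        float wrow[3][3]          = { { 0 } };
        float acc[OUT_ROWS][W]    = { { 0 } };

        src[2][1]  = 1.0f; /* a single non-zero input sample */
        wrow[1][1] = 1.0f; /* the centre tap of the 3x3 window */

        conv3x3_slice(src, wrow, acc);
        printf("acc[1][0]=%.0f\n", acc[1][0]); /* the centre tap reaches output row 1 */
        return 0;
    }
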
diff --git a/src/core/CL/cl_kernels/direct_convolution5x5.cl b/src/core/CL/cl_kernels/direct_convolution5x5.cl
deleted file mode 100644
index 59d668f0bf..0000000000
--- a/src/core/CL/cl_kernels/direct_convolution5x5.cl
+++ /dev/null
@@ -1,313 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers.h"
-
-#undef CONVERT_SAT
-
-#if defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH)
-
-#if STRIDE_X == 1
-#define CONVOLUTION1x5(acc, src_row_ptr, weights_row_ptr) CONVOLUTION1x5_STRIDE1(acc, src_row_ptr, weights_row_ptr)
-#elif STRIDE_X == 2 /* STRIDE_X == 1 */
-#define CONVOLUTION1x5(acc, src_row_ptr, weights_row_ptr) CONVOLUTION1x5_STRIDE2(acc, src_row_ptr, weights_row_ptr)
-#else /* STRIDE_X not equals 1 or 2 */
-#error "STRIDE_X larger than 2 is not supported"
-#endif /* STRIDE_X == 2 */
-
-#define CONVOLUTION1x5_STRIDE1(acc, src_row_ptr, weights_row_ptr) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, 4) \
- weights_values0 = vload4(0, weights_row_ptr); \
- DATA_TYPE weights_value1 = *(weights_row_ptr + 4); \
- VEC_DATA_TYPE(DATA_TYPE, 8) \
- src0 = vload8(0, src_row_ptr); \
- VEC_DATA_TYPE(DATA_TYPE, 4) \
- src1 = vload4(0, src_row_ptr + 8); \
- \
- acc += src0 * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s0; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s1234, src0.s567, src1.s0) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s1; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s234, src0.s567, src1.s01) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s2; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s345, src0.s67, src1.s012) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s3; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s45, src0.s67, src1.s0123) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_value1; \
- })
-
-#define CONVOLUTION1x5_STRIDE2(acc, src_row_ptr, weights_row_ptr) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, 4) \
- weights_values0 = vload4(0, weights_row_ptr); \
- DATA_TYPE weights_value1 = *(weights_row_ptr + 4); \
- VEC_DATA_TYPE(DATA_TYPE, 16) \
- src0 = vload16(0, src_row_ptr); \
- VEC_DATA_TYPE(DATA_TYPE, 4) \
- src1 = vload4(0, src_row_ptr + 16); \
- acc += src0.even * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s0; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s1357, src0.s9BDF) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s1; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s2468, src0.sACE, src1.s0) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s2; \
- \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s3579, src0.sBDF, src1.s1) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s3; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s468a, src0.sCE, src1.s02) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_value1; \
- })
-
-/** This kernel performs a direct convolution to convolve the low three dimensions.
- *
- * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
- * @note The third dimensions of the weights tensors must be passed at compile time using -DWEIGHTS_DEPTH
- * @note If biases are used then -DHAS_BIAS has to be passed at compile time
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr Pointer to the biases tensor. Same as @p src_ptr
- * @param[in] biases_stride_x Stride of the biases tensor in X dimension (in bytes)
- * @param[in] biases_step_x biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes The offset of the first element in the biases tensor
- * @param[in] weights_stride_w Stride of the weights tensor in the 4th dimension
- */
-__kernel void direct_convolution5x5(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights),
-#ifdef HAS_BIAS
- VECTOR_DECLARATION(biases),
-#endif /* defined(HAS_BIAS) */
- unsigned int weights_stride_w)
-{
- Image src = CONVERT_TO_IMAGE_STRUCT(src);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
- Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
-
- VEC_DATA_TYPE(DATA_TYPE, 8)
- values0 = 0;
-
- __global uchar *weights_addr = (__global uchar *)tensor3D_offset(&weights, 0, 0, 0);
- __global uchar *src_addr = (__global uchar *)offset(&src, 0, 0);
-
- const int kernel_index = get_global_id(2);
- weights_addr += kernel_index * weights_stride_w;
-
- for(volatile int d = 0; d < WEIGHTS_DEPTH; ++d)
- {
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)src_addr, (__global DATA_TYPE *)weights_addr);
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 3 * weights_stride_y));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 4 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 4 * weights_stride_y));
-
- src_addr += src_stride_z;
- weights_addr += weights_stride_z;
- }
-
-#ifdef HAS_BIAS
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-
- values0 += (VEC_DATA_TYPE(DATA_TYPE, 8)) * ((__global DATA_TYPE *)(vector_offset(&biases, kernel_index)));
-#endif /* defined(HAS_BIAS) */
-
- vstore8(values0, 0, (__global DATA_TYPE *)dst.ptr);
-}
-#endif // defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH)
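
One detail shared by all the kernels in these files is that the depth loop advances raw byte pointers: src_addr and weights_addr are __global uchar pointers bumped by src_stride_z and weights_stride_z on every iteration, so the same code works for any element size or padding the runtime picked for the tensors. A small C sketch of stride-in-bytes addressing (the struct and its fields are illustrative, not the library's types):

    #include <stdio.h>
    #include <stdint.h>

    /* A tensor addressed with byte strides, as the kernels do. */
    struct plane { uint8_t *base; size_t stride_y; size_t stride_z; };

    /* Pointer to float element (x, y) of depth slice z. */
    static float *at(struct plane p, int x, int y, int z)
    {
        return (float *)(p.base + (size_t)z * p.stride_z
                                + (size_t)y * p.stride_y
                                + (size_t)x * sizeof(float));
    }

    int main(void)
    {
        /* 4 (w) x 2 (h) x 2 (d) floats, densely packed:
         * stride_y = 4 floats, stride_z = 8 floats, both expressed in bytes. */
        float buf[2][2][4] = { { { 0 } } };
        struct plane p = { (uint8_t *)buf, 4 * sizeof(float), 8 * sizeof(float) };

        *at(p, 3, 1, 1) = 42.0f;        /* write through byte-stride addressing */
        printf("%.0f\n", buf[1][1][3]); /* 42: the same element via the C array */
        return 0;
    }
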
-
-#if defined(WEIGHTS_DEPTH)
-
-#define CONVOLUTION1x5_BIFROST(acc, src0, weights_row00, weights_row01) \
- ({ \
- acc.s0 = mad(src0.s0, weights_row00.s0, acc.s0); \
- acc.s1 = mad(src0.s1, weights_row00.s0, acc.s1); \
- acc.s2 = mad(src0.s2, weights_row00.s0, acc.s2); \
- acc.s3 = mad(src0.s3, weights_row00.s0, acc.s3); \
- acc.s0 = mad(src0.s1, weights_row00.s1, acc.s0); \
- acc.s1 = mad(src0.s2, weights_row00.s1, acc.s1); \
- acc.s2 = mad(src0.s3, weights_row00.s1, acc.s2); \
- acc.s3 = mad(src0.s4, weights_row00.s1, acc.s3); \
- acc.s0 = mad(src0.s2, weights_row00.s2, acc.s0); \
- acc.s1 = mad(src0.s3, weights_row00.s2, acc.s1); \
- acc.s2 = mad(src0.s4, weights_row00.s2, acc.s2); \
- acc.s3 = mad(src0.s5, weights_row00.s2, acc.s3); \
- acc.s0 = mad(src0.s3, weights_row00.s3, acc.s0); \
- acc.s1 = mad(src0.s4, weights_row00.s3, acc.s1); \
- acc.s2 = mad(src0.s5, weights_row00.s3, acc.s2); \
- acc.s3 = mad(src0.s6, weights_row00.s3, acc.s3); \
- acc.s0 = mad(src0.s4, weights_row01, acc.s0); \
- acc.s1 = mad(src0.s5, weights_row01, acc.s1); \
- acc.s2 = mad(src0.s6, weights_row01, acc.s2); \
- acc.s3 = mad(src0.s7, weights_row01, acc.s3); \
- })
-
-/** An optimized direct convolution 5x5 OpenCL kernel for Bifrost architectures when the data type is F32
- *
- * @note This OpenCL kernel works only with stride_x and stride_y equal to 1
- * @note The third dimensions of the weights tensors must be passed at compile time using -DWEIGHTS_DEPTH
- * @note If biases are used then -DHAS_BIAS has to be passed at compile time
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr Pointer to the biases tensor. Same as @p src_ptr
- * @param[in] biases_stride_x Stride of the biases tensor in X dimension (in bytes)
- * @param[in] biases_step_x biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes The offset of the first element in the biases tensor
- * @param[in] weights_stride_w Stride of the weights tensor in the 4th dimension
- */
-__kernel void direct_convolution5x5_f32_bifrost(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights),
-#ifdef HAS_BIAS
- VECTOR_DECLARATION(biases),
-#endif /* defined(HAS_BIAS) */
- unsigned int weights_stride_w)
-{
- // Get the kernel index
- const int kernel_index = get_global_id(2);
-
- Image src = CONVERT_TO_IMAGE_STRUCT(src);
- Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
-
- float4 values0 = 0.0f;
- float4 values1 = 0.0f;
-
- __global uchar *weights_addr = (__global uchar *)(weights_ptr + weights_offset_first_element_in_bytes + kernel_index * weights_stride_w);
- __global uchar *src_addr = (__global uchar *)offset(&src, 0, 0);
-
- // Note: Since each work-item computes 4x2 elements, we need to load 6 rows from the input tensor
-
- for(ushort d = 0; d < (ushort)WEIGHTS_DEPTH; ++d)
- {
- // Load the weights from row0 and row1
- float4 weights_row00 = vload4(0, (__global float *)(weights_addr + 0 * weights_stride_y));
- float weights_row01 = *((__global float *)(weights_addr + 0 * weights_stride_y) + 4);
- float4 weights_row10 = vload4(0, (__global float *)(weights_addr + 1 * weights_stride_y));
- float weights_row11 = *((__global float *)(weights_addr + 1 * weights_stride_y) + 4);
- float8 src0;
-
- // Load values from row0 of input tensor
- src0 = vload8(0, (__global float *)(src_addr + 0 * src_stride_y));
-
- // Accumulate
- CONVOLUTION1x5_BIFROST(values0, src0, weights_row00, weights_row01);
-
- // Load values from row1 of input tensor
- src0 = vload8(0, (__global float *)(src_addr + 1 * src_stride_y));
-
- // Accumulate
- CONVOLUTION1x5_BIFROST(values0, src0, weights_row10, weights_row11);
- CONVOLUTION1x5_BIFROST(values1, src0, weights_row00, weights_row01);
-
- // Load values from row2 of input tensor
- src0 = vload8(0, (__global float *)(src_addr + 2 * src_stride_y));
-
- // Load weights from row2
- weights_row00 = vload4(0, (__global float *)(weights_addr + 2 * weights_stride_y));
- weights_row01 = *((__global float *)(weights_addr + 2 * weights_stride_y) + 4);
-
- // Accumulate
- CONVOLUTION1x5_BIFROST(values0, src0, weights_row00, weights_row01);
- CONVOLUTION1x5_BIFROST(values1, src0, weights_row10, weights_row11);
-
- // Load values from row3 of input tensor
- src0 = vload8(0, (__global float *)(src_addr + 3 * src_stride_y));
-
- // Load weights from row3
- weights_row10 = vload4(0, (__global float *)(weights_addr + 3 * weights_stride_y));
- weights_row11 = *((__global float *)(weights_addr + 3 * weights_stride_y) + 4);
-
- // Accumulate
- CONVOLUTION1x5_BIFROST(values0, src0, weights_row10, weights_row11);
- CONVOLUTION1x5_BIFROST(values1, src0, weights_row00, weights_row01);
-
- // Load values from row4 of input tensor
- src0 = vload8(0, (__global float *)(src_addr + 4 * src_stride_y));
-
- // Load weights from row4
- weights_row00 = vload4(0, (__global float *)(weights_addr + 4 * weights_stride_y));
- weights_row01 = *((__global float *)(weights_addr + 4 * weights_stride_y) + 4);
-
- CONVOLUTION1x5_BIFROST(values0, src0, weights_row00, weights_row01);
- CONVOLUTION1x5_BIFROST(values1, src0, weights_row10, weights_row11);
-
- // Load values from row5 of input tensor
- src0 = vload8(0, (__global float *)(src_addr + 5 * src_stride_y));
-
- // Accumulate
- CONVOLUTION1x5_BIFROST(values1, src0, weights_row00, weights_row01);
-
- src_addr += src_stride_z;
- weights_addr += weights_stride_z;
- }
-
-#ifdef HAS_BIAS
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-
- float4 bias = (float4) * ((__global float *)(vector_offset(&biases, kernel_index)));
-
- values0 += bias;
- values1 += bias;
-#endif /* defined(HAS_BIAS) */
-
- vstore4(values0, 0, (__global float *)(dst.ptr + 0 * dst_stride_y));
- vstore4(values1, 0, (__global float *)(dst.ptr + 1 * dst_stride_y));
-}
-#endif // defined(WEIGHTS_DEPTH)
diff --git a/src/core/CL/cl_kernels/direct_convolution_quantized.cl b/src/core/CL/cl_kernels/direct_convolution_quantized.cl
deleted file mode 100644
index b80d4f587e..0000000000
--- a/src/core/CL/cl_kernels/direct_convolution_quantized.cl
+++ /dev/null
@@ -1,308 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers_asymm.h"
-
-#undef CONVERT_SAT_STR
-#undef CONVERT_SAT
-
-#if defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT)
-
-#define CONVERT_SAT_STR(x, type) (convert_##type##8_sat((x)))
-#define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type)
-
-#if KERNEL_SIZE == 9
-
-#if STRIDE_X == 1
-#define CONVOLUTION1x9(acc, src_row_ptr, weights_row_ptr) CONVOLUTION1x9_STRIDE1(acc, src_row_ptr, weights_row_ptr)
-#elif STRIDE_X == 2
-#define CONVOLUTION1x9(acc, src_row_ptr, weights_row_ptr) CONVOLUTION1x9_STRIDE2(acc, src_row_ptr, weights_row_ptr)
-#else /* STRIDE_X not equals 1 or 2 */
-#error "STRIDE_X larger than 2 is not supported"
-#endif /* STRIDE_X */
-
-#define CONVOLUTION1x9_STRIDE1(acc, src_row_ptr, weights_row_ptr) \
- ({ \
- int8 weights_values0 = convert_int8(vload8(0, weights_row_ptr)); \
- int weights_value1 = convert_int(*(weights_row_ptr + 8)); \
- int16 src0 = convert_int16(vload16(0, src_row_ptr)); \
- acc += (src0.lo + INPUT_OFFSET) * ((int8)weights_values0.s0 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s1234, src0.s5678) + INPUT_OFFSET) * ((int8)weights_values0.s1 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s2345, src0.s6789) + INPUT_OFFSET) * ((int8)weights_values0.s2 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s3456, src0.s789A) + INPUT_OFFSET) * ((int8)weights_values0.s3 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s4567, src0.s89AB) + INPUT_OFFSET) * ((int8)weights_values0.s4 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s5678, src0.s9ABC) + INPUT_OFFSET) * ((int8)weights_values0.s5 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s6789, src0.sABCD) + INPUT_OFFSET) * ((int8)weights_values0.s6 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s789A, src0.sBCDE) + INPUT_OFFSET) * ((int8)weights_values0.s7 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s89AB, src0.sCDEF) + INPUT_OFFSET) * ((int8)weights_value1 + WEIGHTS_OFFSET); \
- })
-
-#define CONVOLUTION1x9_STRIDE2(acc, src_row_ptr, weights_row_ptr) \
- ({ \
- int8 weights_values0 = convert_int8(vload8(0, weights_row_ptr)); \
- int weights_value1 = convert_int(*(weights_row_ptr + 8)); \
- int16 src0 = convert_int16(vload16(0, src_row_ptr)); \
- int8 src1 = convert_int8(vload8(0, src_row_ptr + 16)); \
- acc += (src0.even + INPUT_OFFSET) * ((int8)weights_values0.s0 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s1357, src0.s9BDF) + INPUT_OFFSET) * ((int8)weights_values0.s1 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s2468, src0.sACE, src1.s0) + INPUT_OFFSET) * ((int8)weights_values0.s2 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s3579, src0.sBDF, src1.s1) + INPUT_OFFSET) * ((int8)weights_values0.s3 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s468A, src0.sCE, src1.s02) + INPUT_OFFSET) * ((int8)weights_values0.s4 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s579B, src0.sDF, src1.s13) + INPUT_OFFSET) * ((int8)weights_values0.s5 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s68AC, src0.sE, src1.s024) + INPUT_OFFSET) * ((int8)weights_values0.s6 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s79BD, src0.sF, src1.s135) + INPUT_OFFSET) * ((int8)weights_values0.s7 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s8ACE, src1.s0246) + INPUT_OFFSET) * ((int8)weights_value1 + WEIGHTS_OFFSET); \
- })
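
In the quantized macros above everything is widened to int before accumulating, and the asymmetric zero points are folded in per tap as (input + INPUT_OFFSET) * (weight + WEIGHTS_OFFSET). A scalar C sketch of one 9-tap output in that integer domain (the offset values are made up for the example):

    #include <stdio.h>
    #include <stdint.h>

    enum { INPUT_OFFSET = -128, WEIGHTS_OFFSET = 3 }; /* illustrative zero points */

    /* One output of a 9-tap quantized row convolution: accumulate in int32,
     * adding the offsets to each widened operand as the macros do. */
    static int32_t conv1x9_q(const uint8_t src[9], const uint8_t w[9])
    {
        int32_t acc = 0;
        for (int k = 0; k < 9; ++k)
            acc += ((int32_t)src[k] + INPUT_OFFSET) * ((int32_t)w[k] + WEIGHTS_OFFSET);
        return acc;
    }

    int main(void)
    {
        const uint8_t src[9] = { 130, 130, 130, 130, 130, 130, 130, 130, 130 };
        const uint8_t w[9]   = { 7, 7, 7, 7, 7, 7, 7, 7, 7 };
        printf("%d\n", conv1x9_q(src, w)); /* 9 * (130 - 128) * (7 + 3) = 180 */
        return 0;
    }
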
-
-#elif KERNEL_SIZE == 5
-
-#if STRIDE_X == 1
-#define CONVOLUTION1x5(acc, src_row_ptr, weights_row_ptr) CONVOLUTION1x5_STRIDE1(acc, src_row_ptr, weights_row_ptr)
-#elif STRIDE_X == 2
-#define CONVOLUTION1x5(acc, src_row_ptr, weights_row_ptr) CONVOLUTION1x5_STRIDE2(acc, src_row_ptr, weights_row_ptr)
-#else /* STRIDE_X not equals 1 or 2 */
-#error "STRIDE_X larger than 2 is not supported"
-#endif /* STRIDE_X */
-
-#define CONVOLUTION1x5_STRIDE1(acc, src_row_ptr, weights_row_ptr) \
- ({ \
- int4 weights_values0 = convert_int4(vload4(0, weights_row_ptr)); \
- int weights_value1 = convert_int(*(weights_row_ptr + 4)); \
- int8 src0 = convert_int8(vload8(0, src_row_ptr)); \
- int4 src1 = convert_int4(vload4(0, src_row_ptr + 8)); \
- acc += (src0 + INPUT_OFFSET) * ((int8)weights_values0.s0 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s1234, src0.s567, src1.s0) + INPUT_OFFSET) * ((int8)weights_values0.s1 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s234, src0.s567, src1.s01) + INPUT_OFFSET) * ((int8)weights_values0.s2 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s345, src0.s67, src1.s012) + INPUT_OFFSET) * ((int8)weights_values0.s3 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s45, src0.s67, src1.s0123) + INPUT_OFFSET) * ((int8)weights_value1 + WEIGHTS_OFFSET); \
- })
-
-#define CONVOLUTION1x5_STRIDE2(acc, src_row_ptr, weights_row_ptr) \
- ({ \
- int4 weights_values0 = convert_int4(vload4(0, weights_row_ptr)); \
- int weights_value1 = convert_int(*(weights_row_ptr + 4)); \
- int16 src0 = convert_int16(vload16(0, src_row_ptr)); \
- int4 src1 = convert_int4(vload4(0, src_row_ptr + 16)); \
- acc += (src0.even + INPUT_OFFSET) * ((int8)weights_values0.s0 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s1357, src0.s9BDF) + INPUT_OFFSET) * ((int8)weights_values0.s1 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s2468, src0.sACE, src1.s0) + INPUT_OFFSET) * ((int8)weights_values0.s2 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s3579, src0.sBDF, src1.s1) + INPUT_OFFSET) * ((int8)weights_values0.s3 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s468a, src0.sCE, src1.s02) + INPUT_OFFSET) * ((int8)weights_value1 + WEIGHTS_OFFSET); \
- })
-
-#elif KERNEL_SIZE == 3
-
-#if STRIDE_X == 1
-#define CONVOLUTION1x3(acc, src_row_ptr, weights_row_ptr) CONVOLUTION1x3_STRIDE1(acc, src_row_ptr, weights_row_ptr)
-#elif STRIDE_X == 2
-#define CONVOLUTION1x3(acc, src_row_ptr, weights_row_ptr) CONVOLUTION1x3_STRIDE2(acc, src_row_ptr, weights_row_ptr)
-#else /* STRIDE_X not equals 1 or 2 */
-#error "STRIDE_X larger than 2 is not supported"
-#endif /* STRIDE_X */
-
-#define CONVOLUTION1x3_STRIDE1(acc, src_row_ptr, weights_row_ptr) \
- ({ \
- int3 weights_values0 = convert_int3(vload3(0, weights_row_ptr)); \
- int8 src0 = convert_int8(vload8(0, src_row_ptr)); \
- int2 src1 = convert_int2(vload2(0, src_row_ptr + 8)); \
- acc += (src0 + INPUT_OFFSET) * ((int8)weights_values0.s0 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s1234, src0.s567, src1.s0) + INPUT_OFFSET) * ((int8)weights_values0.s1 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s234, src0.s567, src1.s01) + INPUT_OFFSET) * ((int8)weights_values0.s2 + WEIGHTS_OFFSET); \
- })
-
-#define CONVOLUTION1x3_STRIDE2(acc, src_row_ptr, weights_row_ptr) \
- ({ \
- int3 weights_values0 = convert_int3(vload3(0, weights_row_ptr)); \
- int16 src0 = convert_int16(vload16(0, src_row_ptr)); \
- int src1 = convert_int(*(src_row_ptr + 16)); \
- acc += (src0.even + INPUT_OFFSET) * ((int8)weights_values0.s0 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s1357, src0.s9BDF) + INPUT_OFFSET) * ((int8)weights_values0.s1 + WEIGHTS_OFFSET); \
- acc += ((int8)(src0.s2468, src0.sACE, src1) + INPUT_OFFSET) * ((int8)weights_values0.s2 + WEIGHTS_OFFSET); \
- })
-
-#elif KERNEL_SIZE == 1
-
-#if STRIDE_X == 3
-#define INPUT_VALUE extract_input_stride3
-#elif STRIDE_X == 2
-#define INPUT_VALUE extract_input_stride2
-#elif STRIDE_X == 1
-#define INPUT_VALUE extract_input_stride1
-
-#else /* STRIDE_X not equals 1, 2 or 3 */
-#error "Only support strides 1, 2 and 3"
-#endif /* STRIDE_X */
-
-/** Extracts a 1D horizontal vector from the input tensor with stride as 1.
- *
- * @param[in] input_value Pointer to the first value.
- *
- * @return extracted input values.
- */
-inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride1(__global const DATA_TYPE *input_value)
-{
- return vload8(0, input_value);
-}
-
-/** Extracts a 1D horizontal vector from the input tensor with stride as 2.
- *
- * @param[in] input_value Pointer to the first value.
- *
- * @return extracted input values.
- */
-inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride2(__global const DATA_TYPE *input_value)
-{
- VEC_DATA_TYPE(DATA_TYPE, 16)
- temp = vload16(0, input_value);
- return temp.s02468ace;
-}
-
-/** Extracts a 1D horizontal vector from the input tensor with stride as 3 and 8-bit data size.
- *
- * @param[in] input_value Pointer to the first value.
- *
- * @return extracted input values.
- */
-inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride3(__global const DATA_TYPE *input_value)
-{
- VEC_DATA_TYPE(DATA_TYPE, 16)
- temp1 = vload16(0, input_value);
- VEC_DATA_TYPE(DATA_TYPE, 16)
- temp2 = vload16(0, input_value + 12);
- return (VEC_DATA_TYPE(DATA_TYPE, 8))(temp1.s0369, temp2.s0369);
-}
-
-#else /* KERNEL_SIZE not equals 1, 3 , 5, 9 */
-#error "Only kernel sizes 1, 3, 5 and 9 are supported"
-#endif /* KERNEL_SIZE */
-
-/** This kernel performs a direct convolution to convolve the low three dimensions.
- *
- * @note The convolution stride x must be passed at compile time using -DSTRIDE_X e.g. -DSTRIDE_X=1
- * @note The third dimensions of the weights tensors must be passed at compile time using -DWEIGHTS_DEPTH
- * @note If biases are used then -DHAS_BIAS has to be passed at compile time
- * @note The output quantization multiplier must be passed at compile time using -DOUTPUT_MULTIPLIER e.g. -DOUTPUT_MULTIPLIER=1234
- * @note The output quantization shift must be passed at compile time using -DOUTPUT_SHIFT e.g. -DOUTPUT_SHIFT=4
- * @note The input offset quantization parameter must be passed at compile time using -DINPUT_OFFSET e.g. -DINPUT_OFFSET=3
- * @note The weights offset quantization parameter must be passed at compile time using -DWEIGHTS_OFFSET e.g. -DWEIGHTS_OFFSET=3
- * @note The destination offset quantization parameter must be passed at compile time using -DOUTPUT_OFFSET e.g. -DOUTPUT_OFFSET=3
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr Pointer to the biases tensor. Supported data types: S32
- * @param[in] biases_stride_x Stride of the biases tensor in X dimension (in bytes)
- * @param[in] biases_step_x biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes The offset of the first element in the biases tensor
- * @param[in] weights_stride_w Stride of the weights tensor in the 4th dimension
- */
-__kernel void direct_convolution_quantized(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights),
-#ifdef HAS_BIAS
- VECTOR_DECLARATION(biases),
-#endif /* defined(HAS_BIAS) */
- unsigned int weights_stride_w)
-{
- Image src = CONVERT_TO_IMAGE_STRUCT(src);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
- Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
-
- int8 values0 = 0;
-
- __global DATA_TYPE *weights_addr = (__global DATA_TYPE *)tensor3D_offset(&weights, 0, 0, 0);
- __global DATA_TYPE *src_addr = (__global DATA_TYPE *)offset(&src, 0, 0);
-
- const int kernel_index = get_global_id(2);
- weights_addr += kernel_index * weights_stride_w;
-
- for(volatile int d = 0; d < WEIGHTS_DEPTH; ++d)
- {
-#if KERNEL_SIZE == 9
- CONVOLUTION1x9(values0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_y));
- CONVOLUTION1x9(values0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y));
- CONVOLUTION1x9(values0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y));
- CONVOLUTION1x9(values0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 3 * weights_stride_y));
- CONVOLUTION1x9(values0, (__global DATA_TYPE *)(src_addr + 4 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 4 * weights_stride_y));
- CONVOLUTION1x9(values0, (__global DATA_TYPE *)(src_addr + 5 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 5 * weights_stride_y));
- CONVOLUTION1x9(values0, (__global DATA_TYPE *)(src_addr + 6 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 6 * weights_stride_y));
- CONVOLUTION1x9(values0, (__global DATA_TYPE *)(src_addr + 7 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 7 * weights_stride_y));
- CONVOLUTION1x9(values0, (__global DATA_TYPE *)(src_addr + 8 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 8 * weights_stride_y));
-#elif KERNEL_SIZE == 5
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)src_addr, (__global DATA_TYPE *)weights_addr);
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 3 * weights_stride_y));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 4 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 4 * weights_stride_y));
-#elif KERNEL_SIZE == 3
- CONVOLUTION1x3(values0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_y));
- CONVOLUTION1x3(values0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y));
- CONVOLUTION1x3(values0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y));
-#elif KERNEL_SIZE == 1
- int weight = convert_int(*(__global DATA_TYPE *)weights_addr);
- int8 input_value = convert_int8(INPUT_VALUE((__global DATA_TYPE *)src_addr));
- values0 += (input_value + INPUT_OFFSET) * ((int8)weight + WEIGHTS_OFFSET);
-#endif /* (KERNEL_SIZE == 1) || (KERNEL_SIZE == 3) || (KERNEL_SIZE == 5) */
-
- src_addr += src_stride_z;
- weights_addr += weights_stride_z;
- }
-
-#ifdef HAS_BIAS
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
- __global int *bias_addr = ((__global int *)(vector_offset(&biases, kernel_index)));
- values0 += (int8)(*bias_addr);
-#endif /* defined(HAS_BIAS) */
-
-#if OUTPUT_SHIFT < 0
- values0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
-#else // OUTPUT_SHIFT < 0
- values0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
-#endif // OUTPUT_SHIFT < 0
- values0 = values0 + OUTPUT_OFFSET;
-
- vstore8(CONVERT_SAT(values0, DATA_TYPE), 0, (__global DATA_TYPE *)dst.ptr);
-}
-#endif // defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT)
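The kernel above was specialised entirely through -D build options, as listed in its documentation notes. A minimal host-side sketch of such a build follows; the function name, device handling and every numeric value are illustrative placeholders, not values taken from this patch.

    #include <CL/cl.h>

    /* Build the quantized direct convolution program for a hypothetical 3x3,
     * stride-1, QASYMM8 configuration. The -D set mirrors the notes above;
     * the concrete values are placeholders. */
    static cl_int build_direct_conv_quantized(cl_program program, cl_device_id device)
    {
        const char *options =
            "-DDATA_TYPE=uchar -DKERNEL_SIZE=3 -DSTRIDE_X=1 -DWEIGHTS_DEPTH=64 "
            "-DHAS_BIAS -DINPUT_OFFSET=3 -DWEIGHTS_OFFSET=3 -DOUTPUT_OFFSET=3 "
            "-DOUTPUT_MULTIPLIER=1234 -DOUTPUT_SHIFT=4";
        return clBuildProgram(program, 1, &device, options, NULL, NULL);
    }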
diff --git a/src/core/CL/cl_kernels/gemm_helpers.h b/src/core/CL/cl_kernels/gemm_helpers.h
index be72efa3b4..4bef02314f 100644
--- a/src/core/CL/cl_kernels/gemm_helpers.h
+++ b/src/core/CL/cl_kernels/gemm_helpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -31,17 +31,17 @@
* @param[in] offset The offset within the vector. Offset can only be of the same size of the OpenCL vector (2,3,4,8,16)
* @param[in] n0 The number of consecutive columns to access. n0 + offset must be <= 16
* @param[in] x Vector to access
- * @{
+ *
*/
#define SCALAR_ACCESS_STR(offset, n0, x) scalar_access_##offset##_##n0(x)
-#define SCALAR_ACCESS(offset, n0, x) SCALAR_ACCESS_STR(offset, n0, x)
+#define SCALAR_ACCESS(offset, n0, x) SCALAR_ACCESS_STR(offset, n0, x)
// offset == 0
-#define scalar_access_0_1(x) ((x).s0)
-#define scalar_access_0_2(x) ((x).s01)
-#define scalar_access_0_3(x) ((x).s012)
-#define scalar_access_0_4(x) ((x).s0123)
-#define scalar_access_0_8(x) ((x).s01234567)
+#define scalar_access_0_1(x) ((x).s0)
+#define scalar_access_0_2(x) ((x).s01)
+#define scalar_access_0_3(x) ((x).s012)
+#define scalar_access_0_4(x) ((x).s0123)
+#define scalar_access_0_8(x) ((x).s01234567)
#define scalar_access_0_16(x) ((x).s0123456789ABCDEF)
// offset == 1
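A minimal usage sketch for the SCALAR_ACCESS helper (shown above for offset 0); the kernel and buffer names are placeholders, and gemm_helpers.h is assumed to be included as the kernels in this tree do.

    #include "gemm_helpers.h"

    __kernel void example_scalar_access(__global float *out)
    {
        float8 acc = 0.0f;
        // SCALAR_ACCESS(0, 4, acc) expands to acc.s0123: only lanes 0..3 are written.
        SCALAR_ACCESS(0, 4, acc) = (float4)(1.0f, 2.0f, 3.0f, 4.0f);
        vstore8(acc, 0, out);
    }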
@@ -100,8 +100,7 @@
* @param[in] Z The z-axis offset vector
* @{
*/
-#define LOAD_TENSOR_ROW_0(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
- ({})
+#define LOAD_TENSOR_ROW_0(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) ({})
#define LOAD_TENSOR_ROW_1(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##0) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));
@@ -186,8 +185,10 @@
* @param[in] Z The z-axis offset vector
* @{
*/
-#define LOAD_TENSOR_STR(M0, N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) LOAD_TENSOR_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z)
-#define LOAD_TENSOR(M0, N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) LOAD_TENSOR_STR(M0, N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z)
+#define LOAD_TENSOR_STR(M0, N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
+ LOAD_TENSOR_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z)
+#define LOAD_TENSOR(M0, N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
+ LOAD_TENSOR_STR(M0, N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z)
/** @} */ // end of group LOAD_TENSOR
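LOAD_TENSOR writes into vectors that already exist, placing the loaded N0 elements at the lane range selected by COL_OFFSET. A rough sketch, with placeholder names and byte strides as used by the kernels that call these helpers:

    #include "gemm_helpers.h"

    __kernel void example_load_tensor(__global uchar *src_ptr, uint src_stride_y)
    {
        uint zin0 = 0, zin1 = 0;     // no 3D reinterpretation in this sketch
        float4 a0 = 0.0f, a1 = 0.0f; // destination vectors must pre-exist
        // Fills a0.s0123 and a1.s0123 from two consecutive rows (COL_OFFSET = 0).
        LOAD_TENSOR(2, 4, float, a, src_ptr, 0, src_stride_y, zin);
    }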
/** Load 2D tensor (consecutive rows and columns) with Z offset.
@@ -202,8 +203,7 @@
* @param[in] Z The z-axis offset vector
* @{
*/
-#define LOAD_TENSOR_M0X0(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \
- ({})
+#define LOAD_TENSOR_M0X0(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) ({})
#define LOAD_TENSOR_M0X1(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \
LOAD_TENSOR(M0, N0, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin);
@@ -279,8 +279,11 @@
* @param[in] Z The z-axis offset vector
* @{
*/
-#define LOAD_TENSOR_M0XN0_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) LOAD_TENSOR_M0X##N0(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
-#define LOAD_TENSOR_M0XN0(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) LOAD_TENSOR_M0XN0_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
+#define LOAD_TENSOR_M0XN0_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
+ LOAD_TENSOR_M0X##N0(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
+#define LOAD_TENSOR_M0XN0(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
+ LOAD_TENSOR_M0XN0_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
+/** @} */ // end of group LOAD_TENSOR_M0XN0

/** Loads the rows from 0 to n-1 in the given variables (BASENAME0 to BASENAMEn-1).
* @name LOAD_ROW_n
@@ -394,10 +397,323 @@
* @param[in] Z The z-axis offset vector
* @{
*/
-#define LOAD_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) LOAD_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z)
-#define LOAD_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) LOAD_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z)
+#define LOAD_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ LOAD_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z)
+#define LOAD_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ LOAD_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z)
/** @} */ // end of group LOAD_BLOCK
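Unlike LOAD_TENSOR, LOAD_BLOCK declares the destination vectors itself. A hypothetical 4x8 load; the pointer and stride names are placeholders, the pointer is a byte pointer and STRIDE_Y is in bytes:

    #include "gemm_helpers.h"

    __kernel void example_load_block(__global uchar *lhs_ptr, uint lhs_stride_y)
    {
        uint zlhs0 = 0, zlhs1 = 0, zlhs2 = 0, zlhs3 = 0;
        // Declares float8 a0, a1, a2, a3 and fills them from four consecutive rows.
        LOAD_BLOCK(4, 8, float, a, lhs_ptr, 0, lhs_stride_y, zlhs);
    }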
+/** Partially load the 0 to (n-1)th rows of the given variables
+ * @name LOAD_ROW_PARTIAL_n
+ * Within each row, load the lower @p LOAD_N0 elements of vectors of width @p N0
+ *
+ * @note in case @p LOAD_N0 != 1, 2, 3, 4, 8, 16, extra vload(s) will be invoked, thus incurring a small performance penalty.
+ *
+ * @param[in] N0 The width of the passed in vector. Supported: 1, 2, 3, 4, 8, 16
+ * @param[in] LOAD_N0 The **lower** size of the vectors to load. Supported: 1-16 and <= @p N0
+ * @param[in] DATA_TYPE The data type of the vectors
+ * @param[in] BASENAME The basename of the variables
+ * @param[in] PTR The base pointer
+ * @param[in] OFFSET The offset within a row
+ * @param[in] STRIDE_Y The stride value in y-axis direction
+ * @param[in] Z The offset in z-axis direction
+ * @{
+ */
+#define LOAD_ROW_PARTIAL_1(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ VLOAD_PARTIAL(N0, LOAD_N0) \
+ (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + OFFSET + 0 * STRIDE_Y + Z##0));
+
+#define LOAD_ROW_PARTIAL_2(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ LOAD_ROW_PARTIAL_1(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ VLOAD_PARTIAL(N0, LOAD_N0) \
+ (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + OFFSET + 1 * STRIDE_Y + Z##1));
+
+#define LOAD_ROW_PARTIAL_3(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ LOAD_ROW_PARTIAL_2(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ VLOAD_PARTIAL(N0, LOAD_N0) \
+ (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + OFFSET + 2 * STRIDE_Y + Z##2));
+
+#define LOAD_ROW_PARTIAL_4(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ LOAD_ROW_PARTIAL_3(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ VLOAD_PARTIAL(N0, LOAD_N0) \
+ (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + OFFSET + 3 * STRIDE_Y + Z##3));
+
+#define LOAD_ROW_PARTIAL_5(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ LOAD_ROW_PARTIAL_4(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ VLOAD_PARTIAL(N0, LOAD_N0) \
+ (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + OFFSET + 4 * STRIDE_Y + Z##4));
+
+#define LOAD_ROW_PARTIAL_6(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ LOAD_ROW_PARTIAL_5(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ VLOAD_PARTIAL(N0, LOAD_N0) \
+ (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + OFFSET + 5 * STRIDE_Y + Z##5));
+
+#define LOAD_ROW_PARTIAL_7(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ LOAD_ROW_PARTIAL_6(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ VLOAD_PARTIAL(N0, LOAD_N0) \
+ (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + OFFSET + 6 * STRIDE_Y + Z##6));
+
+#define LOAD_ROW_PARTIAL_8(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ LOAD_ROW_PARTIAL_7(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ VLOAD_PARTIAL(N0, LOAD_N0) \
+ (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + OFFSET + 7 * STRIDE_Y + Z##7));
+
+#define LOAD_ROW_PARTIAL_9(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ LOAD_ROW_PARTIAL_8(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ VLOAD_PARTIAL(N0, LOAD_N0) \
+ (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + OFFSET + 8 * STRIDE_Y + Z##8));
+
+#define LOAD_ROW_PARTIAL_10(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ LOAD_ROW_PARTIAL_9(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ VLOAD_PARTIAL(N0, LOAD_N0) \
+ (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + OFFSET + 9 * STRIDE_Y + Z##9));
+
+#define LOAD_ROW_PARTIAL_11(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ LOAD_ROW_PARTIAL_10(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ VLOAD_PARTIAL(N0, LOAD_N0) \
+ (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + OFFSET + 10 * STRIDE_Y + Z##A));
+
+#define LOAD_ROW_PARTIAL_12(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ LOAD_ROW_PARTIAL_11(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ VLOAD_PARTIAL(N0, LOAD_N0) \
+ (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + OFFSET + 11 * STRIDE_Y + Z##B));
+
+#define LOAD_ROW_PARTIAL_13(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ LOAD_ROW_PARTIAL_12(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ VLOAD_PARTIAL(N0, LOAD_N0) \
+ (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + OFFSET + 12 * STRIDE_Y + Z##C));
+
+#define LOAD_ROW_PARTIAL_14(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ LOAD_ROW_PARTIAL_13(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ VLOAD_PARTIAL(N0, LOAD_N0) \
+ (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + OFFSET + 13 * STRIDE_Y + Z##D));
+
+#define LOAD_ROW_PARTIAL_15(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ LOAD_ROW_PARTIAL_14(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ VLOAD_PARTIAL(N0, LOAD_N0) \
+ (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + OFFSET + 14 * STRIDE_Y + Z##E));
+
+#define LOAD_ROW_PARTIAL_16(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ LOAD_ROW_PARTIAL_15(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ VLOAD_PARTIAL(N0, LOAD_N0) \
+ (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + OFFSET + 15 * STRIDE_Y + Z##F));
+/** @} */ // end of group LOAD_ROW_PARTIAL_n
+
+/** Partially load a block of the given size LOAD_M0xLOAD_N0
+ * @name LOAD_BLOCK_PARTIAL
+ *
+ * @note The vector width @p N0 is also required for correct partial storing behaviour.
+ * @note in case @p LOAD_N0 != 1, 2, 3, 4, 8, 16, extra vload(s) will be invoked, thus incurring a small performance penalty.
+ *
+ * The data to load is expected to have consecutive names for each row.
+ * E.g., for LOAD_M0=3 and basename=c, the expected names are c0, c1 and c2.
+ * The Z offset is expected to have consecutive names.
+ * E.g., for LOAD_M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
+ *
+ * @param[in] LOAD_M0 The number of rows to load. Supported: 1-16
+ * @param[in] LOAD_N0 The lower number of elements of vectors to load. Supported: 1-16 and <= @p N0
+ * @param[in] N0 The size of each vector. Supported: 1, 2, 3, 4, 8, 16
+ * @param[in] DATA_TYPE The data type of the vectors
+ * @param[in] BASENAME The basename of the variables
+ * @param[in] PTR The base pointer
+ * @param[in] OFFSET The offset within a row
+ * @param[in] STRIDE_Y The stride value in y-axis direction
+ * @param[in] Z The offset in z-axis direction
+ * @{
+ */
+#define LOAD_BLOCK_PARTIAL_STR(LOAD_M0, LOAD_N0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ LOAD_ROW_PARTIAL_##LOAD_M0(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z)
+#define LOAD_BLOCK_PARTIAL(LOAD_M0, LOAD_N0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
+ LOAD_BLOCK_PARTIAL_STR(LOAD_M0, LOAD_N0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z)
+/** Load a block that can be partial in both x and y dimensions
+ *
+ * @note in case @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vload(s) will be invoked, thus incurring a small performance penalty.
+ *
+ * The data to load is expected to have consecutive names for each row.
+ * E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
+ * The Z offset is expected to have consecutive names.
+ * E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
+ *
+ * @param[in] M0 The number of rows to load, for non-partial blocks. Supported: 1-16
+ * @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
+ * @param[in] DATA_TYPE The data type of the vectors
+ * @param[in] BASENAME The basename of the variables
+ * @param[in] PTR The base pointer
+ * @param[in] OFFSET The offset within a row
+ * @param[in] STRIDE_Y The stride value in y-axis direction
+ * @param[in] Z The offset in z-axis direction
+ * @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported range: [1, @p M0)
+ * @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported range: [1, @p N0)
+ * @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial load Y. True to use PARTIAL_STORE_M0 rather than M0.
+ * @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial load X. True to use PARTIAL_STORE_N0 rather than N0.
+ */
+#define LOAD_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z, PARTIAL_STORE_M0, \
+ PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
+ if (!(PARTIAL_COND_X) && !(PARTIAL_COND_Y)) \
+ { \
+ LOAD_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z); \
+ } \
+ else if ((PARTIAL_COND_Y) && !(PARTIAL_COND_X)) \
+ { \
+ LOAD_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z); \
+ } \
+ else if (!(PARTIAL_COND_Y) && (PARTIAL_COND_X)) \
+ { \
+ LOAD_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z); \
+ } \
+ else \
+ { \
+ LOAD_BLOCK_PARTIAL(PARTIAL_STORE_M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z); \
+ }
+/** Load a block that can only be partial in x but not y.
+ *
+ * @note in case @p N0 or @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vload(s) will be invoked, thus incurring a small performance penalty.
+ *
+ * The data to load is expected to have consecutive names for each row.
+ * E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
+ * The Z offset is expected to have consecutive names.
+ * E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
+ *
+ * @param[in] M0 The number of rows to load, for non-partial blocks. Supported: 1-16
+ * @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
+ * @param[in] DATA_TYPE The data type of the vectors
+ * @param[in] BASENAME The basename of the variables
+ * @param[in] PTR The base pointer
+ * @param[in] OFFSET The offset within a row
+ * @param[in] STRIDE_Y The stride value in y-axis direction
+ * @param[in] Z The offset in z-axis direction
+ * @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported range: [1, @p N0)
+ * @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial load X. True to use PARTIAL_STORE_N0 rather than N0.
+ */
+#define LOAD_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z, PARTIAL_STORE_N0, \
+ PARTIAL_COND_X) \
+ if (!(PARTIAL_COND_X)) \
+ { \
+ LOAD_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z); \
+ } \
+ else \
+ { \
+ LOAD_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z); \
+ }
+/** Load a block that can only be partial in y but not x.
+ *
+ * @note in case @p N0 or @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vload(s) will be invoked, thus incurring a small performance penalty.
+ *
+ * The data to store is expected to have consecutive names for each row.
+ * E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
+ * The Z offset is expected to have consecutive names.
+ * E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
+ *
+ * @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
+ * @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
+ * @param[in] DATA_TYPE The data type of the vectors
+ * @param[in] BASENAME The basename of the variables
+ * @param[in] PTR The base pointer
+ * @param[in] OFFSET The offset within a row
+ * @param[in] STRIDE_Y The stride value in y-axis direction
+ * @param[in] Z The offset in z-axis direction
+ * @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported range: [1, @p M0)
+ * @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0.
+ */
+#define LOAD_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z, PARTIAL_STORE_M0, \
+ PARTIAL_COND_Y) \
+ if (!(PARTIAL_COND_Y)) \
+ { \
+ LOAD_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z); \
+ } \
+ else \
+ { \
+ LOAD_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z); \
+ }
+/** @} */ // end of group LOAD_BLOCK_PARTIAL
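A sketch of the partial-in-x variant; the destination vectors are zero-initialised beforehand so that lanes beyond the partial width hold a defined value. All names and the partial width of 3 are illustrative only:

    #include "gemm_helpers.h"

    __kernel void example_partial_load(__global uchar *src_ptr, uint src_stride_y, int last_block_x)
    {
        uint zin0 = 0, zin1 = 0;
        float4 a0 = 0.0f, a1 = 0.0f;
        // Full 4-wide loads everywhere except the last block column,
        // where only 3 of the 4 columns are valid (PARTIAL_STORE_N0 = 3).
        LOAD_BLOCK_PARTIAL_IN_X(2, 4, float, a, src_ptr, 0, src_stride_y, zin,
                                3, ((int)get_global_id(0) == last_block_x));
    }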
+/** Boundary-aware GeMM block load
+ * @name LOAD_BLOCK_BOUNDARY_AWARE
+ * This macro assumes the following schemes to achieve boundary-awareness:
+ * - Overlapping load in Y axis from lhs tensor. This implies lhs has no padding along y dim.
+ * - Non-overlapping (normal) load from rhs tensor. This implies rhs can have paddings.
+ * - Overlapping load in Y axis from bias tensor. This implies bias has no padding along y dim.
+ * The macro then ensures that the src tensor can be loaded without any paddings in both x and y dim.
+ *
+ * In the y dimension, we place the partial blocks **at the beginning** while in the x dimension, we place the partial
+ * blocks **at the end**.
+ * Say the src tensor is of shape MxN and M0 and N0 are the block sizes; this is how we define "partial blocks" /
+ * "boundary blocks" (the two terms are used interchangeably) and their various parameters:
+ *
+ * *--x--> x == 0 x == 1
+ * | |<------------------------------N-------------------------->|
+ * y |<--------------N0------------->|<----PARTIAL_STORE_N0----->|
+ * | -------------#############################################################
+ * * | | |...............................|...........................|
+ * y == 0 | PAR_..._M0 |......Boundary block in y......|.Boundary block in x and y.|
+ * | | |...............................|...........................|
+ * M --#############################################################
+ * | | | |...........................|
+ * y == 1 | M0 | Non-boundary block |....Boundary block in x....|
+ * | | | |...........................|
+ * |------------#############################################################
+ *
+ * Then @p PARTIAL_STORE_M0 = M % M0 and @p PARTIAL_STORE_N0 = N % N0
+ *
+ * @note in case @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vload(s) will be invoked, thus incurring a small performance penalty.
+ *
+ * It automatically detects whether a given M, N, M0, N0 combination can yield partial blocks in either the X or Y dimension,
+ * and selects the corresponding load methods such that the boundary detection logic is only added when needed.
+ *
+ * The data to load is expected to have consecutive names for each row.
+ * E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
+ * The Z offset is expected to have consecutive names.
+ * E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
+ *
+ * The macro will result in a declaration of @p M0 vectors of size @p N0 with data
+ * type @p DATA_TYPE containing values partially loaded from the specified
+ * address in memory. The remaining (N0 - PARTIAL_STORE_N0) elements will be
+ * filled with zeros.
+ *
+ * @param[in] M0 The number of rows to load, for non-partial blocks. Supported: 1-16
+ * @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
+ * @param[in] DATA_TYPE The data type of the vectors
+ * @param[in] BASENAME The basename of the variables
+ * @param[in] PTR The base pointer
+ * @param[in] OFFSET The offset within a row
+ * @param[in] STRIDE_Y The stride value in y-axis direction
+ * @param[in] Z The offset in z-axis direction
+ * @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported: [0, @p M0)
+ * @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported: [0, @p N0)
+ * @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial load Y. True to use PARTIAL_STORE_M0 rather than M0.
+ * @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial load X. True to use PARTIAL_STORE_N0 rather than N0.
+ * @{
+ */
+#if PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0
+// Case1: No partial blocks in either x or y
+#define LOAD_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z, PARTIAL_STORE_M0, \
+ PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
+ LOAD_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z)
+
+#elif PARTIAL_STORE_M0 > 0 && PARTIAL_STORE_N0 == 0
+// Case2: Partial blocks in y
+#define LOAD_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z, PARTIAL_STORE_M0, \
+ PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
+ REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE, N0), BASENAME, 0); \
+ LOAD_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y)
+
+#elif PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 > 0
+// Case3: Partial blocks in x
+#define LOAD_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z, PARTIAL_STORE_M0, \
+ PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
+ REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE, N0), BASENAME, 0); \
+ LOAD_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X)
+
+#else // PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0
+// Case4: Partial blocks in both x and y
+#define LOAD_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z, PARTIAL_STORE_M0, \
+ PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
+ REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE, N0), BASENAME, 0); \
+ LOAD_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z, PARTIAL_STORE_M0, \
+ PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X)
+
+#endif // PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0
+/** @} */ // end of group LOAD_BLOCK_BOUNDARY_AWARE
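A worked example of the scheme above: with M = 7, N = 10 and a 4x4 block, PARTIAL_STORE_M0 = 7 % 4 = 3 and PARTIAL_STORE_N0 = 10 % 4 = 2, so the partial rows sit in the first block row and the partial columns in the last block column. A usage sketch under those assumptions; all names are placeholders and -DPARTIAL_STORE_M0=3 -DPARTIAL_STORE_N0=2 are assumed build options:

    #include "gemm_helpers.h"
    #include "repeat.h" // REPEAT_VAR_INIT_TO_CONST is used by the boundary-aware cases

    __kernel void example_boundary_aware_load(__global uchar *lhs_ptr, uint lhs_stride_y, uint N)
    {
        const uint x = get_global_id(0);
        const uint y = get_global_id(1);
        uint zlhs0 = 0, zlhs1 = 0, zlhs2 = 0, zlhs3 = 0;
        // Declares and zero-fills float4 a0..a3, then loads a block that is partial
        // in y for the first block row and partial in x for the last block column.
        LOAD_BLOCK_BOUNDARY_AWARE(4, 4, float, a, lhs_ptr, 0, lhs_stride_y, zlhs,
                                  PARTIAL_STORE_M0, PARTIAL_STORE_N0,
                                  (y == 0), (x == (N / 4)));
    }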
+
/** Loads the rows from 0 to n-1 in the given variables (BASENAME0 to BASENAMEn-1).
* @name LOAD_TEXTURE2D_ROW_n
*
@@ -493,8 +809,10 @@
* @param[in] Y_STEP_ROW The incremental step row for the y coordinate (in pixels)
* @{
*/
-#define LOAD_TEXTURE2D_STR(M0, N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) LOAD_TEXTURE2D_ROW_##M0(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW)
-#define LOAD_TEXTURE2D(M0, N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) LOAD_TEXTURE2D_STR(M0, N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW)
+#define LOAD_TEXTURE2D_STR(M0, N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
+ LOAD_TEXTURE2D_ROW_##M0(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW)
+#define LOAD_TEXTURE2D(M0, N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
+ LOAD_TEXTURE2D_STR(M0, N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW)
/** @} */ // end of group LOAD_TEXTURE2D
/** Loads the rows from 0 to n-1 in the given variables (BASENAME0 to BASENAMEn-1) passing the Y index for each row to be loaded.
@@ -513,7 +831,7 @@
#define LOAD_ROW_INDIRECT_1(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##0; \
- if(Y_MASK##0 != 0) \
+ if (Y_MASK##0 != 0) \
BASENAME##0 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##0 * STRIDE_Y)); \
else \
BASENAME##0 = 0;
@@ -522,7 +840,7 @@
LOAD_ROW_INDIRECT_1(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##1; \
- if(Y_MASK##1 != 0) \
+ if (Y_MASK##1 != 0) \
BASENAME##1 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##1 * STRIDE_Y)); \
else \
BASENAME##1 = 0;
@@ -531,7 +849,7 @@
LOAD_ROW_INDIRECT_2(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##2; \
- if(Y_MASK##2 != 0) \
+ if (Y_MASK##2 != 0) \
BASENAME##2 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##2 * STRIDE_Y)); \
else \
BASENAME##2 = 0;
@@ -540,7 +858,7 @@
LOAD_ROW_INDIRECT_3(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##3; \
- if(Y_MASK##3 != 0) \
+ if (Y_MASK##3 != 0) \
BASENAME##3 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##3 * STRIDE_Y)); \
else \
BASENAME##3 = 0;
@@ -549,7 +867,7 @@
LOAD_ROW_INDIRECT_4(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##4; \
- if(Y_MASK##4 != 0) \
+ if (Y_MASK##4 != 0) \
BASENAME##4 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##4 * STRIDE_Y)); \
else \
BASENAME##4 = 0;
@@ -558,7 +876,7 @@
LOAD_ROW_INDIRECT_5(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##5; \
- if(Y_MASK##5 != 0) \
+ if (Y_MASK##5 != 0) \
BASENAME##5 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##5 * STRIDE_Y)); \
else \
BASENAME##5 = 0;
@@ -567,7 +885,7 @@
LOAD_ROW_INDIRECT_6(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##6; \
- if(Y_MASK##6 != 0) \
+ if (Y_MASK##6 != 0) \
BASENAME##6 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##6 * STRIDE_Y)); \
else \
BASENAME##6 = 0;
@@ -576,7 +894,7 @@
LOAD_ROW_INDIRECT_7(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##7; \
- if(Y_MASK##7 != 0) \
+ if (Y_MASK##7 != 0) \
BASENAME##7 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##7 * STRIDE_Y)); \
else \
BASENAME##7 = 0;
@@ -585,7 +903,7 @@
LOAD_ROW_INDIRECT_8(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##8; \
- if(Y_MASK##8 != 0) \
+ if (Y_MASK##8 != 0) \
BASENAME##8 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##8 * STRIDE_Y)); \
else \
BASENAME##8 = 0;
@@ -594,7 +912,7 @@
LOAD_ROW_INDIRECT_9(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##9; \
- if(Y_MASK##9 != 0) \
+ if (Y_MASK##9 != 0) \
BASENAME##9 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##9 * STRIDE_Y)); \
else \
BASENAME##9 = 0;
@@ -603,7 +921,7 @@
LOAD_ROW_INDIRECT_10(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##A; \
- if(Y_MASK##A != 0) \
+ if (Y_MASK##A != 0) \
BASENAME##A = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##A * STRIDE_Y)); \
else \
BASENAME##A = 0;
@@ -612,7 +930,7 @@
LOAD_ROW_INDIRECT_11(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##B; \
- if(Y_MASK##B != 0) \
+ if (Y_MASK##B != 0) \
BASENAME##B = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##B * STRIDE_Y)); \
else \
BASENAME##B = 0;
@@ -621,7 +939,7 @@
LOAD_ROW_INDIRECT_12(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##C; \
- if(Y_MASK##C != 0) \
+ if (Y_MASK##C != 0) \
BASENAME##C = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##C * STRIDE_Y)); \
else \
BASENAME##C = 0;
@@ -630,7 +948,7 @@
LOAD_ROW_INDIRECT_13(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##D; \
- if(Y_MASK##D != 0) \
+ if (Y_MASK##D != 0) \
BASENAME##D = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##D * STRIDE_Y)); \
else \
BASENAME##D = 0;
@@ -639,7 +957,7 @@
LOAD_ROW_INDIRECT_14(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##E; \
- if(Y_MASK##E != 0) \
+ if (Y_MASK##E != 0) \
BASENAME##E = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##E * STRIDE_Y)); \
else \
BASENAME##E = 0;
@@ -648,10 +966,11 @@
LOAD_ROW_INDIRECT_15(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##F; \
- if(Y_MASK##F != 0) \
+ if (Y_MASK##F != 0) \
BASENAME##F = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##F * STRIDE_Y)); \
else \
BASENAME##F = 0;
+/** @} */ // end of group LOAD_ROW_INDIRECT_n
/** Load blocks (consecutive rows and columns) with Y offset.
* @name LOAD_BLOCK_INDIRECT
@@ -673,8 +992,11 @@
* @param[in] Y_MASK The y-axis mask vector. If 0, forces BASENAMEn to 0
* @{
*/
-#define LOAD_BLOCK_INDIRECT_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) LOAD_ROW_INDIRECT_##M0(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK)
-#define LOAD_BLOCK_INDIRECT(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) LOAD_BLOCK_INDIRECT_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK)
+#define LOAD_BLOCK_INDIRECT_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ LOAD_ROW_INDIRECT_##M0(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK)
+#define LOAD_BLOCK_INDIRECT(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ LOAD_BLOCK_INDIRECT_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK)
+/** @} */ // end of group LOAD_BLOCK_INDIRECT
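LOAD_BLOCK_INDIRECT is essentially a masked gather over rows: row i is read from PTR + Yi * STRIDE_Y only when its mask is non-zero, otherwise it is forced to zero. A small sketch; the indices, masks and names are placeholders:

    #include "gemm_helpers.h"

    __kernel void example_indirect_load(__global uchar *src_ptr, uint src_stride_y, uint src_h)
    {
        // Per-row source indices and validity masks (an im2col-style gather).
        int y0 = 0, y1 = 2, y2 = 5;
        int m0 = 1, m1 = 1, m2 = (5 < src_h);
        // Declares float4 a0..a2; a2 stays 0 if row 5 falls outside the tensor.
        LOAD_BLOCK_INDIRECT(3, 4, float, a, src_ptr, 0, src_stride_y, y, m);
    }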
/** Loads the elements from 0 to n-1 in the given variables (BASENAME0 to BASENAMEn-1).
* @name LOAD_ELEMENT_n
@@ -784,8 +1106,10 @@
* @param[in] STRIDE_Y The stride in y-axis direction
* @{
*/
-#define LOAD_SCALAR_AS_VECTOR_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) LOAD_ELEMENT_##M0(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y)
-#define LOAD_SCALAR_AS_VECTOR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) LOAD_SCALAR_AS_VECTOR_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y)
+#define LOAD_SCALAR_AS_VECTOR_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
+ LOAD_ELEMENT_##M0(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y)
+#define LOAD_SCALAR_AS_VECTOR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
+ LOAD_SCALAR_AS_VECTOR_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y)
/** @} */ // end of group LOAD_SCALAR_AS_VECTOR
/** Basic macros to calculate Z offset values from Z0 to Zn-1
@@ -883,8 +1207,10 @@
* @param[in] STRIDE_Y The stride value in y-axis direction
* @{
*/
-#define CALCULATE_Z_OFFSET_STR(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) CALCULATE_Z_OFFSET_##M0(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y)
-#define CALCULATE_Z_OFFSET(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) CALCULATE_Z_OFFSET_STR(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y)
+#define CALCULATE_Z_OFFSET_STR(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
+ CALCULATE_Z_OFFSET_##M0(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y)
+#define CALCULATE_Z_OFFSET(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
+ CALCULATE_Z_OFFSET_STR(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y)
/** @} */ // end of group CALCULATE_Z_OFFSET
/** Scale the rows in the given variables (BASENAME0 to BASENAMEn-1)
@@ -895,8 +1221,7 @@
* @param[in] SCALE The scale factor
* @{
*/
-#define SCALE_ROW_1(DATA_TYPE, BASENAME, SCALE) \
- BASENAME##0 *= (DATA_TYPE)SCALE;
+#define SCALE_ROW_1(DATA_TYPE, BASENAME, SCALE) BASENAME##0 *= (DATA_TYPE)SCALE;
#define SCALE_ROW_2(DATA_TYPE, BASENAME, SCALE) \
SCALE_ROW_1(DATA_TYPE, BASENAME, SCALE) \
@@ -971,7 +1296,7 @@
* @{
*/
#define SCALE_BLOCK_STR(N, DATA_TYPE, BASENAME, SCALE) SCALE_ROW_##N(DATA_TYPE, BASENAME, SCALE)
-#define SCALE_BLOCK(N, DATA_TYPE, BASENAME, SCALE) SCALE_BLOCK_STR(N, DATA_TYPE, BASENAME, SCALE)
+#define SCALE_BLOCK(N, DATA_TYPE, BASENAME, SCALE) SCALE_BLOCK_STR(N, DATA_TYPE, BASENAME, SCALE)
/** @} */ // end of group SCALE_BLOCK
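A trivial sketch of SCALE_BLOCK, the kind of helper used to apply an alpha factor to an accumulator block. Here alpha is a kernel argument purely for illustration; in the GEMM kernels it is a compile-time define:

    #include "gemm_helpers.h"

    __kernel void example_scale_block(float alpha, __global float *out)
    {
        float4 c0 = 1.0f, c1 = 2.0f, c2 = 3.0f;
        // Each of c0, c1, c2 is multiplied element-wise by (float)alpha.
        SCALE_BLOCK(3, float, c, alpha);
        vstore4(c0, 0, out);
        vstore4(c1, 1, out);
        vstore4(c2, 2, out);
    }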
/** Create a new vector containing the values at the given index for a set of given vectors
@@ -983,8 +1308,7 @@
* @param[in] TYPE The data type of the destination vectors
* @{
*/
-#define COLUMN_VECTOR1(IDX_COL, BASENAME, X, TYPE) \
- TYPE BASENAME##IDX_COL = (TYPE)((X##0).s##IDX_COL);
+#define COLUMN_VECTOR1(IDX_COL, BASENAME, X, TYPE) TYPE BASENAME##IDX_COL = (TYPE)((X##0).s##IDX_COL);
#define COLUMN_VECTOR2(IDX_COL, BASENAME, X, TYPE) \
VEC_DATA_TYPE(TYPE, 2) \
BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 2))((X##0).s##IDX_COL, (X##1).s##IDX_COL);
@@ -993,13 +1317,20 @@
BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 3))((X##0).s##IDX_COL, (X##1).s##IDX_COL, (X##2).s##IDX_COL);
#define COLUMN_VECTOR4(IDX_COL, BASENAME, X, TYPE) \
VEC_DATA_TYPE(TYPE, 4) \
- BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 4))((X##0).s##IDX_COL, (X##1).s##IDX_COL, (X##2).s##IDX_COL, (X##3).s##IDX_COL);
-#define COLUMN_VECTOR8(IDX_COL, BASENAME, X, TYPE) \
- VEC_DATA_TYPE(TYPE, 8) \
- BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 8))((X##0).s##IDX_COL, (X##1).s##IDX_COL, (X##2).s##IDX_COL, (X##3).s##IDX_COL, (X##4).s##IDX_COL, (X##5).s##IDX_COL, (X##6).s##IDX_COL, (X##7).s##IDX_COL);
-#define COLUMN_VECTOR16(IDX_COL, BASENAME, X, TYPE) \
- VEC_DATA_TYPE(TYPE, 16) \
- BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 16))((X##0).s##IDX_COL, (X##1).s##IDX_COL, (X##2).s##IDX_COL, (X##3).s##IDX_COL, (X##4).s##IDX_COL, (X##5).s##IDX_COL, (X##6).s##IDX_COL, (X##7).s##IDX_COL, (X##8).s##IDX_COL, (X##9).s##IDX_COL, (X##A).s##IDX_COL, (X##B).s##IDX_COL, (X##C).s##IDX_COL, (X##D).s##IDX_COL, (X##E).s##IDX_COL, (X##F).s##IDX_COL);
+ BASENAME##IDX_COL = \
+ (VEC_DATA_TYPE(TYPE, 4))((X##0).s##IDX_COL, (X##1).s##IDX_COL, (X##2).s##IDX_COL, (X##3).s##IDX_COL);
+#define COLUMN_VECTOR8(IDX_COL, BASENAME, X, TYPE) \
+ VEC_DATA_TYPE(TYPE, 8) \
+ BASENAME##IDX_COL = \
+ (VEC_DATA_TYPE(TYPE, 8))((X##0).s##IDX_COL, (X##1).s##IDX_COL, (X##2).s##IDX_COL, (X##3).s##IDX_COL, \
+ (X##4).s##IDX_COL, (X##5).s##IDX_COL, (X##6).s##IDX_COL, (X##7).s##IDX_COL);
+#define COLUMN_VECTOR16(IDX_COL, BASENAME, X, TYPE) \
+ VEC_DATA_TYPE(TYPE, 16) \
+ BASENAME##IDX_COL = \
+ (VEC_DATA_TYPE(TYPE, 16))((X##0).s##IDX_COL, (X##1).s##IDX_COL, (X##2).s##IDX_COL, (X##3).s##IDX_COL, \
+ (X##4).s##IDX_COL, (X##5).s##IDX_COL, (X##6).s##IDX_COL, (X##7).s##IDX_COL, \
+ (X##8).s##IDX_COL, (X##9).s##IDX_COL, (X##A).s##IDX_COL, (X##B).s##IDX_COL, \
+ (X##C).s##IDX_COL, (X##D).s##IDX_COL, (X##E).s##IDX_COL, (X##F).s##IDX_COL);
/** @} */ // end of group COLUMN_VECTORn
/** Create a new vector containing the values at the given index. Utility macros for transposing a colum-vector
@@ -1011,8 +1342,7 @@
* @param[in] TYPE The data type of the destination vectors
* @{
*/
-#define COLUMN_VECTOR_SCALAR1(IDX_COL, BASENAME, X, TYPE) \
- TYPE BASENAME##IDX_COL = (TYPE)((X##0));
+#define COLUMN_VECTOR_SCALAR1(IDX_COL, BASENAME, X, TYPE) TYPE BASENAME##IDX_COL = (TYPE)((X##0));
#define COLUMN_VECTOR_SCALAR2(IDX_COL, BASENAME, X, TYPE) \
VEC_DATA_TYPE(TYPE, 2) \
BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 2))((X##0), (X##1));
@@ -1025,47 +1355,47 @@
#define COLUMN_VECTOR_SCALAR8(IDX_COL, BASENAME, X, TYPE) \
VEC_DATA_TYPE(TYPE, 8) \
BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 8))((X##0), (X##1), (X##2), (X##3), (X##4), (X##5), (X##6), (X##7));
-#define COLUMN_VECTOR_SCALAR16(IDX_COL, BASENAME, X, TYPE) \
- VEC_DATA_TYPE(TYPE, 16) \
- BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 16))((X##0), (X##1), (X##2), (X##3), (X##4), (X##5), (X##6), (X##7), (X##8), (X##9), (X##A), (X##B), (X##C), (X##D), (X##E), (X##F));
-/** @} */ // end of group COLUMN_VECTORn
+#define COLUMN_VECTOR_SCALAR16(IDX_COL, BASENAME, X, TYPE) \
+ VEC_DATA_TYPE(TYPE, 16) \
+ BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 16))((X##0), (X##1), (X##2), (X##3), (X##4), (X##5), (X##6), (X##7), \
+ (X##8), (X##9), (X##A), (X##B), (X##C), (X##D), (X##E), (X##F));
+/** @} */ // end of group COLUMN_VECTOR_SCALARn
/** Create transposed vectors of the given vectors
* @name TRANSPOSE_K0Xn
*
* @param[in] K0 The size of the source vectors
* @param[in] BASENAME The basename of transposed vectors
- * @param[in] B The basename of source vectors for transposition
+ * @param[in] BS The basename of source vectors for transposition
* @param[in] TYPE The data type of the transposed vectors
* @{
*/
-#define TRANSPOSE_K0X1(K0, BASENAME, B, TYPE) \
- COLUMN_VECTOR_SCALAR(K0, 0, BASENAME, B, TYPE);
-#define TRANSPOSE_K0X2(K0, BASENAME, B, TYPE) \
- COLUMN_VECTOR(K0, 0, BASENAME, B, TYPE); \
- COLUMN_VECTOR(K0, 1, BASENAME, B, TYPE);
-#define TRANSPOSE_K0X3(K0, BASENAME, B, TYPE) \
- TRANSPOSE_K0X2(K0, BASENAME, B, TYPE); \
- COLUMN_VECTOR(K0, 2, BASENAME, B, TYPE);
-#define TRANSPOSE_K0X4(K0, BASENAME, B, TYPE) \
- TRANSPOSE_K0X3(K0, BASENAME, B, TYPE); \
- COLUMN_VECTOR(K0, 3, BASENAME, B, TYPE);
-#define TRANSPOSE_K0X8(K0, BASENAME, B, TYPE) \
- TRANSPOSE_K0X4(K0, BASENAME, B, TYPE); \
- COLUMN_VECTOR(K0, 4, BASENAME, B, TYPE); \
- COLUMN_VECTOR(K0, 5, BASENAME, B, TYPE); \
- COLUMN_VECTOR(K0, 6, BASENAME, B, TYPE); \
- COLUMN_VECTOR(K0, 7, BASENAME, B, TYPE);
-#define TRANSPOSE_K0X16(K0, BASENAME, B, TYPE) \
- TRANSPOSE_K0X8(K0, BASENAME, B, TYPE); \
- COLUMN_VECTOR(K0, 8, BASENAME, B, TYPE); \
- COLUMN_VECTOR(K0, 9, BASENAME, B, TYPE); \
- COLUMN_VECTOR(K0, A, BASENAME, B, TYPE); \
- COLUMN_VECTOR(K0, B, BASENAME, B, TYPE); \
- COLUMN_VECTOR(K0, C, BASENAME, B, TYPE); \
- COLUMN_VECTOR(K0, D, BASENAME, B, TYPE); \
- COLUMN_VECTOR(K0, E, BASENAME, B, TYPE); \
- COLUMN_VECTOR(K0, F, BASENAME, B, TYPE);
+#define TRANSPOSE_K0X1(K0, BASENAME, BS, TYPE) COLUMN_VECTOR_SCALAR(K0, 0, BASENAME, BS, TYPE);
+#define TRANSPOSE_K0X2(K0, BASENAME, BS, TYPE) \
+ COLUMN_VECTOR(K0, 0, BASENAME, BS, TYPE); \
+ COLUMN_VECTOR(K0, 1, BASENAME, BS, TYPE);
+#define TRANSPOSE_K0X3(K0, BASENAME, BS, TYPE) \
+ TRANSPOSE_K0X2(K0, BASENAME, BS, TYPE); \
+ COLUMN_VECTOR(K0, 2, BASENAME, BS, TYPE);
+#define TRANSPOSE_K0X4(K0, BASENAME, BS, TYPE) \
+ TRANSPOSE_K0X3(K0, BASENAME, BS, TYPE); \
+ COLUMN_VECTOR(K0, 3, BASENAME, BS, TYPE);
+#define TRANSPOSE_K0X8(K0, BASENAME, BS, TYPE) \
+ TRANSPOSE_K0X4(K0, BASENAME, BS, TYPE); \
+ COLUMN_VECTOR(K0, 4, BASENAME, BS, TYPE); \
+ COLUMN_VECTOR(K0, 5, BASENAME, BS, TYPE); \
+ COLUMN_VECTOR(K0, 6, BASENAME, BS, TYPE); \
+ COLUMN_VECTOR(K0, 7, BASENAME, BS, TYPE);
+#define TRANSPOSE_K0X16(K0, BASENAME, BS, TYPE) \
+ TRANSPOSE_K0X8(K0, BASENAME, BS, TYPE); \
+ COLUMN_VECTOR(K0, 8, BASENAME, BS, TYPE); \
+ COLUMN_VECTOR(K0, 9, BASENAME, BS, TYPE); \
+ COLUMN_VECTOR(K0, A, BASENAME, BS, TYPE); \
+ COLUMN_VECTOR(K0, B, BASENAME, BS, TYPE); \
+ COLUMN_VECTOR(K0, C, BASENAME, BS, TYPE); \
+ COLUMN_VECTOR(K0, D, BASENAME, BS, TYPE); \
+ COLUMN_VECTOR(K0, E, BASENAME, BS, TYPE); \
+ COLUMN_VECTOR(K0, F, BASENAME, BS, TYPE);
/** @} */ // end of group TRANSPOSE_K0Xn
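A mechanical sketch of the transposition helpers applied to four 2-wide source vectors; the names and values are illustrative only:

    #include "gemm_helpers.h"

    __kernel void example_transpose(__global float *out)
    {
        float2 b0 = (float2)(1.0f, 2.0f);
        float2 b1 = (float2)(3.0f, 4.0f);
        float2 b2 = (float2)(5.0f, 6.0f);
        float2 b3 = (float2)(7.0f, 8.0f);
        // Declares float4 bt0 = (1, 3, 5, 7) and float4 bt1 = (2, 4, 6, 8).
        TRANSPOSE_K0XN0(4, 2, bt, b, float);
        vstore4(bt0, 0, out);
        vstore4(bt1, 1, out);
    }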
@@ -1074,37 +1404,37 @@
* @param[in] K0 The number of source vectors
* @param[in] IDX_COL The index value
* @param[in] BASENAME The basename of the destination vectors
- * @param[in] B The basename of the source vectors
+ * @param[in] BS The basename of the source vectors
* @param[in] TYPE The data type of the destination vectors
*/
-#define COLUMN_VECTOR(K0, IDX_COL, BASENAME, B, TYPE) \
- CONCAT(COLUMN_VECTOR, K0) \
- (IDX_COL, BASENAME, B, TYPE);
+#define COLUMN_VECTOR(K0, IDX_COL, BASENAME, BS, TYPE) \
+ CONCAT(COLUMN_VECTOR, K0) \
+ (IDX_COL, BASENAME, BS, TYPE);
/** Create column vectors to contain the values at the given index. Utility macro for transposing a column-vector
*
* @param[in] K0 The number of source vectors
* @param[in] IDX_COL The index value
* @param[in] BASENAME The basename of the destination vectors
- * @param[in] B The basename of the source vectors
+ * @param[in] BS The basename of the source vectors
* @param[in] TYPE The data type of the destination vectors
*/
-#define COLUMN_VECTOR_SCALAR(K0, IDX_COL, BASENAME, B, TYPE) \
- CONCAT(COLUMN_VECTOR_SCALAR, K0) \
- (IDX_COL, BASENAME, B, TYPE);
+#define COLUMN_VECTOR_SCALAR(K0, IDX_COL, BASENAME, BS, TYPE) \
+ CONCAT(COLUMN_VECTOR_SCALAR, K0) \
+ (IDX_COL, BASENAME, BS, TYPE);
/** Create transposed vectors form the given source vectors
*
* @param[in] K0 The size of source vectors
* @param[in] N0 The number of source vectors
* @param[in] BASENAME The basename of transposed vectors
- * @param[in] B The basename of source vectors for transposition
+ * @param[in] BS The basename of source vectors for transposition
* @param[in] TYPE The data type of the transposed vectors
*
*/
-#define TRANSPOSE_K0XN0(K0, N0, BASENAME, B, TYPE) \
- CONCAT(TRANSPOSE_K0X, N0) \
- (K0, BASENAME, B, TYPE);
+#define TRANSPOSE_K0XN0(K0, N0, BASENAME, BS, TYPE) \
+ CONCAT(TRANSPOSE_K0X, N0) \
+ (K0, BASENAME, BS, TYPE);
/** Add the variables (BIAS0 to BIASn-1) to the others (BASENAME0 to BASENAMEn-1)
* @name ADD_ROW_n
@@ -1113,8 +1443,7 @@
* @param[in] BIAS The basename of the added variables
* @{
*/
-#define ADD_ROW_1(BASENAME, BIAS) \
- BASENAME##0 += BIAS##0;
+#define ADD_ROW_1(BASENAME, BIAS) BASENAME##0 += BIAS##0;
#define ADD_ROW_2(BASENAME, BIAS) \
ADD_ROW_1(BASENAME, BIAS) \
@@ -1189,7 +1518,7 @@
* @{
*/
#define ADD_BLOCK_STR(N, BASENAME, BIAS) ADD_ROW_##N(BASENAME, BIAS)
-#define ADD_BLOCK(N, BASENAME, BIAS) ADD_BLOCK_STR(N, BASENAME, BIAS)
+#define ADD_BLOCK(N, BASENAME, BIAS) ADD_BLOCK_STR(N, BASENAME, BIAS)
/** @} */ // end of group ADD_BLOCK
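ADD_BLOCK adds one block of variables element-wise onto another block with matching row names, which is the typical bias-addition step. A small sketch with placeholder names:

    #include "gemm_helpers.h"

    __kernel void example_add_bias(__global uchar *bias_ptr, __global float *out)
    {
        float4 c0 = 0.0f, c1 = 0.0f;
        float4 bias0 = vload4(0, (__global float *)bias_ptr);
        float4 bias1 = vload4(1, (__global float *)bias_ptr);
        // Expands to: c0 += bias0; c1 += bias1;
        ADD_BLOCK(2, c, bias);
        vstore4(c0, 0, out);
        vstore4(c1, 1, out);
    }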
/** Broadcast (add single value) to the each element of the destination variables
@@ -1199,8 +1528,7 @@
* @param[in] BIAS The variable containing the value to add
* @{
*/
-#define ADD_ROW_BROADCAST_1(BASENAME, BIAS) \
- BASENAME##0 += BIAS;
+#define ADD_ROW_BROADCAST_1(BASENAME, BIAS) BASENAME##0 += BIAS;
#define ADD_ROW_BROADCAST_2(BASENAME, BIAS) \
ADD_ROW_BROADCAST_1(BASENAME, BIAS) \
@@ -1261,6 +1589,7 @@
#define ADD_ROW_BROADCAST_16(BASENAME, BIAS) \
ADD_ROW_BROADCAST_15(BASENAME, BIAS) \
BASENAME##F += BIAS;
+/** @} */ // end of group ADD_ROW_BROADCAST_n
/** Broadcast (add a value) to the each element of the destination block (BASENAME)
* @name ADD_BLOCK_BROADCAST
@@ -1273,7 +1602,7 @@
* @{
*/
#define ADD_BLOCK_BROADCAST_STR(N, BASENAME, BIAS) ADD_ROW_BROADCAST_##N(BASENAME, BIAS)
-#define ADD_BLOCK_BROADCAST(N, BASENAME, BIAS) ADD_BLOCK_BROADCAST_STR(N, BASENAME, BIAS)
+#define ADD_BLOCK_BROADCAST(N, BASENAME, BIAS) ADD_BLOCK_BROADCAST_STR(N, BASENAME, BIAS)
/** @} */ // end of group ADD_BLOCK_BROADCAST
/** Apply activation to the given variables
@@ -1363,8 +1692,10 @@
* @param[in] B_VAL Additional value required by the activation
* @{
*/
-#define ACTIVATION_BLOCK_STR(N, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) ACTIVATION_ROW_##N(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL)
-#define ACTIVATION_BLOCK(N, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) ACTIVATION_BLOCK_STR(N, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL)
+#define ACTIVATION_BLOCK_STR(N, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
+ ACTIVATION_ROW_##N(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL)
+#define ACTIVATION_BLOCK(N, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
+ ACTIVATION_BLOCK_STR(N, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL)
/** @} */ // end of group ACTIVATION_BLOCK
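A sketch of applying the fused activation to a 2x4 accumulator block. ACTIVATION_TYPE, A_VAL and B_VAL are assumed to arrive as -D build options, and the activation helper header is assumed to be pulled in via gemm_helpers.h; the kernel name and values below are illustrative only:

    #include "gemm_helpers.h"

    __kernel void example_activation(__global float *out)
    {
        float4 c0 = (float4)(-1.0f, 0.0f, 1.0f, 2.0f);
        float4 c1 = (float4)(3.0f, -4.0f, 5.0f, -6.0f);
    #if defined(ACTIVATION_TYPE)
        // e.g. built with -DACTIVATION_TYPE=relu -DA_VAL=0.0f -DB_VAL=0.0f (illustrative values)
        ACTIVATION_BLOCK(2, ACTIVATION_TYPE, float, 4, c, A_VAL, B_VAL);
    #endif
        vstore4(c0, 0, out);
        vstore4(c1, 1, out);
    }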
/** Apply convert_<data_type> to the given variables
@@ -1374,6 +1705,7 @@
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME_SRC The basename of the source variables
* @param[in] BASENAME_DST The basename of the destination variables
+ * @{
*/
#define CONVERT_ROW_1(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
VEC_DATA_TYPE(DATA_TYPE, N) \
@@ -1465,7 +1797,10 @@
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME_SRC The basename of the source variables
* @param[in] BASENAME_DST The basename of the destination variables
+ * @{
*/
-#define CONVERT_BLOCK_STR(M, N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) CONVERT_ROW_##M(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST)
-#define CONVERT_BLOCK(M, N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) CONVERT_BLOCK_STR(M, N, DATA_TYPE, BASENAME_SRC, BASENAME_DST)
-/** @} */ // end of group CONVERT_BLOCK
\ No newline at end of file
+#define CONVERT_BLOCK_STR(M, N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
+ CONVERT_ROW_##M(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST)
+#define CONVERT_BLOCK(M, N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
+ CONVERT_BLOCK_STR(M, N, DATA_TYPE, BASENAME_SRC, BASENAME_DST)
+/** @} */ // end of group CONVERT_BLOCK
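A final sketch for CONVERT_BLOCK, converting a block of float accumulators to a narrower output type before storing; the names and values are placeholders:

    #include "gemm_helpers.h"

    __kernel void example_convert_block(__global uchar *dst_ptr)
    {
        float4 c0 = (float4)(1.5f, 2.5f, 3.5f, 4.5f);
        float4 c1 = (float4)(5.5f, 6.5f, 7.5f, 8.5f);
        // Declares uchar4 c_q0 and c_q1 holding the converted values.
        CONVERT_BLOCK(2, 4, uchar, c, c_q);
        vstore4(c_q0, 0, dst_ptr);
        vstore4(c_q1, 1, dst_ptr);
    }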
diff --git a/src/core/CL/cl_kernels/gemm_v1.cl b/src/core/CL/cl_kernels/gemm_v1.cl
deleted file mode 100644
index 5f8b4f694e..0000000000
--- a/src/core/CL/cl_kernels/gemm_v1.cl
+++ /dev/null
@@ -1,3238 +0,0 @@
-/*
- * Copyright (c) 2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "gemm_helpers.h"
-#include "repeat.h"
-
-#if defined(M) && defined(N) && defined(K) && defined(H0) && defined(V0) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
-/** This OpenCL kernel is optimised for Midgard. It computes the matrix multiplication between matrix A reshaped (src0) and matrix B reshaped (src1)
- *
- * @note The number of rows of destination matrix must be passed at compile time using -DM
- * @note The number of columns of the destination matrix must be passed at compile time using -DN
- * @note The number of rows of the *un-reshaped* matrix B (K) must be passed at compile time using -DK
- * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note The optional alpha's value needs to be passed at compile time using -DALPHA
- * @note The multiplication factor for the transposition width (H0) must be passed at compile time using -DH0 (e.g. -DH0=2)
- * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DV0 (e.g. -DV0=2)
- * @note In case the matrix B has 3 dimensions and the matrix A more than 3, in order to avoid out-of-bounds reads, the number of channels of matrix B must be passed at compile time using MATRIX_B_DEPTH (e.g. -DMATRIX_B_DEPTH=16)
- * This case can happen when GEMM is used to perform the element-wise multiplication through a batched matrix multiplication (2D Winograd) and we have multiple inputs (e.g. a = [K, M, 16, Batches], b = [N, K, 16])
- *
- * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions must also be passed at compile time using -DA_VAL= and -DB_VAL= respectively.
- * The activation function is performed after the bias addition
- * @note In case the output has to be reinterpreted as a 3D tensor (e.g. output of convolution layer), the following information must be passed at compile time:
- * -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
- * -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
- * -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
- * (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- *
- * @param[in] src0_ptr Pointer to the source matrix. Supported data types: F32
- * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src0_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in]  src2_ptr                           (Optional) Pointer to the bias matrix. Supported data type: same as @p src0_ptr
- * @param[in] src2_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes)
- * @param[in] src2_step_x (Optional) src2_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src2_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes)
- * @param[in] src2_step_y (Optional) src2_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src2_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr
- * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- * @param[in] src0_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] src1_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] src2_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
- */
-__kernel void gemm_mm_interleaved_transposed_f32(IMAGE_DECLARATION(src0),
- IMAGE_DECLARATION(src1),
-#if defined(BETA)
- IMAGE_DECLARATION(src2),
-#endif // defined(BETA)
- IMAGE_DECLARATION(dst),
- uint src0_stride_z,
- uint src1_stride_z,
-#if defined(BETA)
- uint src2_stride_z,
-#endif //defined(BETA)
- uint dst_stride_z
-#if defined(REINTERPRET_OUTPUT_AS_3D)
- ,
- uint cross_plane_pad
-#endif // REINTERPRET_OUTPUT_AS_3D
- )
-{
- int x = get_global_id(0) / H0;
- int y = get_global_id(1) / V0;
- int z = get_global_id(2);
-
- // Offset
- const int offset_row_a = (get_global_id(1) % V0) * 4;
- const int offset_row_b = (get_global_id(0) % H0) * 4;
-
- // src_addr_a = address of matrix A
- // src_addr_b = address of matrix B
- int src0_addr_in_bytes = z * src0_stride_z + y * src0_stride_y + src0_offset_first_element_in_bytes;
- int src1_addr_in_bytes = x * src1_stride_y + src1_offset_first_element_in_bytes;
-
-#if defined(MATRIX_B_DEPTH)
- // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
- src1_addr_in_bytes += (z % MATRIX_B_DEPTH) * src1_stride_z;
-#else // defined(MATRIX_B_DEPTH)
- src1_addr_in_bytes += z * src1_stride_z;
-#endif // defined(MATRIX_B_DEPTH)
-
- __global float *src_addr_a = (__global float *)(src0_ptr + src0_addr_in_bytes);
- __global float *src_addr_b = (__global float *)(src1_ptr + src1_addr_in_bytes);
-
- // Compute end row address for matrix B
- __global float *src_end_addr_b = src_addr_b + (src1_stride_y / sizeof(float));
-
- src_addr_a += offset_row_a;
- src_addr_b += offset_row_b;
-
- // Reset accumulators
- float4 c0 = 0.0f;
- float4 c1 = 0.0f;
- float4 c2 = 0.0f;
- float4 c3 = 0.0f;
-
- for(; src_addr_b <= (src_end_addr_b - (int)(8 * H0)); src_addr_a += 8 * V0, src_addr_b += 8 * H0)
- {
- // Load values from matrix A (interleaved) and matrix B (transposed)
- float4 a0 = vload4(0, src_addr_a);
- float4 b0 = vload4(0, src_addr_b);
-
- c0 += (float4)a0.s0 * b0;
- c1 += (float4)a0.s1 * b0;
- c2 += (float4)a0.s2 * b0;
- c3 += (float4)a0.s3 * b0;
-
- // Load values from matrix A (interleaved) and matrix B (transposed)
- a0 = vload4(0, src_addr_a + 4 * V0);
- b0 = vload4(0, src_addr_b + 4 * H0);
-
- c0 += (float4)a0.s0 * b0;
- c1 += (float4)a0.s1 * b0;
- c2 += (float4)a0.s2 * b0;
- c3 += (float4)a0.s3 * b0;
- }
-
- for(; src_addr_b < src_end_addr_b; src_addr_a += 4 * V0, src_addr_b += 4 * H0)
- {
- // Load values from matrix A (interleaved) and matrix B (transposed)
- float4 a0 = vload4(0, src_addr_a);
- float4 b0 = vload4(0, src_addr_b);
-
- c0 += (float4)a0.s0 * b0;
- c1 += (float4)a0.s1 * b0;
- c2 += (float4)a0.s2 * b0;
- c3 += (float4)a0.s3 * b0;
- }
-
- // Compute destination address
- Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
-
- // Compute dst address
- __global uchar *dst_addr = offset(&dst, 0, 0);
-
- uint4 zout = 0;
-
-#if defined(REINTERPRET_OUTPUT_AS_3D)
- // Since we store a 2D output tile in a 3D tensor, we need to check when the plane changes across the z dimension
- // in order to take into account the presence of possible cross plane paddings
- //
- // | |
- // | plane0 |
- // | |
- // |__________________|
- // |******************|
- // | cross_plane_pad |
- // |******************|
- // | |
- // | plane1 |
- // | |
- // |__________________|
-
- // The plane (zout) is calculated dividing M (get_global_id(1) * 4) by HEIGHT_GEMM3D
- zout = ((uint4)(0, 1, 2, 3) + (uint4)(get_global_id(1) * 4)) / (uint4)HEIGHT_GEMM3D;
- zout = min(DEPTH_GEMM3D - 1, zout);
-
- // Add offset due to the cross plane paddings
- zout *= (cross_plane_pad * dst_stride_y);
-
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply dst_stride_z by DEPTH_GEMM3D
- dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
- // Add offset for batched GEMM
- dst_addr += z * dst_stride_z;
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-
- // Multiply by the weight of matrix-matrix product and store the result
-#if defined(ALPHA)
- SCALE_BLOCK(4, float, c, ALPHA);
-#endif // defined(ALPHA)
-
- // Add beta*bias
-#if defined(BETA)
- REPEAT_VAR_INIT_TO_CONST(4, uint, zero, 0);
-
-#if defined(BROADCAST_BIAS)
- __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)4 * sizeof(float));
-
- LOAD_BLOCK(1, 4, float, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
- SCALE_BLOCK(1, float, bias, BETA);
-#endif // UNIT_BETA
-
- // c = c + bias[broadcasted]
- ADD_BLOCK_BROADCAST(4, c, bias0);
-
-#else // defined(BROADCAST_BIAS)
- __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)4 * sizeof(float)) + (get_global_id(1) * (uint)4 * src2_stride_y) + get_global_id(
- 2) * src2_stride_z;
-
- LOAD_BLOCK(4, 4, float, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
- SCALE_BLOCK(4, float, bias, BETA);
-#endif // UNIT_BETA
-
- // c = c + bias
- ADD_BLOCK(4, c, bias);
-
-#endif // defined(BROADCAST_BIAS)
-#endif // defined(BETA)
-
-#if defined(ACTIVATION_TYPE)
- ACTIVATION_BLOCK(4, ACTIVATION_TYPE, float, VEC_SIZE, c, A_VAL, B_VAL);
-#endif // defined(ACTIVATION_TYPE)
-
- // Store 4x4 block
- const bool cond_y = ((get_global_id(1) + 1) * 4 >= M);
- const bool cond_x = ((get_global_id(0) + 1) * 4 >= N);
- STORE_BLOCK_BOUNDARY_AWARE(4, 4, float, c, dst_addr, dst_stride_y, zout.s, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-}
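// Editorial illustration, not part of the original file (all sizes below are assumed):
// a plausible set of build options for the interleaved/transposed F32 kernels above and below, for an
// un-reshaped A of 63x64 and B of 64x62 with 4x4 reshaping blocks, could be
//   -DM=63 -DN=62 -DK=64 -DH0=4 -DV0=4 -DPARTIAL_STORE_M0=3 -DPARTIAL_STORE_N0=2 -DALPHA=0.5f
// where the partial-store sizes are taken as the leftover rows/columns of the 4x4 output tile.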
-
-/** This OpenCL kernel is optimized for Bifrost and it computes the matrix multiplication between matrix A reshaped (src0) and matrix B reshaped (src1)
- *
- * @note The number of rows of destination matrix must be passed at compile time using -DM
- * @note The number of columns of the destination matrix must be passed at compile time using -DN
- * @note The number of rows of the *un-reshaped* matrix B (K) must be passed at compile time using -DK
- * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note The optional alpha's value needs to be passed at compile time using -DALPHA
- * @note The multiplication factor for the transposition width (H0) must be passed at compile time using -DH0 (e.g. -DH0=2)
- * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DV0 (e.g. -DV0=2)
- * @note In case the matrix B has 3 dimensions and the matrix A more than 3, in order to avoid out-of-bounds reads, the number of channels of matrix B must be passed at compile time using MATRIX_B_DEPTH (e.g. -DMATRIX_B_DEPTH=16)
- * This case can happen when GEMM is used to perform the element-wise multiplication through a batched matrix multiplication (2D Winograd) and we have multiple inputs (e.g. a = [K, M, 16, Batches], b = [N, K, 16])
- *
- * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions must also be passed at compile time using -DA_VAL= and -DB_VAL= respectively.
- * The activation function is performed after the bias addition
- * @note In case the output has to be reinterpreted as a 3D tensor (e.g. output of convolution layer), the following information must be passed at compile time:
- * -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
- * -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
- * -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
- * (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- *
- * @param[in] src0_ptr Pointer to the source matrix. Supported data types: F32
- * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src0_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in]  src2_ptr                           (Optional) Pointer to the bias matrix. Supported data type: same as @p src0_ptr
- * @param[in] src2_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes)
- * @param[in] src2_step_x (Optional) src2_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src2_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes)
- * @param[in] src2_step_y (Optional) src2_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src2_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr
- * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- * @param[in] src0_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] src1_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] src2_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
- */
-__kernel void gemm_mm_interleaved_transposed_f32_bifrost(IMAGE_DECLARATION(src0),
- IMAGE_DECLARATION(src1),
-#if defined(BETA)
- IMAGE_DECLARATION(src2),
-#endif // defined(BETA)
- IMAGE_DECLARATION(dst),
- uint src0_stride_z,
- uint src1_stride_z,
-#if defined(BETA)
- uint src2_stride_z,
-#endif //defined(BETA)
- uint dst_stride_z
-#if defined(REINTERPRET_OUTPUT_AS_3D)
- ,
- uint cross_plane_pad
-#endif // REINTERPRET_OUTPUT_AS_3D
- )
-{
- int x = get_global_id(0) / H0;
- int y = get_global_id(1) / V0;
- int z = get_global_id(2);
-
- // Offset
- const int offset_row_a = (get_global_id(1) % V0) * 4;
- const int offset_row_b = (get_global_id(0) % H0) * 4;
-
- // src_addr_a = address of matrix A
- // src_addr_b = address of matrix B
- int src0_addr_in_bytes = z * src0_stride_z + y * src0_stride_y + src0_offset_first_element_in_bytes;
- int src1_addr_in_bytes = x * src1_stride_y + src1_offset_first_element_in_bytes;
-
-#if defined(MATRIX_B_DEPTH)
- // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
- src1_addr_in_bytes += (z % MATRIX_B_DEPTH) * src1_stride_z;
-#else // defined(MATRIX_B_DEPTH)
- src1_addr_in_bytes += z * src1_stride_z;
-#endif // defined(MATRIX_B_DEPTH)
-
- __global float *src_addr_a = (__global float *)(src0_ptr + src0_addr_in_bytes);
- __global float *src_addr_b = (__global float *)(src1_ptr + src1_addr_in_bytes);
-
- src_addr_a += offset_row_a;
- src_addr_b += offset_row_b;
-
- // Reset accumulators
- float4 c0 = 0.0f;
- float4 c1 = 0.0f;
- float4 c2 = 0.0f;
- float4 c3 = 0.0f;
-
- int i = 0;
- for(; i <= (int)(K - 4); i += 4)
- {
- // Load values from matrix A (interleaved) and matrix B (transposed)
- float4 a0 = vload4(0, src_addr_a);
- float4 b0 = vload4(0, src_addr_b);
-
- src_addr_a += 4 * V0;
- src_addr_b += 4 * H0;
-
- c0.s0 = fma(a0.s0, b0.s0, c0.s0);
- c0.s1 = fma(a0.s0, b0.s1, c0.s1);
- c0.s2 = fma(a0.s0, b0.s2, c0.s2);
- c0.s3 = fma(a0.s0, b0.s3, c0.s3);
-
- c1.s0 = fma(a0.s1, b0.s0, c1.s0);
- c1.s1 = fma(a0.s1, b0.s1, c1.s1);
- c1.s2 = fma(a0.s1, b0.s2, c1.s2);
- c1.s3 = fma(a0.s1, b0.s3, c1.s3);
-
- c2.s0 = fma(a0.s2, b0.s0, c2.s0);
- c2.s1 = fma(a0.s2, b0.s1, c2.s1);
- c2.s2 = fma(a0.s2, b0.s2, c2.s2);
- c2.s3 = fma(a0.s2, b0.s3, c2.s3);
-
- c3.s0 = fma(a0.s3, b0.s0, c3.s0);
- c3.s1 = fma(a0.s3, b0.s1, c3.s1);
- c3.s2 = fma(a0.s3, b0.s2, c3.s2);
- c3.s3 = fma(a0.s3, b0.s3, c3.s3);
-
- // Load values from matrix A (interleaved) and matrix B (transposed)
- a0 = vload4(0, src_addr_a);
- b0 = vload4(0, src_addr_b);
-
- src_addr_a += 4 * V0;
- src_addr_b += 4 * H0;
-
- c0.s0 = fma(a0.s0, b0.s0, c0.s0);
- c0.s1 = fma(a0.s0, b0.s1, c0.s1);
- c0.s2 = fma(a0.s0, b0.s2, c0.s2);
- c0.s3 = fma(a0.s0, b0.s3, c0.s3);
-
- c1.s0 = fma(a0.s1, b0.s0, c1.s0);
- c1.s1 = fma(a0.s1, b0.s1, c1.s1);
- c1.s2 = fma(a0.s1, b0.s2, c1.s2);
- c1.s3 = fma(a0.s1, b0.s3, c1.s3);
-
- c2.s0 = fma(a0.s2, b0.s0, c2.s0);
- c2.s1 = fma(a0.s2, b0.s1, c2.s1);
- c2.s2 = fma(a0.s2, b0.s2, c2.s2);
- c2.s3 = fma(a0.s2, b0.s3, c2.s3);
-
- c3.s0 = fma(a0.s3, b0.s0, c3.s0);
- c3.s1 = fma(a0.s3, b0.s1, c3.s1);
- c3.s2 = fma(a0.s3, b0.s2, c3.s2);
- c3.s3 = fma(a0.s3, b0.s3, c3.s3);
-
- // Load values from matrix A (interleaved) and matrix B (transposed)
- a0 = vload4(0, src_addr_a);
- b0 = vload4(0, src_addr_b);
-
- src_addr_a += 4 * V0;
- src_addr_b += 4 * H0;
-
- c0.s0 = fma(a0.s0, b0.s0, c0.s0);
- c0.s1 = fma(a0.s0, b0.s1, c0.s1);
- c0.s2 = fma(a0.s0, b0.s2, c0.s2);
- c0.s3 = fma(a0.s0, b0.s3, c0.s3);
-
- c1.s0 = fma(a0.s1, b0.s0, c1.s0);
- c1.s1 = fma(a0.s1, b0.s1, c1.s1);
- c1.s2 = fma(a0.s1, b0.s2, c1.s2);
- c1.s3 = fma(a0.s1, b0.s3, c1.s3);
-
- c2.s0 = fma(a0.s2, b0.s0, c2.s0);
- c2.s1 = fma(a0.s2, b0.s1, c2.s1);
- c2.s2 = fma(a0.s2, b0.s2, c2.s2);
- c2.s3 = fma(a0.s2, b0.s3, c2.s3);
-
- c3.s0 = fma(a0.s3, b0.s0, c3.s0);
- c3.s1 = fma(a0.s3, b0.s1, c3.s1);
- c3.s2 = fma(a0.s3, b0.s2, c3.s2);
- c3.s3 = fma(a0.s3, b0.s3, c3.s3);
-
- // Load values from matrix A (interleaved) and matrix B (transposed)
- a0 = vload4(0, src_addr_a);
- b0 = vload4(0, src_addr_b);
-
- src_addr_a += 4 * V0;
- src_addr_b += 4 * H0;
-
- c0.s0 = fma(a0.s0, b0.s0, c0.s0);
- c0.s1 = fma(a0.s0, b0.s1, c0.s1);
- c0.s2 = fma(a0.s0, b0.s2, c0.s2);
- c0.s3 = fma(a0.s0, b0.s3, c0.s3);
-
- c1.s0 = fma(a0.s1, b0.s0, c1.s0);
- c1.s1 = fma(a0.s1, b0.s1, c1.s1);
- c1.s2 = fma(a0.s1, b0.s2, c1.s2);
- c1.s3 = fma(a0.s1, b0.s3, c1.s3);
-
- c2.s0 = fma(a0.s2, b0.s0, c2.s0);
- c2.s1 = fma(a0.s2, b0.s1, c2.s1);
- c2.s2 = fma(a0.s2, b0.s2, c2.s2);
- c2.s3 = fma(a0.s2, b0.s3, c2.s3);
-
- c3.s0 = fma(a0.s3, b0.s0, c3.s0);
- c3.s1 = fma(a0.s3, b0.s1, c3.s1);
- c3.s2 = fma(a0.s3, b0.s2, c3.s2);
- c3.s3 = fma(a0.s3, b0.s3, c3.s3);
- }
-
- for(; i < (int)K; ++i)
- {
- // Load values from matrix A (interleaved) and matrix B (transposed)
- float4 a0 = vload4(0, src_addr_a);
- float4 b0 = vload4(0, src_addr_b);
-
- src_addr_a += 4 * V0;
- src_addr_b += 4 * H0;
-
- c0.s0 = fma(a0.s0, b0.s0, c0.s0);
- c0.s1 = fma(a0.s0, b0.s1, c0.s1);
- c0.s2 = fma(a0.s0, b0.s2, c0.s2);
- c0.s3 = fma(a0.s0, b0.s3, c0.s3);
-
- c1.s0 = fma(a0.s1, b0.s0, c1.s0);
- c1.s1 = fma(a0.s1, b0.s1, c1.s1);
- c1.s2 = fma(a0.s1, b0.s2, c1.s2);
- c1.s3 = fma(a0.s1, b0.s3, c1.s3);
-
- c2.s0 = fma(a0.s2, b0.s0, c2.s0);
- c2.s1 = fma(a0.s2, b0.s1, c2.s1);
- c2.s2 = fma(a0.s2, b0.s2, c2.s2);
- c2.s3 = fma(a0.s2, b0.s3, c2.s3);
-
- c3.s0 = fma(a0.s3, b0.s0, c3.s0);
- c3.s1 = fma(a0.s3, b0.s1, c3.s1);
- c3.s2 = fma(a0.s3, b0.s2, c3.s2);
- c3.s3 = fma(a0.s3, b0.s3, c3.s3);
- }
-
- // Compute destination address
- Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
-
- // Compute dst address
- __global uchar *dst_addr = offset(&dst, 0, 0);
-
- uint4 zout = 0;
-
-#if defined(REINTERPRET_OUTPUT_AS_3D)
- // Since we store a 2D output tile in a 3D tensor, we need to check when the plane changes across the z dimension
- // in order to take into account the presence of possible cross plane paddings
- //
- // | |
- // | plane0 |
- // | |
- // |__________________|
- // |******************|
- // | cross_plane_pad |
- // |******************|
- // | |
- // | plane1 |
- // | |
- // |__________________|
-
- // The plane (zout) is calculated dividing M (get_global_id(1) * 4) by HEIGHT_GEMM3D
- zout = ((uint4)(0, 1, 2, 3) + (uint4)(get_global_id(1) * 4)) / (uint4)HEIGHT_GEMM3D;
- zout = min(DEPTH_GEMM3D - 1, zout);
-
- // Add offset due to the cross plane paddings
- zout *= (cross_plane_pad * dst_stride_y);
-
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply dst_stride_z by DEPTH_GEMM3D
- dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
- // Add offset for batched GEMM
- dst_addr += z * dst_stride_z;
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-
- // Multiply by the weight of matrix-matrix product and store the result
-#if defined(ALPHA)
- SCALE_BLOCK(4, float, c, ALPHA);
-#endif // defined(ALPHA)
-
- // Add beta*bias
-#if defined(BETA)
- REPEAT_VAR_INIT_TO_CONST(4, uint, zero, 0);
-
-#if defined(BROADCAST_BIAS)
- __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)4 * sizeof(float));
-
- LOAD_BLOCK(1, 4, float, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
- SCALE_BLOCK(1, float, bias, BETA);
-#endif // UNIT_BETA
-
- // c = c + bias[broadcasted]
- ADD_BLOCK_BROADCAST(4, c, bias0);
-
-#else // defined(BROADCAST_BIAS)
- __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)4 * sizeof(float)) + (get_global_id(1) * (uint)4 * src2_stride_y) + get_global_id(
- 2) * src2_stride_z;
-
- LOAD_BLOCK(4, 4, float, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
- SCALE_BLOCK(4, float, bias, BETA);
-#endif // UNIT_BETA
-
- // c = c + bias
- ADD_BLOCK(4, c, bias);
-
-#endif // defined(BROADCAST_BIAS)
-#endif // defined(BETA)
-
-#if defined(ACTIVATION_TYPE)
- ACTIVATION_BLOCK(4, ACTIVATION_TYPE, float, VEC_SIZE, c, A_VAL, B_VAL);
-#endif // defined(ACTIVATION_TYPE)
-
- // Store 4x4 block
- const bool cond_y = ((get_global_id(1) + 1) * 4 >= M);
- const bool cond_x = ((get_global_id(0) + 1) * 4 >= N);
- STORE_BLOCK_BOUNDARY_AWARE(4, 4, float, c, dst_addr, dst_stride_y, zout.s, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-}
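// Editorial illustration, not part of the original file (HEIGHT_GEMM3D and DEPTH_GEMM3D values are assumed):
// worked example of the REINTERPRET_OUTPUT_AS_3D indexing used above: with HEIGHT_GEMM3D=8 and
// DEPTH_GEMM3D=2, the work-item with get_global_id(1)=2 writes output rows 8..11, so
//   zout = ((uint4)(0, 1, 2, 3) + (uint4)8) / (uint4)8 = (1, 1, 1, 1)
// and, after clamping to DEPTH_GEMM3D - 1, each of those rows is offset by 1 * cross_plane_pad * dst_stride_y
// bytes, skipping the padding inserted between plane 0 and plane 1.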
-
-#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED)
-/** This OpenCL kernel computes the matrix multiplication between matrix A reshaped (src0) and matrix B reshaped (src1)
- *
- * @note The number of rows of destination matrix must be passed at compile time using -DM
- * @note The number of columns of the destination matrix must be passed at compile time using -DN
- * @note The number of rows of the *un-reshaped* matrix B (K) must be passed at compile time using -DK
- * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note The optional alpha's value needs to be passed at compile time using -DALPHA
- * @note The multiplication factor for the transposition width (H0) must be passed at compile time using -DH0 (e.g. -DH0=2)
- * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DV0 (e.g. -DV0=2)
- * @note In case the matrix B has 3 dimensions and the matrix A more than 3, in order to avoid out-of-bounds reads, the number of channels of matrix B must be passed at compile time using MATRIX_B_DEPTH (e.g. -DMATRIX_B_DEPTH=16)
- * This case can happen when GEMM is used to perform the element-wise multiplication through a batched matrix multiplication (2D Winograd) and we have multiple inputs (e.g. a = [K, M, 16, Batches], b = [N, K, 16])
- *
- * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions must also be passed at compile time using -DA_VAL= and -DB_VAL= respectively.
- * The activation function is performed after the bias addition
- * @note In case the output has to be reinterpreted as a 3D tensor (e.g. output of convolution layer), the following information must be passed at compile time:
- * -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
- * -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
- * -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
- * (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- *
- * @param[in] src0_ptr Pointer to the source matrix. Supported data types: F16
- * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src0_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in]  src2_ptr                           (Optional) Pointer to the bias matrix. Supported data type: same as @p src0_ptr
- * @param[in] src2_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes)
- * @param[in] src2_step_x (Optional) src2_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src2_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes)
- * @param[in] src2_step_y (Optional) src2_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src2_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr
- * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- * @param[in] src0_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] src1_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] src2_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
- */
-__kernel void gemm_mm_interleaved_transposed_f16(IMAGE_DECLARATION(src0),
- IMAGE_DECLARATION(src1),
-#if defined(BETA)
- IMAGE_DECLARATION(src2),
-#endif // defined(BETA)
- IMAGE_DECLARATION(dst),
- uint src0_stride_z,
- uint src1_stride_z,
-#if defined(BETA)
- uint src2_stride_z,
-#endif //defined(BETA)
- uint dst_stride_z
-#if defined(REINTERPRET_OUTPUT_AS_3D)
- ,
- uint cross_plane_pad
-#endif // REINTERPRET_OUTPUT_AS_3D
- )
-{
- int x = get_global_id(0) / H0;
- int y = get_global_id(1) / V0;
- int z = get_global_id(2);
-
- // Offset
- const int offset_row_a = (get_global_id(1) % V0) * 4;
- const int offset_row_b = (get_global_id(0) % H0) * 8;
-
- // src_addr_a = address of matrix A
- // src_addr_b = address of matrix B
- int src0_addr_in_bytes = z * src0_stride_z + y * src0_stride_y + src0_offset_first_element_in_bytes;
- int src1_addr_in_bytes = x * src1_stride_y + src1_offset_first_element_in_bytes;
-
-#if defined(MATRIX_B_DEPTH)
- // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
- src1_addr_in_bytes += (z % MATRIX_B_DEPTH) * src1_stride_z;
-#else // defined(MATRIX_B_DEPTH)
- src1_addr_in_bytes += z * src1_stride_z;
-#endif // defined(MATRIX_B_DEPTH)
-
- __global half *src_addr_a = (__global half *)(src0_ptr + src0_addr_in_bytes);
- __global half *src_addr_b = (__global half *)(src1_ptr + src1_addr_in_bytes);
-
- // Compute end row address for matrix B
- __global half *src_end_addr_b = src_addr_b + (src1_stride_y / sizeof(half));
-
- src_addr_a += offset_row_a;
- src_addr_b += offset_row_b;
-
- // Reset accumulators
- half8 c0 = 0.0f;
- half8 c1 = 0.0f;
- half8 c2 = 0.0f;
- half8 c3 = 0.0f;
-
- for(; src_addr_b <= (src_end_addr_b - (int)(16 * H0)); src_addr_a += 8 * V0, src_addr_b += 16 * H0)
- {
- // Load values from matrix A (interleaved) and matrix B (transposed)
- half4 a0 = vload4(0, src_addr_a);
- half8 b0 = vload8(0, src_addr_b);
-
- c0 += (half8)a0.s0 * b0;
- c1 += (half8)a0.s1 * b0;
- c2 += (half8)a0.s2 * b0;
- c3 += (half8)a0.s3 * b0;
-
- // Load values from matrix A (interleaved) and matrix B (transposed)
- a0 = vload4(0, src_addr_a + 4 * V0);
- b0 = vload8(0, src_addr_b + 8 * H0);
-
- c0 += (half8)a0.s0 * b0;
- c1 += (half8)a0.s1 * b0;
- c2 += (half8)a0.s2 * b0;
- c3 += (half8)a0.s3 * b0;
- }
-
- for(; src_addr_b < src_end_addr_b; src_addr_a += 4 * V0, src_addr_b += 8 * H0)
- {
- // Load values from matrix A (interleaved) and matrix B (transposed)
- half4 a0 = vload4(0, src_addr_a);
- half8 b0 = vload8(0, src_addr_b);
-
- c0 += (half8)a0.s0 * b0;
- c1 += (half8)a0.s1 * b0;
- c2 += (half8)a0.s2 * b0;
- c3 += (half8)a0.s3 * b0;
- }
-
- // Compute destination address
- Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
-
- // Compute dst address
- __global uchar *dst_addr = offset(&dst, 0, 0);
-
- uint4 zout = 0;
-
-#if defined(REINTERPRET_OUTPUT_AS_3D)
- // Since we store a 2D output tile in a 3D tensor, we need to check when the plane changes across the z dimension
- // in order to take into account the presence of possible cross plane paddings
- //
- // | |
- // | plane0 |
- // | |
- // |__________________|
- // |******************|
- // | cross_plane_pad |
- // |******************|
- // | |
- // | plane1 |
- // | |
- // |__________________|
-
- // The plane (zout) is calculated dividing M (get_global_id(1) * 4) by HEIGHT_GEMM3D
- zout = ((uint4)(0, 1, 2, 3) + (uint4)(get_global_id(1) * 4)) / (uint4)HEIGHT_GEMM3D;
- zout = min(DEPTH_GEMM3D - 1, zout);
-
- // Add offset due to the cross plane paddings
- zout *= (cross_plane_pad * dst_stride_y);
-
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply dst_stride_z by DEPTH_GEMM3D
- dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
- // Add offset for batched GEMM
- dst_addr += z * dst_stride_z;
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-
- // Multiply by the weight of matrix-matrix product and store the result
-#if defined(ALPHA)
- SCALE_BLOCK(4, half, c, ALPHA);
-#endif // defined(ALPHA)
-
- // Add beta*bias
-#if defined(BETA)
- REPEAT_VAR_INIT_TO_CONST(4, uint, zero, 0);
-
-#if defined(BROADCAST_BIAS)
- __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half));
-
- LOAD_BLOCK(1, 8, half, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
- SCALE_BLOCK(1, half, bias, BETA);
-#endif // UNIT_BETA
-
- // c = c + bias[broadcasted]
- ADD_BLOCK_BROADCAST(4, c, bias0);
-
-#else // defined(BROADCAST_BIAS)
-
- __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half)) + (get_global_id(1) * (uint)4 * src2_stride_y) + get_global_id(
- 2) * src2_stride_z;
-
- LOAD_BLOCK(4, 8, half, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
- SCALE_BLOCK(4, half, bias, BETA);
-#endif // UNIT_BETA
-
- // c = c + bias
- ADD_BLOCK(4, c, bias);
-
-#endif // defined(BROADCAST_BIAS)
-#endif // defined(BETA)
-
-#if defined(ACTIVATION_TYPE)
- ACTIVATION_BLOCK(4, ACTIVATION_TYPE, half, VEC_SIZE, c, A_VAL, B_VAL);
-#endif // defined(ACTIVATION_TYPE)
-
- // Store 4x8 block
- const bool cond_y = ((get_global_id(1) + 1) * 4 >= M);
- const bool cond_x = ((get_global_id(0) + 1) * 8 >= N);
- STORE_BLOCK_BOUNDARY_AWARE(4, 8, half, c, dst_addr, dst_stride_y, zout.s, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-}
-
-/** This OpenCL kernel computes the matrix multiplication between matrix A reshaped (src0) and matrix B reshaped (src1) while accumulating the result in a 32-bit floating point variable.
- *
- * @note The number of rows of destination matrix must be passed at compile time using -DM
- * @note The number of columns of the destination matrix must be passed at compile time using -DN
- * @note The number of rows of the *un-reshaped* matrix B (K) must be passed at compile time using -DK
- * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note The optional alpha's value needs to be passed at compile time using -DALPHA
- * @note The multiplication factor for the transposition width (H0) must be passed at compile time using -DH0 (e.g. -DH0=2)
- * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DV0 (e.g. -DV0=2)
- * @note In case the matrix B has 3 dimensions and the matrix A more than 3, in order to avoid out-of-bounds reads, the number of channels of matrix B must be passed at compile time using MATRIX_B_DEPTH (e.g. -DMATRIX_B_DEPTH=16)
- * This case can happen when GEMM is used to perform the element-wise multiplication through a batched matrix multiplication (2D Winograd) and we have multiple inputs (e.g. a = [K, M, 16, Batches], b = [N, K, 16])
- *
- * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions must also be passed at compile time using -DA_VAL= and -DB_VAL= respectively.
- * The activation function is performed after the bias addition
- * @note In case the output has to be reinterpreted as a 3D tensor (e.g. output of convolution layer), the following information must be passed at compile time:
- * -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
- * -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
- * -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
- * (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- *
- * @param[in] src0_ptr Pointer to the source matrix. Supported data types: F16
- * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src0_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in]  src2_ptr                           (Optional) Pointer to the bias matrix. Supported data type: same as @p src0_ptr
- * @param[in] src2_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes)
- * @param[in] src2_step_x (Optional) src2_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src2_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes)
- * @param[in] src2_step_y (Optional) src2_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src2_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr
- * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- * @param[in] src0_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] src1_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] src2_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
- */
-__kernel void gemm_mm_interleaved_transposed_f16_acc32(IMAGE_DECLARATION(src0),
- IMAGE_DECLARATION(src1),
-#if defined(BETA)
- IMAGE_DECLARATION(src2),
-#endif // defined(BETA)
- IMAGE_DECLARATION(dst),
- uint src0_stride_z,
- uint src1_stride_z,
-#if defined(BETA)
- uint src2_stride_z,
-#endif //defined(BETA)
- uint dst_stride_z
-#if defined(REINTERPRET_OUTPUT_AS_3D)
- ,
- uint cross_plane_pad
-#endif // REINTERPRET_OUTPUT_AS_3D
- )
-{
- int x = get_global_id(0) / H0;
- int y = get_global_id(1) / V0;
- int z = get_global_id(2);
-
- // Offset
- const int offset_row_a = (get_global_id(1) % V0) * 4;
- const int offset_row_b = (get_global_id(0) % H0) * 8;
-
- // src_addr_a = address of matrix A
- // src_addr_b = address of matrix B
- int src0_addr_in_bytes = z * src0_stride_z + y * src0_stride_y + src0_offset_first_element_in_bytes;
- int src1_addr_in_bytes = x * src1_stride_y + src1_offset_first_element_in_bytes;
-
-#if defined(MATRIX_B_DEPTH)
- // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
- src1_addr_in_bytes += (z % MATRIX_B_DEPTH) * src1_stride_z;
-#else // defined(MATRIX_B_DEPTH)
- src1_addr_in_bytes += z * src1_stride_z;
-#endif // defined(MATRIX_B_DEPTH)
-
- __global half *src_addr_a = (__global half *)(src0_ptr + src0_addr_in_bytes);
- __global half *src_addr_b = (__global half *)(src1_ptr + src1_addr_in_bytes);
-
- // Compute end row address for matrix B
- __global half *src_end_addr_b = src_addr_b + (src1_stride_y / sizeof(half));
-
- src_addr_a += offset_row_a;
- src_addr_b += offset_row_b;
-
- // Reset accumulators
- float8 c0 = 0.0f;
- float8 c1 = 0.0f;
- float8 c2 = 0.0f;
- float8 c3 = 0.0f;
-
- for(; src_addr_b <= (src_end_addr_b - (int)(16 * H0)); src_addr_a += 8 * V0, src_addr_b += 16 * H0)
- {
- // Load values from matrix A (interleaved) and matrix B (transposed)
- float4 a0 = convert_float4(vload4(0, src_addr_a));
- float8 b0 = convert_float8(vload8(0, src_addr_b));
-
- c0 += (float8)a0.s0 * b0;
- c1 += (float8)a0.s1 * b0;
- c2 += (float8)a0.s2 * b0;
- c3 += (float8)a0.s3 * b0;
-
- // Load values from matrix A (interleaved) and matrix B (transposed)
- a0 = convert_float4(vload4(0, src_addr_a + 4 * V0));
- b0 = convert_float8(vload8(0, src_addr_b + 8 * H0));
-
- c0 += (float8)a0.s0 * b0;
- c1 += (float8)a0.s1 * b0;
- c2 += (float8)a0.s2 * b0;
- c3 += (float8)a0.s3 * b0;
- }
-
- for(; src_addr_b < src_end_addr_b; src_addr_a += 4 * V0, src_addr_b += 8 * H0)
- {
- // Load values from matrix A (interleaved) and matrix B (transposed)
- float4 a0 = convert_float4(vload4(0, src_addr_a));
- float8 b0 = convert_float8(vload8(0, src_addr_b));
-
- c0 += (float8)a0.s0 * b0;
- c1 += (float8)a0.s1 * b0;
- c2 += (float8)a0.s2 * b0;
- c3 += (float8)a0.s3 * b0;
- }
-
- // Compute destination address
- Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
-
- // Compute dst address
- __global uchar *dst_addr = offset(&dst, 0, 0);
-
- uint4 zout = 0;
-
-#if defined(REINTERPRET_OUTPUT_AS_3D)
- // Since we store a 2D output tile in a 3D tensor, we need to check when the plane changes across the z dimension
- // in order to take into account the presence of possible cross plane paddings
- //
- // | |
- // | plane0 |
- // | |
- // |__________________|
- // |******************|
- // | cross_plane_pad |
- // |******************|
- // | |
- // | plane1 |
- // | |
- // |__________________|
-
- // The plane (zout) is calculated dividing M (get_global_id(1) * 4) by HEIGHT_GEMM3D
- zout = ((uint4)(0, 1, 2, 3) + (uint4)(get_global_id(1) * 4)) / (uint4)HEIGHT_GEMM3D;
- zout = min(DEPTH_GEMM3D - 1, zout);
-
- // Add offset due to the cross plane paddings
- zout *= (cross_plane_pad * dst_stride_y);
-
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply dst_stride_z by DEPTH_GEMM3D
- dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
- // Add offset for batched GEMM
- dst_addr += z * dst_stride_z;
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-
- // Multiply by the weight of matrix-matrix product and store the result
-#if defined(ALPHA)
- SCALE_BLOCK(4, float, c, ALPHA);
-#endif // defined(ALPHA)
-
-#if defined(BETA)
- REPEAT_VAR_INIT_TO_CONST(4, uint, zero, 0);
-
-#if defined(BROADCAST_BIAS)
- __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half));
-
- LOAD_BLOCK(1, 8, half, bias, src2_addr, 0, src2_stride_y, zero);
-
- float8 bias_f0 = convert_float8(bias0);
-
-#ifndef UNIT_BETA
- SCALE_BLOCK(1, float, bias_f, BETA);
-#endif // UNIT_BETA
-
- // c = c + bias[broadcasted]
- ADD_BLOCK_BROADCAST(4, c, bias_f0);
-
-#else // defined(BROADCAST_BIAS)
- __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half)) + (get_global_id(1) * (uint)4 * src2_stride_y) + get_global_id(
- 2) * src2_stride_z;
-
- LOAD_BLOCK(4, 8, half, bias, src2_addr, 0, src2_stride_y, zero);
-
- float8 bias_f0 = convert_float8(bias0);
- float8 bias_f1 = convert_float8(bias1);
- float8 bias_f2 = convert_float8(bias2);
- float8 bias_f3 = convert_float8(bias3);
-
-#ifndef UNIT_BETA
- SCALE_BLOCK(4, float, bias_f, BETA);
-#endif // UNIT_BETA
-
- // c = c + bias
- ADD_BLOCK(4, c, bias_f);
-
-#endif // defined(BROADCAST_BIAS)
-#endif // defined(BETA)
-
- half8 c_h0 = convert_half8(c0);
- half8 c_h1 = convert_half8(c1);
- half8 c_h2 = convert_half8(c2);
- half8 c_h3 = convert_half8(c3);
-
-#if defined(ACTIVATION_TYPE)
- ACTIVATION_BLOCK(4, ACTIVATION_TYPE, half, VEC_SIZE, c_h, A_VAL, B_VAL);
-#endif // defined(ACTIVATION_TYPE)
-
- // Store 4x8 block
- const bool cond_y = ((get_global_id(1) + 1) * 4 >= M);
- const bool cond_x = ((get_global_id(0) + 1) * 8 >= N);
- STORE_BLOCK_BOUNDARY_AWARE(4, 8, half, c_h, dst_addr, dst_stride_y, zout.s, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-}
-
-/** This OpenCL kernel optimized for Bifrost architectures computes the matrix multiplication between matrix A reshaped (src0) and matrix B reshaped (src1)
- *
- * @note The number of rows of destination matrix must be passed at compile time using -DM
- * @note The number of columns of the destination matrix must be passed at compile time using -DN
- * @note The number of rows of the *un-reshaped* matrix B (K) must be passed at compile time using -DK
- * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note The optional alpha's value needs to be passed at compile time using -DALPHA
- * @note The multiplication factor for the transposition width (H0) must be passed at compile time using -DH0 (e.g. -DH0=2)
- * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DV0 (e.g. -DV0=2)
- * @note In case the matrix B has 3 dimensions and the matrix A more than 3, in order to avoid out-of-bounds reads, the number of channels of matrix B must be passed at compile time using MATRIX_B_DEPTH (e.g. -DMATRIX_B_DEPTH=16)
- * This case can happen when GEMM is used to perform the element-wise multiplication through a batched matrix multiplication (2D Winograd) and we have multiple inputs (e.g. a = [K, M, 16, Batches], b = [N, K, 16])
- *
- * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions must also be passed at compile time using -DA_VAL= and -DB_VAL= respectively.
- * The activation function is performed after the bias addition
- * @note In case the output has to be reinterpreted as a 3D tensor (e.g. output of convolution layer), the following information must be passed at compile time:
- * -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
- * -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
- * -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
- * (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- *
- * @param[in] src0_ptr Pointer to the source matrix. Supported data types: F16
- * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src0_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in]  src2_ptr                           (Optional) Pointer to the bias matrix. Supported data type: same as @p src0_ptr
- * @param[in] src2_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes)
- * @param[in] src2_step_x (Optional) src2_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src2_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes)
- * @param[in] src2_step_y (Optional) src2_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src2_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr
- * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- * @param[in] src0_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] src1_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] src2_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
- * @param[in]  dst_stride_z                       Stride of the destination tensor in Z dimension (in bytes)
- * @param[in]  cross_plane_pad                    (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
- */
-__kernel void gemm_mm_interleaved_transposed_f16_bifrost(IMAGE_DECLARATION(src0),
- IMAGE_DECLARATION(src1),
-#if defined(BETA)
- IMAGE_DECLARATION(src2),
-#endif // defined(BETA)
- IMAGE_DECLARATION(dst),
- uint src0_stride_z,
- uint src1_stride_z,
-#if defined(BETA)
- uint src2_stride_z,
-#endif //defined(BETA)
- uint dst_stride_z
-#if defined(REINTERPRET_OUTPUT_AS_3D)
- ,
- uint cross_plane_pad
-#endif // REINTERPRET_OUTPUT_AS_3D
- )
-{
- int x = get_global_id(0) / H0;
- int y = get_global_id(1) / V0;
- int z = get_global_id(2);
-
- // Offset
- const int offset_row_a = (get_global_id(1) % V0) * 4;
- const int offset_row_b = (get_global_id(0) % H0) * 8;
-
- // src_addr_a = address of matrix A
- // src_addr_b = address of matrix B
- int src0_addr_in_bytes = z * src0_stride_z + y * src0_stride_y + src0_offset_first_element_in_bytes;
- int src1_addr_in_bytes = x * src1_stride_y + src1_offset_first_element_in_bytes;
-
-#if defined(MATRIX_B_DEPTH)
- // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
- src1_addr_in_bytes += (z % MATRIX_B_DEPTH) * src1_stride_z;
-#else // defined(MATRIX_B_DEPTH)
- src1_addr_in_bytes += z * src1_stride_z;
-#endif // defined(MATRIX_B_DEPTH)
-
- __global half *src_addr_a = (__global half *)(src0_ptr + src0_addr_in_bytes);
- __global half *src_addr_b = (__global half *)(src1_ptr + src1_addr_in_bytes);
-
- src_addr_a += offset_row_a;
- src_addr_b += offset_row_b;
-
- // Reset accumulators
- half8 c0 = 0.0f;
- half8 c1 = 0.0f;
- half8 c2 = 0.0f;
- half8 c3 = 0.0f;
-
- int i = 0;
- for(; i <= (int)(K - 4); i += 4)
- {
-#if V0 == 1
- // Load values from matrix A (interleaved) and matrix B (transposed)
- half8 a0 = vload8(0, src_addr_a);
- half8 b0 = vload8(0, src_addr_b);
-
- src_addr_a += 8 * V0;
- src_addr_b += 8 * H0;
-
- c0 = fma((half8)a0.s0, b0, c0);
- c1 = fma((half8)a0.s1, b0, c1);
- c2 = fma((half8)a0.s2, b0, c2);
- c3 = fma((half8)a0.s3, b0, c3);
-
- // Load values from matrix B (transposed)
- b0 = vload8(0, src_addr_b);
-
- src_addr_b += 8 * H0;
-
- c0 = fma((half8)a0.s4, b0, c0);
- c1 = fma((half8)a0.s5, b0, c1);
- c2 = fma((half8)a0.s6, b0, c2);
- c3 = fma((half8)a0.s7, b0, c3);
-
- // Load values from matrix A (interleaved) and matrix B (transposed)
- a0 = vload8(0, src_addr_a);
- b0 = vload8(0, src_addr_b);
-
- src_addr_a += 8 * V0;
- src_addr_b += 8 * H0;
-
- c0 = fma((half8)a0.s0, b0, c0);
- c1 = fma((half8)a0.s1, b0, c1);
- c2 = fma((half8)a0.s2, b0, c2);
- c3 = fma((half8)a0.s3, b0, c3);
-
- // Load values from matrix B (transposed)
- b0 = vload8(0, src_addr_b);
-
- src_addr_b += 8 * H0;
-
- c0 = fma((half8)a0.s4, b0, c0);
- c1 = fma((half8)a0.s5, b0, c1);
- c2 = fma((half8)a0.s6, b0, c2);
- c3 = fma((half8)a0.s7, b0, c3);
-#else // V0 == 1
- // Load values from matrix A (interleaved) and matrix B (transposed)
- half4 a0 = vload4(0, src_addr_a);
- half8 b0 = vload8(0, src_addr_b);
-
- src_addr_a += 4 * V0;
- src_addr_b += 8 * H0;
-
- c0 = fma((half8)a0.s0, b0, c0);
- c1 = fma((half8)a0.s1, b0, c1);
- c2 = fma((half8)a0.s2, b0, c2);
- c3 = fma((half8)a0.s3, b0, c3);
-
- // Load values from matrix A (interleaved) and matrix B (transposed)
- a0 = vload4(0, src_addr_a);
- b0 = vload8(0, src_addr_b);
-
- src_addr_a += 4 * V0;
- src_addr_b += 8 * H0;
-
- c0 = fma((half8)a0.s0, b0, c0);
- c1 = fma((half8)a0.s1, b0, c1);
- c2 = fma((half8)a0.s2, b0, c2);
- c3 = fma((half8)a0.s3, b0, c3);
-
- // Load values from matrix A (interleaved) and matrix B (transposed)
- a0 = vload4(0, src_addr_a);
- b0 = vload8(0, src_addr_b);
-
- src_addr_a += 4 * V0;
- src_addr_b += 8 * H0;
-
- c0 = fma((half8)a0.s0, b0, c0);
- c1 = fma((half8)a0.s1, b0, c1);
- c2 = fma((half8)a0.s2, b0, c2);
- c3 = fma((half8)a0.s3, b0, c3);
-
- // Load values from matrix A (interleaved) and matrix B (transposed)
- a0 = vload4(0, src_addr_a);
- b0 = vload8(0, src_addr_b);
-
- src_addr_a += 4 * V0;
- src_addr_b += 8 * H0;
-
- c0 = fma((half8)a0.s0, b0, c0);
- c1 = fma((half8)a0.s1, b0, c1);
- c2 = fma((half8)a0.s2, b0, c2);
- c3 = fma((half8)a0.s3, b0, c3);
-#endif // V0 == 1
- }
-
- for(; i < (int)K; ++i)
- {
- // Load values from matrix A (interleaved) and matrix B (transposed)
- half4 a0 = vload4(0, src_addr_a);
- half8 b0 = vload8(0, src_addr_b);
-
- src_addr_a += 4 * V0;
- src_addr_b += 8 * H0;
-
- c0 = fma((half8)a0.s0, b0, c0);
- c1 = fma((half8)a0.s1, b0, c1);
- c2 = fma((half8)a0.s2, b0, c2);
- c3 = fma((half8)a0.s3, b0, c3);
- }
-
- // Compute destination address
- Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
-
- // Compute dst address
- __global uchar *dst_addr = offset(&dst, 0, 0);
-
- uint4 zout = 0;
-
-#if defined(REINTERPRET_OUTPUT_AS_3D)
- // Since we store a 2D output tile in a 3D tensor, we need to check when the plane changes across the z dimension
- // in order to take into account the presence of possible cross plane paddings
- //
- // | |
- // | plane0 |
- // | |
- // |__________________|
- // |******************|
- // | cross_plane_pad |
- // |******************|
- // | |
- // | plane1 |
- // | |
- // |__________________|
-
- // The plane (zout) is calculated dividing M (get_global_id(1) * 4) by HEIGHT_GEMM3D
- zout = ((uint4)(0, 1, 2, 3) + (uint4)(get_global_id(1) * 4)) / (uint4)HEIGHT_GEMM3D;
- zout = min(DEPTH_GEMM3D - 1, zout);
-
- // Add offset due to the cross plane paddings
- zout *= (cross_plane_pad * dst_stride_y);
-
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply dst_stride_z by DEPTH_GEMM3D
- dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
- // Add offset for batched GEMM
- dst_addr += z * dst_stride_z;
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-
- // Multiply by the weight of matrix-matrix product and store the result
-#if defined(ALPHA)
- SCALE_BLOCK(4, half, c, ALPHA);
-#endif // defined(ALPHA)
-
- // Add beta*bias
-#if defined(BETA)
- REPEAT_VAR_INIT_TO_CONST(4, uint, zero, 0);
-
-#if defined(BROADCAST_BIAS)
- __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half));
-
- LOAD_BLOCK(1, 8, half, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
- SCALE_BLOCK(1, half, bias, BETA);
-#endif // UNIT_BETA
-
- // c = c + bias[broadcasted]
- ADD_BLOCK_BROADCAST(4, c, bias0);
-
-#else // defined(BROADCAST_BIAS)
-    __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half)) + (get_global_id(1) * (uint)4 * src2_stride_y) +
-                                get_global_id(2) * src2_stride_z;
-
- LOAD_BLOCK(4, 8, half, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
- SCALE_BLOCK(4, half, bias, BETA);
-#endif // UNIT_BETA
-
- // c = c + bias
- ADD_BLOCK(4, c, bias);
-
-#endif // defined(BROADCAST_BIAS)
-#endif // defined(BETA)
-
-#if defined(ACTIVATION_TYPE)
- ACTIVATION_BLOCK(4, ACTIVATION_TYPE, half, VEC_SIZE, c, A_VAL, B_VAL);
-#endif // defined(ACTIVATION_TYPE)
-
- // Store 4x8 block
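-    // cond_y/cond_x mark the work-items whose 4x8 tile crosses the M/N edge of the output; for those,
-    // STORE_BLOCK_BOUNDARY_AWARE (a helper macro defined elsewhere) stores only PARTIAL_STORE_M0 rows
-    // and/or PARTIAL_STORE_N0 columns of the tile, avoiding out-of-bounds writes.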
- const bool cond_y = ((get_global_id(1) + 1) * 4 >= M);
- const bool cond_x = ((get_global_id(0) + 1) * 8 >= N);
- STORE_BLOCK_BOUNDARY_AWARE(4, 8, half, c, dst_addr, dst_stride_y, zout.s, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-}
-
-#endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED)
-
-#endif // defined(M) && defined(N) && defined(K) && defined(H0) && defined(V0) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
-
-#if defined(N) && defined(K) && defined(M0) && defined(N0) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
-#if defined(DATA_TYPE)
-#define VECTOR_TYPE VEC_DATA_TYPE(DATA_TYPE, N0)
-/** This OpenCL kernel computes the matrix by matrix multiplication between the matrix A (src0) and matrix B (src1) in case both matrices have not been reshaped.
- *
- * @note This OpenCL kernel works with floating point data types (F16/F32)
- * @note The floating point data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
- * @note The number of elements processed along the x and y directions must be passed at compile time using -DN0 and -DM0
- * @note The number of columns of matrix A and the number of columns of the matrix B need to be passed at compile time using -DK and -DN
- * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note The optional alpha value needs to be passed at compile time using -DALPHA
- * @note In case the matrix B has 3 dimensions and the matrix A more than 3, in order to avoid out-of-bounds reads, the number of channels of matrix B must be passed at compile time using MATRIX_B_DEPTH (e.g. -DMATRIX_B_DEPTH=16)
- * This case can happen when GEMM is used to perform the element-wise multiplication through a batched matrix multiplication (2D Winograd) and we have multiple inputs (e.g. a = [K, M, 16, Batches], b = [N, K, 16])
- *
- * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions should be passed at compile time as well using -DA_VAL= and -DB_VAL= respectively.
- * The activation function is performed after the bias addition
- * @note In case the input or output have to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
- * -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
- * -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
- * -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
- * -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
- * (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- *
- * @param[in] src0_ptr Pointer to the source matrix. Supported data types: F16/F32
- * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src0_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src2_ptr (Optional) Pointer to the bias matrix. Supported data type: same as @p lhs_ptr
- * @param[in] src2_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes)
- * @param[in] src2_step_x (Optional) src2_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src2_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes)
- * @param[in] src2_step_y (Optional) src2_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src2_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr
- * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
- * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
- * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- * @param[in] src0_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] src1_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] src2_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] src_cross_plane_pad (Optional) Bottom paddings in unit of elements for the input tensor (only if defined REINTERPRET_INPUT_AS_3D)
- * @param[in] dst_cross_plane_pad (Optional) Bottom paddings in unit of elements for the output tensor (only if defined REINTERPRET_OUTPUT_AS_3D)
- */
-__kernel void gemm_mm_floating_point(IMAGE_DECLARATION(src0),
- IMAGE_DECLARATION(src1),
-#if defined(BETA)
- IMAGE_DECLARATION(src2),
-#endif // defined(BETA)
- IMAGE_DECLARATION(dst),
- uint src0_stride_z,
- uint src1_stride_z,
-#if defined(BETA)
- uint src2_stride_z,
-#endif //defined(BETA)
- uint dst_stride_z
-#if defined(REINTERPRET_INPUT_AS_3D)
- ,
- uint src_cross_plane_pad
-#endif // REINTERPRET_INPUT_AS_3D
-#if defined(REINTERPRET_OUTPUT_AS_3D)
- ,
- uint dst_cross_plane_pad
-#endif // REINTERPRET_OUTPUT_AS_3D
- )
-{
- int idx = get_global_id(0) * N0;
-
- // Compute starting address for matrix A and Matrix B
- int2 src_addr = ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
-
- // Update address for the matrix A
- src_addr.s0 += COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0) * src0_stride_y;
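-    // COMPUTE_M0_START_ROW is a helper macro defined elsewhere in the kernel headers: it maps
-    // get_global_id(1) to the first M row handled by this work-item, taking the PARTIAL_STORE_M0
-    // leftover rows into account so that accesses along M stay within bounds.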
-
- // Update address for the matrix B
- src_addr.s1 += idx * sizeof(DATA_TYPE);
-
-#if defined(REINTERPRET_INPUT_AS_3D)
- // Since we load a 2D input tile from a 3D tensor, we need to check when the plane changes across the z dimension
- // in order to take into account the presence of possible cross plane paddings
- //
- // | |
- // | plane0 |
- // | |
- // |__________________|
- // |******************|
- // | cross_plane_pad |
- // |******************|
- // | |
- // | plane1 |
- // | |
- // |__________________|
-
-    // The plane (zin) is calculated by dividing the row index by HEIGHT_GEMM3D
- uint4 zin = ((uint4)(0, 1, 2, 3) + (uint4)(COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0))) / (uint4)HEIGHT_GEMM3D;
- zin = min(DEPTH_GEMM3D - 1, zin);
-
- // Add offset due to the cross plane paddings
- zin *= (src_cross_plane_pad * src0_stride_y);
-
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply src0_stride_z by DEPTH_GEMM3D
- src_addr.s0 += get_global_id(2) * src0_stride_z * DEPTH_GEMM3D;
-
-#else // defined(REINTERPRET_INPUT_AS_3D)
-
- // Add offset for batched GEMM
- src_addr.s0 += get_global_id(2) * src0_stride_z;
-
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
-#if defined(MATRIX_B_DEPTH)
- // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
- src_addr.s1 += (get_global_id(2) % MATRIX_B_DEPTH) * src1_stride_z;
-#else // defined(MATRIX_B_DEPTH)
- src_addr.s1 += get_global_id(2) * src1_stride_z;
-#endif // defined(MATRIX_B_DEPTH)
-
- int end_row_vec_a = src_addr.s0 + (K * sizeof(DATA_TYPE));
-
- VECTOR_TYPE acc0 = 0.0f;
-#if M0 > 1
- VECTOR_TYPE acc1 = 0.0f;
-#endif // M0 > 1
-#if M0 > 2
- VECTOR_TYPE acc2 = 0.0f;
-#endif // M0 > 2
-#if M0 > 3
- VECTOR_TYPE acc3 = 0.0f;
-#endif // M0 > 3
-
- for(; src_addr.s0 <= (end_row_vec_a - 2 * (int)sizeof(DATA_TYPE)); src_addr += (int2)(2 * sizeof(DATA_TYPE), 2 * src1_stride_y))
- {
-#if defined(REINTERPRET_INPUT_AS_3D)
- // Load values from matrix A
- LOAD_BLOCK(M0, 2, DATA_TYPE, a, src0_ptr, src_addr.s0, src0_stride_y, zin.s);
-#else // defined(REINTERPRET_INPUT_AS_3D)
- // Load values from matrix A
- VEC_DATA_TYPE(DATA_TYPE, 2)
- a0 = vload2(0, (__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
-#if M0 > 1
- VEC_DATA_TYPE(DATA_TYPE, 2)
- a1 = vload2(0, (__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
-#endif // M0 > 1
-#if M0 > 2
- VEC_DATA_TYPE(DATA_TYPE, 2)
- a2 = vload2(0, (__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
-#endif // M0 > 2
-#if M0 > 3
- VEC_DATA_TYPE(DATA_TYPE, 2)
- a3 = vload2(0, (__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
-#endif // M0 > 3
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
- // Load values from matrix B
- VECTOR_TYPE b0 = VLOAD(N0)(0, (__global DATA_TYPE *)(src1_ptr + src_addr.s1));
- VECTOR_TYPE b1 = VLOAD(N0)(0, (__global DATA_TYPE *)(src1_ptr + src_addr.s1 + src1_stride_y));
-
- // Accumulate
- acc0 += b0 * (VECTOR_TYPE)a0.s0;
- acc0 += b1 * (VECTOR_TYPE)a0.s1;
-#if M0 > 1
- acc1 += b0 * (VECTOR_TYPE)a1.s0;
- acc1 += b1 * (VECTOR_TYPE)a1.s1;
-#endif // M0 > 1
-#if M0 > 2
- acc2 += b0 * (VECTOR_TYPE)a2.s0;
- acc2 += b1 * (VECTOR_TYPE)a2.s1;
-#endif // M0 > 2
-#if M0 > 3
- acc3 += b0 * (VECTOR_TYPE)a3.s0;
- acc3 += b1 * (VECTOR_TYPE)a3.s1;
-#endif // M0 > 3
- }
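-    // Each iteration of the loop above consumes two columns of A and two rows of B; the loop below
-    // handles the final column when K is odd.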
-
- for(; src_addr.s0 < end_row_vec_a; src_addr += (int2)(sizeof(DATA_TYPE), src1_stride_y))
- {
-#if defined(REINTERPRET_INPUT_AS_3D)
- // Load values from matrix A
- DATA_TYPE a0 = *((__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y + zin.s0));
-#if M0 > 1
- DATA_TYPE a1 = *((__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y + zin.s1));
-#endif // M0 > 1
-#if M0 > 2
- DATA_TYPE a2 = *((__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y + zin.s2));
-#endif // M0 > 2
-#if M0 > 3
- DATA_TYPE a3 = *((__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y + zin.s3));
-#endif // M0 > 3
-#else // defined(REINTERPRET_INPUT_AS_3D)
- // Load values from matrix A
- DATA_TYPE a0 = *((__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
-#if M0 > 1
- DATA_TYPE a1 = *((__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
-#endif // M0 > 1
-#if M0 > 2
- DATA_TYPE a2 = *((__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
-#endif // M0 > 2
-#if M0 > 3
- DATA_TYPE a3 = *((__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
-#endif // M0 > 3
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
- // Load values from matrix B
- VECTOR_TYPE b0 = VLOAD(N0)(0, (__global DATA_TYPE *)(src1_ptr + src_addr.s1));
-
- // Accumulate
- acc0 += b0 * (VECTOR_TYPE)a0;
-#if M0 > 1
- acc1 += b0 * (VECTOR_TYPE)a1;
-#endif // M0 > 1
-#if M0 > 2
- acc2 += b0 * (VECTOR_TYPE)a2;
-#endif // M0 > 2
-#if M0 > 3
- acc3 += b0 * (VECTOR_TYPE)a3;
-#endif // M0 > 3
- }
-
- int z = get_global_id(2);
-
- // Compute dst address
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(get_global_id(1), M0,
- PARTIAL_STORE_M0)
- * dst_stride_y);
-
- uint4 zout = 0;
-
-#if defined(REINTERPRET_OUTPUT_AS_3D)
-
- // Since we store a 2D output tile in a 3D tensor, we need to check when the plane changes across the z dimension
- // in order to take into account the presence of possible cross plane paddings
- //
- // | |
- // | plane0 |
- // | |
- // |__________________|
- // |******************|
- // | cross_plane_pad |
- // |******************|
- // | |
- // | plane1 |
- // | |
- // |__________________|
-
-    // The plane (zout) is calculated by dividing the row index by HEIGHT_GEMM3D
- zout = ((uint4)(0, 1, 2, 3) + (uint4)(COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0))) / (uint4)HEIGHT_GEMM3D;
- zout = min(DEPTH_GEMM3D - 1, zout);
-
- // Add offset due to the cross plane paddings
- zout *= (dst_cross_plane_pad * dst_stride_y);
-
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply dst_stride_z by DEPTH_GEMM3D
- dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
- // Add offset for batched GEMM
- dst_addr += z * dst_stride_z;
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-
- // Multiply by the weight of matrix-matrix product and store the result
-#if defined(ALPHA)
- SCALE_BLOCK(M0, DATA_TYPE, acc, ALPHA);
-#endif // defined(ALPHA)
-
- // Add beta*bias
-#if defined(BETA)
- REPEAT_VAR_INIT_TO_CONST(M0, uint, zero, 0);
-
-#if defined(BROADCAST_BIAS)
- __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE));
-
- LOAD_BLOCK(1, N0, DATA_TYPE, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
- SCALE_BLOCK(1, DATA_TYPE, bias, BETA);
-#endif // UNIT_BETA
-
- // c = c + bias[broadcasted]
- ADD_BLOCK_BROADCAST(M0, acc, bias0);
-
-#else // defined(BROADCAST_BIAS)
- __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(get_global_id(1), M0,
- PARTIAL_STORE_M0)
- * src2_stride_y)
- + z * src2_stride_z;
-
- LOAD_BLOCK(M0, N0, DATA_TYPE, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
- SCALE_BLOCK(M0, DATA_TYPE, bias, BETA);
-#endif // UNIT_BETA
-
- // c = c + bias
- ADD_BLOCK(M0, acc, bias);
-
-#endif // defined(BROADCAST_BIAS)
-#endif // defined(BETA)
-
-#if defined(ACTIVATION_TYPE)
- ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, acc, A_VAL, B_VAL);
-#endif // defined(ACTIVATION_TYPE)
-
- // Store output block
- const bool cond_y = get_global_id(1) == 0;
- const bool cond_x = ((get_global_id(0) + 1) * N0 >= N);
- STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, acc, dst_addr, dst_stride_y, zout.s, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-}
-#endif // defined(DATA_TYPE)
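-// Illustrative build options for gemm_mm_floating_point (all values hypothetical): a host program would
-// typically compile it with something like
-//   "-DDATA_TYPE=float -DM0=4 -DN0=4 -DK=64 -DN=128 -DPARTIAL_STORE_M0=1 -DPARTIAL_STORE_N0=1"
-// and may additionally define -DALPHA, -DBETA (optionally with -DBROADCAST_BIAS), -DACTIVATION_TYPE
-// together with -DA_VAL/-DB_VAL, and the REINTERPRET_*_AS_3D / HEIGHT_GEMM3D / DEPTH_GEMM3D options
-// documented above.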
-
-/** This OpenCL kernel computes the matrix by matrix multiplication between the matrix A (src0) and matrix B (src1) in case both matrices have not been reshaped
- *
- * @note This OpenCL kernel works with the 32-bit floating point data type (float) and uses the fma units.
- * @note The number of elements processed along the x and y directions must be passed at compile time using -DN0 and -DM0.
- * @note This kernel processes a fixed number of elements along x: -DN0=4.
- * @note The number of columns of matrix A and the number of columns of the matrix B need to be passed at compile time using -DK and -DN
- * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note The optional alpha value needs to be passed at compile time using -DALPHA
- * @note In case the matrix B has 3 dimensions and the matrix A more than 3, in order to avoid out-of-bounds reads, the number of channels of matrix B must be passed at compile time using MATRIX_B_DEPTH (e.g. -DMATRIX_B_DEPTH=16)
- * This case can happen when GEMM is used to perform the element-wise multiplication through a batched matrix multiplication (2D Winograd) and we have multiple inputs (e.g. a = [K, M, 16, Batches], b = [N, K, 16])
- *
- * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions should be passed at compile time as well using -DA_VAL= and -DB_VAL= respectively.
- * The activation function is performed after the bias addition
- * @note In case the input or output have to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
- * -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
- * -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
- * -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
- * -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
- * (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- *
- * @param[in] src0_ptr Pointer to the source matrix. Supported data types: F32
- * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src0_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src2_ptr (Optional) Pointer to the bias matrix. Supported data type: same as @p lhs_ptr
- * @param[in] src2_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes)
- * @param[in] src2_step_x (Optional) src2_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src2_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes)
- * @param[in] src2_step_y (Optional) src2_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src2_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr
- * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
- * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
- * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- * @param[in] src0_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] src1_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] src2_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] src_cross_plane_pad (Optional) Bottom paddings in unit of elements for the input tensor (only if defined REINTERPRET_INPUT_AS_3D)
- * @param[in] dst_cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
- */
-__kernel void gemm_mm_floating_point_f32_bifrost(IMAGE_DECLARATION(src0),
- IMAGE_DECLARATION(src1),
-#if defined(BETA)
- IMAGE_DECLARATION(src2),
-#endif // defined(BETA)
- IMAGE_DECLARATION(dst),
- uint src0_stride_z,
- uint src1_stride_z,
-#if defined(BETA)
- uint src2_stride_z,
-#endif //defined(BETA)
- uint dst_stride_z
-#if defined(REINTERPRET_INPUT_AS_3D)
- ,
- uint src_cross_plane_pad
-#endif // REINTERPRET_INPUT_AS_3D
-#if defined(REINTERPRET_OUTPUT_AS_3D)
- ,
- uint dst_cross_plane_pad
-#endif // REINTERPRET_OUTPUT_AS_3D
- )
-{
- int idx = get_global_id(0) * N0;
-
- // Compute starting address for matrix A and matrix B
- int2 src_addr = ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
-
- // Update address for matrix A
- src_addr.s0 += COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0) * src0_stride_y;
-
- // Update address for matrix B
- src_addr.s1 += idx * sizeof(float);
-
-#if defined(REINTERPRET_INPUT_AS_3D)
- // Since we load a 2D input tile from a 3D tensor, we need to check when the plane changes across the z dimension
- // in order to take into account the presence of possible cross plane paddings
- //
- // | |
- // | plane0 |
- // | |
- // |__________________|
- // |******************|
- // | cross_plane_pad |
- // |******************|
- // | |
- // | plane1 |
- // | |
- // |__________________|
-
-    // The plane (zin) is calculated by dividing the row index by HEIGHT_GEMM3D
- uint4 zin = ((uint4)(0, 1, 2, 3) + (uint4)(COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0))) / (uint4)HEIGHT_GEMM3D;
- zin = min(DEPTH_GEMM3D - 1, zin);
-
- // Add offset due to the cross plane paddings
- zin *= (src_cross_plane_pad * src0_stride_y);
-
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply src0_stride_z by DEPTH_GEMM3D
- src_addr.s0 += get_global_id(2) * src0_stride_z * DEPTH_GEMM3D;
-
-#else // defined(REINTERPRET_INPUT_AS_3D)
-
- // Add offset for batched GEMM
- src_addr.s0 += get_global_id(2) * src0_stride_z;
-
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
-#if defined(MATRIX_B_DEPTH)
- // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
- src_addr.s1 += (get_global_id(2) % MATRIX_B_DEPTH) * src1_stride_z;
-#else // defined(MATRIX_B_DEPTH)
- src_addr.s1 += get_global_id(2) * src1_stride_z;
-#endif // defined(MATRIX_B_DEPTH)
-
- // Initialize accumulators
- float4 acc0 = 0.0f;
-
-#if M0 > 1
- float4 acc1 = 0.0f;
-#endif // M0 > 1
-
-#if M0 > 2
- float4 acc2 = 0.0f;
-#endif // M0 > 2
-
-#if M0 > 3
- float4 acc3 = 0.0f;
-#endif // M0 > 3
-
- // A and B src indices get incremented at the same time.
- int i = 0;
- for(; i <= ((int)K - 4); i += 4)
- {
-#if defined(REINTERPRET_INPUT_AS_3D)
- // Load values from matrix A and matrix B
- LOAD_BLOCK(M0, 4, float, a, src0_ptr, src_addr.s0, src0_stride_y, zin.s);
-#else // defined(REINTERPRET_INPUT_AS_3D)
- // Load values from matrix A and matrix B
- float4 a0 = vload4(0, (__global float *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
-#if M0 > 1
- float4 a1 = vload4(0, (__global float *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
-#endif // M0 > 1
-#if M0 > 2
- float4 a2 = vload4(0, (__global float *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
-#endif // M0 > 2
-#if M0 > 3
- float4 a3 = vload4(0, (__global float *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
-#endif // M0 > 3
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
- float4 b0 = vload4(0, (__global float *)(src1_ptr + src_addr.s1));
- src_addr.s1 += src1_stride_y;
-
- // Multiply and accumulate
- acc0.s0 = fma(a0.s0, b0.s0, acc0.s0);
- acc0.s1 = fma(a0.s0, b0.s1, acc0.s1);
- acc0.s2 = fma(a0.s0, b0.s2, acc0.s2);
- acc0.s3 = fma(a0.s0, b0.s3, acc0.s3);
-
-#if M0 > 1
-
- acc1.s0 = fma(a1.s0, b0.s0, acc1.s0);
- acc1.s1 = fma(a1.s0, b0.s1, acc1.s1);
- acc1.s2 = fma(a1.s0, b0.s2, acc1.s2);
- acc1.s3 = fma(a1.s0, b0.s3, acc1.s3);
-
-#endif // M0 > 1
-#if M0 > 2
-
- acc2.s0 = fma(a2.s0, b0.s0, acc2.s0);
- acc2.s1 = fma(a2.s0, b0.s1, acc2.s1);
- acc2.s2 = fma(a2.s0, b0.s2, acc2.s2);
- acc2.s3 = fma(a2.s0, b0.s3, acc2.s3);
-
-#endif // M0 > 2
-#if M0 > 3
-
- acc3.s0 = fma(a3.s0, b0.s0, acc3.s0);
- acc3.s1 = fma(a3.s0, b0.s1, acc3.s1);
- acc3.s2 = fma(a3.s0, b0.s2, acc3.s2);
- acc3.s3 = fma(a3.s0, b0.s3, acc3.s3);
-#endif // M0 > 3
-
- // Load values from matrix A and matrix B
- b0 = vload4(0, (__global float *)(src1_ptr + src_addr.s1));
- src_addr.s1 += src1_stride_y;
-
- // Multiply and accumulate
- acc0.s0 = fma(a0.s1, b0.s0, acc0.s0);
- acc0.s1 = fma(a0.s1, b0.s1, acc0.s1);
- acc0.s2 = fma(a0.s1, b0.s2, acc0.s2);
- acc0.s3 = fma(a0.s1, b0.s3, acc0.s3);
-
-#if M0 > 1
-
- acc1.s0 = fma(a1.s1, b0.s0, acc1.s0);
- acc1.s1 = fma(a1.s1, b0.s1, acc1.s1);
- acc1.s2 = fma(a1.s1, b0.s2, acc1.s2);
- acc1.s3 = fma(a1.s1, b0.s3, acc1.s3);
-
-#endif // M0 > 1
-#if M0 > 2
-
- acc2.s0 = fma(a2.s1, b0.s0, acc2.s0);
- acc2.s1 = fma(a2.s1, b0.s1, acc2.s1);
- acc2.s2 = fma(a2.s1, b0.s2, acc2.s2);
- acc2.s3 = fma(a2.s1, b0.s3, acc2.s3);
-
-#endif // M0 > 2
-#if M0 > 3
-
- acc3.s0 = fma(a3.s1, b0.s0, acc3.s0);
- acc3.s1 = fma(a3.s1, b0.s1, acc3.s1);
- acc3.s2 = fma(a3.s1, b0.s2, acc3.s2);
- acc3.s3 = fma(a3.s1, b0.s3, acc3.s3);
-#endif // M0 > 3
-
- // Load values from matrix A and matrix B
- b0 = vload4(0, (__global float *)(src1_ptr + src_addr.s1));
- src_addr.s1 += src1_stride_y;
-
- // Multiply and accumulate
- acc0.s0 = fma(a0.s2, b0.s0, acc0.s0);
- acc0.s1 = fma(a0.s2, b0.s1, acc0.s1);
- acc0.s2 = fma(a0.s2, b0.s2, acc0.s2);
- acc0.s3 = fma(a0.s2, b0.s3, acc0.s3);
-
-#if M0 > 1
-
- acc1.s0 = fma(a1.s2, b0.s0, acc1.s0);
- acc1.s1 = fma(a1.s2, b0.s1, acc1.s1);
- acc1.s2 = fma(a1.s2, b0.s2, acc1.s2);
- acc1.s3 = fma(a1.s2, b0.s3, acc1.s3);
-
-#endif // M0 > 1
-#if M0 > 2
-
- acc2.s0 = fma(a2.s2, b0.s0, acc2.s0);
- acc2.s1 = fma(a2.s2, b0.s1, acc2.s1);
- acc2.s2 = fma(a2.s2, b0.s2, acc2.s2);
- acc2.s3 = fma(a2.s2, b0.s3, acc2.s3);
-
-#endif // M0 > 2
-#if M0 > 3
-
- acc3.s0 = fma(a3.s2, b0.s0, acc3.s0);
- acc3.s1 = fma(a3.s2, b0.s1, acc3.s1);
- acc3.s2 = fma(a3.s2, b0.s2, acc3.s2);
- acc3.s3 = fma(a3.s2, b0.s3, acc3.s3);
-#endif // M0 > 3
-
- // Load values from matrix A and matrix B
- b0 = vload4(0, (__global float *)(src1_ptr + src_addr.s1));
- src_addr.s1 += src1_stride_y;
-
- // Multiply and accumulate
- acc0.s0 = fma(a0.s3, b0.s0, acc0.s0);
- acc0.s1 = fma(a0.s3, b0.s1, acc0.s1);
- acc0.s2 = fma(a0.s3, b0.s2, acc0.s2);
- acc0.s3 = fma(a0.s3, b0.s3, acc0.s3);
-
-#if M0 > 1
-
- acc1.s0 = fma(a1.s3, b0.s0, acc1.s0);
- acc1.s1 = fma(a1.s3, b0.s1, acc1.s1);
- acc1.s2 = fma(a1.s3, b0.s2, acc1.s2);
- acc1.s3 = fma(a1.s3, b0.s3, acc1.s3);
-
-#endif // M0 > 1
-#if M0 > 2
-
- acc2.s0 = fma(a2.s3, b0.s0, acc2.s0);
- acc2.s1 = fma(a2.s3, b0.s1, acc2.s1);
- acc2.s2 = fma(a2.s3, b0.s2, acc2.s2);
- acc2.s3 = fma(a2.s3, b0.s3, acc2.s3);
-
-#endif // M0 > 2
-#if M0 > 3
-
- acc3.s0 = fma(a3.s3, b0.s0, acc3.s0);
- acc3.s1 = fma(a3.s3, b0.s1, acc3.s1);
- acc3.s2 = fma(a3.s3, b0.s2, acc3.s2);
- acc3.s3 = fma(a3.s3, b0.s3, acc3.s3);
-#endif // M0 > 3
-
- src_addr.s0 += 4 * sizeof(float);
- }
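-    // The loop above consumes four columns of A (and four rows of B) per iteration; the loop below
-    // processes the remaining K % 4 columns one at a time.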
-
- for(; i < (int)K; ++i)
- {
-#if defined(REINTERPRET_INPUT_AS_3D)
- // Load values from matrix A
- float a0 = *((__global float *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y + zin.s0));
-#if M0 > 1
- float a1 = *((__global float *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y + zin.s1));
-#endif // M0 > 1
-#if M0 > 2
- float a2 = *((__global float *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y + zin.s2));
-#endif // M0 > 2
-#if M0 > 3
- float a3 = *((__global float *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y + zin.s3));
-#endif // M0 > 3
-#else // defined(REINTERPRET_INPUT_AS_3D)
- // Load values from matrix A
- float a0 = *((__global float *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
-#if M0 > 1
- float a1 = *((__global float *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
-#endif // M0 > 1
-#if M0 > 2
- float a2 = *((__global float *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
-#endif // M0 > 2
-#if M0 > 3
- float a3 = *((__global float *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
-#endif // M0 > 3
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
- // Load values from matrix B
- float4 b0 = vload4(0, (__global float *)(src1_ptr + src_addr.s1));
- src_addr.s1 += src1_stride_y;
-
- // Multiply and accumulate
- acc0.s0 = fma(a0, b0.s0, acc0.s0);
- acc0.s1 = fma(a0, b0.s1, acc0.s1);
- acc0.s2 = fma(a0, b0.s2, acc0.s2);
- acc0.s3 = fma(a0, b0.s3, acc0.s3);
-#if M0 > 1
- acc1.s0 = fma(a1, b0.s0, acc1.s0);
- acc1.s1 = fma(a1, b0.s1, acc1.s1);
- acc1.s2 = fma(a1, b0.s2, acc1.s2);
- acc1.s3 = fma(a1, b0.s3, acc1.s3);
-#endif // M0 > 1
-#if M0 > 2
- acc2.s0 = fma(a2, b0.s0, acc2.s0);
- acc2.s1 = fma(a2, b0.s1, acc2.s1);
- acc2.s2 = fma(a2, b0.s2, acc2.s2);
- acc2.s3 = fma(a2, b0.s3, acc2.s3);
-#endif // M0 > 2
-#if M0 > 3
- acc3.s0 = fma(a3, b0.s0, acc3.s0);
- acc3.s1 = fma(a3, b0.s1, acc3.s1);
- acc3.s2 = fma(a3, b0.s2, acc3.s2);
- acc3.s3 = fma(a3, b0.s3, acc3.s3);
-#endif // M0 > 3
-
- src_addr.s0 += sizeof(float);
- }
-
- int z = get_global_id(2);
-
- // Compute dst address
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (get_global_id(0) * (uint)4 * sizeof(float)) + (COMPUTE_M0_START_ROW(get_global_id(1), M0,
- PARTIAL_STORE_M0) * dst_stride_y);
-
- uint4 zout = 0;
-
-#if defined(REINTERPRET_OUTPUT_AS_3D)
- // Since we store a 2D output tile in a 3D tensor, we need to check when the plane changes across the z dimension
- // in order to take into account the presence of possible cross plane paddings
- //
- // | |
- // | plane0 |
- // | |
- // |__________________|
- // |******************|
- // | cross_plane_pad |
- // |******************|
- // | |
- // | plane1 |
- // | |
- // |__________________|
-
-    // The plane (zout) is calculated by dividing the row index by HEIGHT_GEMM3D
- zout = ((uint4)(0, 1, 2, 3) + (uint4)(COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0))) / (uint4)HEIGHT_GEMM3D;
- zout = min(DEPTH_GEMM3D - 1, zout);
-
- // Add offset due to the cross plane paddings
- zout *= (dst_cross_plane_pad * dst_stride_y);
-
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply dst_stride_z by DEPTH_GEMM3D
- dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
- // Add offset for batched GEMM
- dst_addr += z * dst_stride_z;
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-
- // Multiply by the weight of matrix-matrix product and store the result
-#if defined(ALPHA)
- SCALE_BLOCK(M0, float, acc, ALPHA);
-#endif // defined(ALPHA)
-
- // Add beta*bias
-#if defined(BETA)
- REPEAT_VAR_INIT_TO_CONST(M0, uint, zero, 0);
-
-#if defined(BROADCAST_BIAS)
- __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)4 * sizeof(float));
-
- LOAD_BLOCK(1, 4, float, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
- SCALE_BLOCK(1, float, bias, BETA);
-#endif // UNIT_BETA
-
- // acc = acc + bias[broadcasted]
- ADD_BLOCK_BROADCAST(M0, acc, bias0);
-
-#else // defined(BROADCAST_BIAS)
- __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)4 * sizeof(float)) + (COMPUTE_M0_START_ROW(get_global_id(1), M0,
- PARTIAL_STORE_M0)
- * src2_stride_y)
- + z * src2_stride_z;
-
- LOAD_BLOCK(M0, 4, float, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
- SCALE_BLOCK(M0, float, bias, BETA);
-#endif // UNIT_BETA
-
- // acc = acc + bias
- ADD_BLOCK(M0, acc, bias);
-
-#endif // defined(BROADCAST_BIAS)
-#endif // defined(BETA)
-
-#if defined(ACTIVATION_TYPE)
- ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, float, VEC_SIZE, acc, A_VAL, B_VAL);
-#endif // defined(ACTIVATION_TYPE)
-
- // Store the output block
- const bool cond_y = get_global_id(1) == 0;
- const bool cond_x = ((get_global_id(0) + 1) * 4 >= N);
- STORE_BLOCK_BOUNDARY_AWARE(M0, 4, float, acc, dst_addr, dst_stride_y, zout.s, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-}
-
-/** This OpenCL kernel computes the matrix by matrix multiplication between the matrix A (src0) and matrix B (src1) in case both matrices have not been reshaped
- *
- * @note This OpenCL kernel works with the 32-bit floating point data type (float) and uses the fma units.
- *       This OpenCL kernel is optimized for Bifrost when the number of matrix B columns is less than or equal to 1000.
- * @note The number of elements processed along the x and y directions must be passed at compile time using -DN0 and -DM0.
- * @note This kernel processes a fixed number of elements along x: -DN0=2.
- * @note The number of columns of matrix A and the number of columns of the matrix B need to be passed at compile time using -DK and -DN
- * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note The optional alpha value needs to be passed at compile time using -DALPHA
- * @note In case the matrix B has 3 dimensions and the matrix A more than 3, in order to avoid out-of-bounds reads, the number of channels of matrix B must be passed at compile time using MATRIX_B_DEPTH (e.g. -DMATRIX_B_DEPTH=16)
- * This case can happen when GEMM is used to perform the element-wise multiplication through a batched matrix multiplication (2D Winograd) and we have multiple inputs (e.g. a = [K, M, 16, Batches], b = [N, K, 16])
- *
- * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions should be passed at compile time as well using -DA_VAL= and -DB_VAL= respectively.
- * The activation function is performed after the bias addition
- * @note In case the input or output have to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
- * -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
- * -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
- * -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
- * -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
- * (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- *
- * @param[in] src0_ptr Pointer to the source matrix. Supported data types: F32
- * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src0_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src2_ptr (Optional) Pointer to the bias matrix. Supported data type: same as @p lhs_ptr
- * @param[in] src2_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes)
- * @param[in] src2_step_x (Optional) src2_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src2_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes)
- * @param[in] src2_step_y (Optional) src2_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src2_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr
- * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
- * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
- * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- * @param[in] src0_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] src1_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] src2_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] src_cross_plane_pad (Optional) Bottom paddings in unit of elements for the input tensor (only if defined REINTERPRET_INPUT_AS_3D)
- * @param[in] dst_cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
- */
-__kernel void gemm_mm_floating_point_f32_bifrost_1000(IMAGE_DECLARATION(src0),
- IMAGE_DECLARATION(src1),
-#if defined(BETA)
- IMAGE_DECLARATION(src2),
-#endif // defined(BETA)
- IMAGE_DECLARATION(dst),
- uint src0_stride_z,
- uint src1_stride_z,
-#if defined(BETA)
- uint src2_stride_z,
-#endif //defined(BETA)
- uint dst_stride_z
-#if defined(REINTERPRET_INPUT_AS_3D)
- ,
- uint src_cross_plane_pad
-#endif // REINTERPRET_INPUT_AS_3D
-#if defined(REINTERPRET_OUTPUT_AS_3D)
- ,
- uint dst_cross_plane_pad
-#endif // REINTERPRET_OUTPUT_AS_3D
- )
-{
-    // Requires N0 = 2: the accumulators (C) are float2, matrix A is loaded as float8 and matrix B with vload2 (one float2 row per K step).
- int idx = get_global_id(0) * N0;
-
- // Compute starting address for matrix A and Matrix B
- int2 src_addr = ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
-
- // Update address for the matrix A
- src_addr.s0 += COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0) * src0_stride_y;
-
- // Update address for the matrix B
- src_addr.s1 += idx * sizeof(float);
-
-#if defined(REINTERPRET_INPUT_AS_3D)
- // Since we load a 2D input tile from a 3D tensor, we need to check when the plane changes across the z dimension
- // in order to take into account the presence of possible cross plane paddings
- //
- // | |
- // | plane0 |
- // | |
- // |__________________|
- // |******************|
- // | cross_plane_pad |
- // |******************|
- // | |
- // | plane1 |
- // | |
- // |__________________|
-
-    // The plane (zin) is calculated by dividing the row index by HEIGHT_GEMM3D
- uint4 zin = ((uint4)(0, 1, 2, 3) + (uint4)(COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0))) / (uint4)HEIGHT_GEMM3D;
- zin = min(DEPTH_GEMM3D - 1, zin);
-
- // Add offset due to the cross plane paddings
- zin *= (src_cross_plane_pad * src0_stride_y);
-
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply src0_stride_z by DEPTH_GEMM3D
- src_addr.s0 += get_global_id(2) * src0_stride_z * DEPTH_GEMM3D;
-
-#else // defined(REINTERPRET_INPUT_AS_3D)
-
- // Add offset for batched GEMM
- src_addr.s0 += get_global_id(2) * src0_stride_z;
-
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
-#if defined(MATRIX_B_DEPTH)
- // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
- src_addr.s1 += (get_global_id(2) % MATRIX_B_DEPTH) * src1_stride_z;
-#else // defined(MATRIX_B_DEPTH)
- src_addr.s1 += get_global_id(2) * src1_stride_z;
-#endif // defined(MATRIX_B_DEPTH)
-
- // Initialize accumulators
- float2 acc0 = 0.0f;
-#if M0 > 1
- float2 acc1 = 0.0f;
-#endif // M0 > 1
-#if M0 > 2
- float2 acc2 = 0.0f;
-#endif // M0 > 2
-#if M0 > 3
- float2 acc3 = 0.0f;
-#endif // M0 > 3
-
- // A and B src indices get incremented at the same time.
- int i = 0;
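-    // The main loop below is unrolled by 8 along K: each pass reads a float8 of A values for every
-    // active row and eight float2 rows of B.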
- for(; i <= ((int)K - 8); i += 8)
- {
-#if defined(REINTERPRET_INPUT_AS_3D)
- // Load values from matrix A
- float8 a0 = vload8(0, (__global float *)(src0_ptr + src_addr.s0 + zin.s0));
-#else // defined(REINTERPRET_INPUT_AS_3D)
- // Load values from matrix A
- float8 a0 = vload8(0, (__global float *)(src0_ptr + src_addr.s0));
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
- // Load values from matrix B
- float2 b0 = vload2(0, (__global float *)(src1_ptr + src_addr.s1));
- src_addr.s1 += src1_stride_y;
- float2 b1 = vload2(0, (__global float *)(src1_ptr + src_addr.s1));
- src_addr.s1 += src1_stride_y;
- float2 b2 = vload2(0, (__global float *)(src1_ptr + src_addr.s1));
- src_addr.s1 += src1_stride_y;
- float2 b3 = vload2(0, (__global float *)(src1_ptr + src_addr.s1));
- src_addr.s1 += src1_stride_y;
- float2 b4 = vload2(0, (__global float *)(src1_ptr + src_addr.s1));
- src_addr.s1 += src1_stride_y;
- float2 b5 = vload2(0, (__global float *)(src1_ptr + src_addr.s1));
- src_addr.s1 += src1_stride_y;
- float2 b6 = vload2(0, (__global float *)(src1_ptr + src_addr.s1));
- src_addr.s1 += src1_stride_y;
- float2 b7 = vload2(0, (__global float *)(src1_ptr + src_addr.s1));
- src_addr.s1 += src1_stride_y;
-
- // Multiply and accumulate
- acc0.s0 = fma(a0.s0, b0.s0, acc0.s0);
- acc0.s0 = fma(a0.s1, b1.s0, acc0.s0);
- acc0.s0 = fma(a0.s2, b2.s0, acc0.s0);
- acc0.s0 = fma(a0.s3, b3.s0, acc0.s0);
- acc0.s0 = fma(a0.s4, b4.s0, acc0.s0);
- acc0.s0 = fma(a0.s5, b5.s0, acc0.s0);
- acc0.s0 = fma(a0.s6, b6.s0, acc0.s0);
- acc0.s0 = fma(a0.s7, b7.s0, acc0.s0);
-
- acc0.s1 = fma(a0.s0, b0.s1, acc0.s1);
- acc0.s1 = fma(a0.s1, b1.s1, acc0.s1);
- acc0.s1 = fma(a0.s2, b2.s1, acc0.s1);
- acc0.s1 = fma(a0.s3, b3.s1, acc0.s1);
- acc0.s1 = fma(a0.s4, b4.s1, acc0.s1);
- acc0.s1 = fma(a0.s5, b5.s1, acc0.s1);
- acc0.s1 = fma(a0.s6, b6.s1, acc0.s1);
- acc0.s1 = fma(a0.s7, b7.s1, acc0.s1);
-
-#if M0 > 1
-#if defined(REINTERPRET_INPUT_AS_3D)
- a0 = vload8(0, (__global float *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y + zin.s1));
-#else // defined(REINTERPRET_INPUT_AS_3D)
- a0 = vload8(0, (__global float *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
-#endif // defined(REINTERPRET_INPUT_AS_3D)
- acc1.s0 = fma(a0.s0, b0.s0, acc1.s0);
- acc1.s0 = fma(a0.s1, b1.s0, acc1.s0);
- acc1.s0 = fma(a0.s2, b2.s0, acc1.s0);
- acc1.s0 = fma(a0.s3, b3.s0, acc1.s0);
- acc1.s0 = fma(a0.s4, b4.s0, acc1.s0);
- acc1.s0 = fma(a0.s5, b5.s0, acc1.s0);
- acc1.s0 = fma(a0.s6, b6.s0, acc1.s0);
- acc1.s0 = fma(a0.s7, b7.s0, acc1.s0);
-
- acc1.s1 = fma(a0.s0, b0.s1, acc1.s1);
- acc1.s1 = fma(a0.s1, b1.s1, acc1.s1);
- acc1.s1 = fma(a0.s2, b2.s1, acc1.s1);
- acc1.s1 = fma(a0.s3, b3.s1, acc1.s1);
- acc1.s1 = fma(a0.s4, b4.s1, acc1.s1);
- acc1.s1 = fma(a0.s5, b5.s1, acc1.s1);
- acc1.s1 = fma(a0.s6, b6.s1, acc1.s1);
- acc1.s1 = fma(a0.s7, b7.s1, acc1.s1);
-#endif // M0 > 1
-#if M0 > 2
-#if defined(REINTERPRET_INPUT_AS_3D)
- a0 = vload8(0, (__global float *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y + zin.s2));
-#else // defined(REINTERPRET_INPUT_AS_3D)
- a0 = vload8(0, (__global float *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
-#endif // defined(REINTERPRET_INPUT_AS_3D)
- acc2.s0 = fma(a0.s0, b0.s0, acc2.s0);
- acc2.s0 = fma(a0.s1, b1.s0, acc2.s0);
- acc2.s0 = fma(a0.s2, b2.s0, acc2.s0);
- acc2.s0 = fma(a0.s3, b3.s0, acc2.s0);
- acc2.s0 = fma(a0.s4, b4.s0, acc2.s0);
- acc2.s0 = fma(a0.s5, b5.s0, acc2.s0);
- acc2.s0 = fma(a0.s6, b6.s0, acc2.s0);
- acc2.s0 = fma(a0.s7, b7.s0, acc2.s0);
-
- acc2.s1 = fma(a0.s0, b0.s1, acc2.s1);
- acc2.s1 = fma(a0.s1, b1.s1, acc2.s1);
- acc2.s1 = fma(a0.s2, b2.s1, acc2.s1);
- acc2.s1 = fma(a0.s3, b3.s1, acc2.s1);
- acc2.s1 = fma(a0.s4, b4.s1, acc2.s1);
- acc2.s1 = fma(a0.s5, b5.s1, acc2.s1);
- acc2.s1 = fma(a0.s6, b6.s1, acc2.s1);
- acc2.s1 = fma(a0.s7, b7.s1, acc2.s1);
-#endif // M0 > 2
-#if M0 > 3
-#if defined(REINTERPRET_INPUT_AS_3D)
- a0 = vload8(0, (__global float *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y + zin.s3));
-#else // defined(REINTERPRET_INPUT_AS_3D)
- a0 = vload8(0, (__global float *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
-#endif // defined(REINTERPRET_INPUT_AS_3D)
- acc3.s0 = fma(a0.s0, b0.s0, acc3.s0);
- acc3.s0 = fma(a0.s1, b1.s0, acc3.s0);
- acc3.s0 = fma(a0.s2, b2.s0, acc3.s0);
- acc3.s0 = fma(a0.s3, b3.s0, acc3.s0);
- acc3.s0 = fma(a0.s4, b4.s0, acc3.s0);
- acc3.s0 = fma(a0.s5, b5.s0, acc3.s0);
- acc3.s0 = fma(a0.s6, b6.s0, acc3.s0);
- acc3.s0 = fma(a0.s7, b7.s0, acc3.s0);
-
- acc3.s1 = fma(a0.s0, b0.s1, acc3.s1);
- acc3.s1 = fma(a0.s1, b1.s1, acc3.s1);
- acc3.s1 = fma(a0.s2, b2.s1, acc3.s1);
- acc3.s1 = fma(a0.s3, b3.s1, acc3.s1);
- acc3.s1 = fma(a0.s4, b4.s1, acc3.s1);
- acc3.s1 = fma(a0.s5, b5.s1, acc3.s1);
- acc3.s1 = fma(a0.s6, b6.s1, acc3.s1);
- acc3.s1 = fma(a0.s7, b7.s1, acc3.s1);
-#endif // M0 > 3
-
- src_addr.s0 += sizeof(float) * 8;
- }
-    // Leftover loop: process the remaining K elements one float at a time
- for(; i < (int)K; ++i)
- {
-#if defined(REINTERPRET_INPUT_AS_3D)
- // Load values from matrix A
- float a0 = *((__global float *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y + zin.s0));
-#if M0 > 1
- float a1 = *((__global float *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y + zin.s1));
-#endif // M0 > 1
-#if M0 > 2
- float a2 = *((__global float *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y + zin.s2));
-#endif // M0 > 2
-#if M0 > 3
- float a3 = *((__global float *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y + zin.s3));
-#endif // M0 > 3
-#else // defined(REINTERPRET_INPUT_AS_3D)
- // Load values from matrix A
- float a0 = *((__global float *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
-#if M0 > 1
- float a1 = *((__global float *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
-#endif // M0 > 1
-#if M0 > 2
- float a2 = *((__global float *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
-#endif // M0 > 2
-#if M0 > 3
- float a3 = *((__global float *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
-#endif // M0 > 3
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
- // Load values from matrix B
- float2 b0 = vload2(0, (__global float *)(src1_ptr + src_addr.s1));
- src_addr.s1 += src1_stride_y;
-
- // Multiply and accumulate
- acc0.s0 = fma(a0, b0.s0, acc0.s0);
- acc0.s1 = fma(a0, b0.s1, acc0.s1);
-#if M0 > 1
- acc1.s0 = fma(a1, b0.s0, acc1.s0);
- acc1.s1 = fma(a1, b0.s1, acc1.s1);
-#endif // M0 > 1
-#if M0 > 2
- acc2.s0 = fma(a2, b0.s0, acc2.s0);
- acc2.s1 = fma(a2, b0.s1, acc2.s1);
-#endif // M0 > 2
-#if M0 > 3
- acc3.s0 = fma(a3, b0.s0, acc3.s0);
- acc3.s1 = fma(a3, b0.s1, acc3.s1);
-#endif // M0 > 3
-
- src_addr.s0 += sizeof(float);
- }
-
- int z = get_global_id(2);
-
- // Compute dst address
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (get_global_id(0) * (uint)2 * sizeof(float)) + (COMPUTE_M0_START_ROW(get_global_id(1), M0,
- PARTIAL_STORE_M0) * dst_stride_y);
-
- uint4 zout = 0;
-
-#if defined(REINTERPRET_OUTPUT_AS_3D)
-
- // Since we store a 2D output tile in a 3D tensor, we need to check when the plane changes across the z dimension
- // in order to take into account the presence of possible cross plane paddings
- //
- // | |
- // | plane0 |
- // | |
- // |__________________|
- // |******************|
- // | cross_plane_pad |
- // |******************|
- // | |
- // | plane1 |
- // | |
- // |__________________|
-
-    // The plane (zout) is calculated by dividing the row index by HEIGHT_GEMM3D
- zout = ((uint4)(0, 1, 2, 3) + (uint4)(COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0))) / (uint4)HEIGHT_GEMM3D;
- zout = min(DEPTH_GEMM3D - 1, zout);
-
- // Add offset due to the cross plane paddings
- zout *= (dst_cross_plane_pad * dst_stride_y);
-
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply dst_stride_z by DEPTH_GEMM3D
- dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
- // Add offset for batched GEMM
- dst_addr += z * dst_stride_z;
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-
- // Multiply by the weight of matrix-matrix product and store the result
-#if defined(ALPHA)
- SCALE_BLOCK(M0, float, acc, ALPHA);
-#endif // defined(ALPHA)
-
- // Add beta*bias
-#if defined(BETA)
- REPEAT_VAR_INIT_TO_CONST(M0, uint, zero, 0);
-
-#if defined(BROADCAST_BIAS)
- __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)2 * sizeof(float));
-
- LOAD_BLOCK(1, 2, float, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
- SCALE_BLOCK(1, float, bias, BETA);
-#endif // UNIT_BETA
-
- // acc = acc + bias[broadcasted]
- ADD_BLOCK_BROADCAST(M0, acc, bias0);
-
-#else // defined(BROADCAST_BIAS)
- __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)2 * sizeof(float)) + (COMPUTE_M0_START_ROW(get_global_id(1), M0,
- PARTIAL_STORE_M0)
- * src2_stride_y)
- + z * src2_stride_z;
-
- LOAD_BLOCK(M0, 2, float, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
- SCALE_BLOCK(M0, float, bias, BETA);
-#endif // UNIT_BETA
-
- // acc = acc + bias
- ADD_BLOCK(M0, acc, bias);
-
-#endif // defined(BROADCAST_BIAS)
-#endif // defined(BETA)
-
-#if defined(ACTIVATION_TYPE)
- ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, float, VEC_SIZE, acc, A_VAL, B_VAL);
-#endif // defined(ACTIVATION_TYPE)
-
- // Store the output block
- const bool cond_y = get_global_id(1) == 0;
- const bool cond_x = ((get_global_id(0) + 1) * 2 >= N);
- STORE_BLOCK_BOUNDARY_AWARE(M0, 2, float, acc, dst_addr, dst_stride_y, zout.s, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-}
-
-#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED)
-/** This OpenCL kernel computes the matrix by matrix multiplication between the matrix A (src0) and matrix B (src1) in case both matrices have not been reshaped
- *
- * @note This OpenCL kernel works with the 16-bit floating point data type (half) and accumulates the result in 32-bit floating point variables.
- * @note The number of elements processed along the x and y directions must be passed at compile time using -DN0 and -DM0.
- * @note This kernel processes a fixed number of elements along x: -DN0=8.
- * @note The number of columns of matrix A and the number of columns of the matrix B need to be passed at compile time using -DK and -DN
- * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note The optional alpha value needs to be passed at compile time using -DALPHA
- * @note In case the matrix B has 3 dimensions and the matrix A more than 3, in order to avoid out-of-bounds reads, the number of channels of matrix B must be passed at compile time using MATRIX_B_DEPTH (e.g. -DMATRIX_B_DEPTH=16)
- * This case can happen when GEMM is used to perform the element-wise multiplication through a batched matrix multiplication (2D Winograd) and we have multiple inputs (e.g. a = [K, M, 16, Batches], b = [N, K, 16])
- *
- * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions should be passed at compile time as well using -DA_VAL= and -DB_VAL= respectively.
- * The activation function is performed after the bias addition
- * @note In case the input or output have to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
- * -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
- * -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
- * -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
- * -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
- * (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- *
- * @param[in] src0_ptr Pointer to the source matrix. Supported data types: F16
- * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src0_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src2_ptr (Optional) Pointer to the bias matrix. Supported data type: same as @p lhs_ptr
- * @param[in] src2_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes)
- * @param[in] src2_step_x (Optional) src2_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src2_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes)
- * @param[in] src2_step_y (Optional) src2_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src2_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr
- * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
- * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
- * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- * @param[in] src0_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] src1_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] src2_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] src_cross_plane_pad (Optional) Bottom paddings in unit of elements for the input tensor (only if defined REINTERPRET_INPUT_AS_3D)
- * @param[in] dst_cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
- */
-__kernel void gemm_mm_floating_point_f16_bifrost_acc32(IMAGE_DECLARATION(src0),
- IMAGE_DECLARATION(src1),
-#if defined(BETA)
- IMAGE_DECLARATION(src2),
-#endif // defined(BETA)
- IMAGE_DECLARATION(dst),
- uint src0_stride_z,
- uint src1_stride_z,
-#if defined(BETA)
- uint src2_stride_z,
-#endif //defined(BETA)
- uint dst_stride_z
-#if defined(REINTERPRET_INPUT_AS_3D)
- ,
- uint src_cross_plane_pad
-#endif // REINTERPRET_INPUT_AS_3D
-#if defined(REINTERPRET_OUTPUT_AS_3D)
- ,
- uint dst_cross_plane_pad
-#endif // REINTERPRET_OUTPUT_AS_3D
- )
-{
- int idx = get_global_id(0) * N0;
-
- // Compute starting address for matrix A and Matrix B
- int2 src_addr = ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
-
- // Update address for the matrix A
- src_addr.s0 += COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0) * src0_stride_y;
-
- // Update address for the matrix B
- src_addr.s1 += idx * sizeof(half);
-
-#if defined(REINTERPRET_INPUT_AS_3D)
- // Since we load a 2D input tile from a 3D tensor, we need to check when the plane changes across the z dimension
- // in order to take into account the presence of possible cross plane paddings
- //
- // | |
- // | plane0 |
- // | |
- // |__________________|
- // |******************|
- // | cross_plane_pad |
- // |******************|
- // | |
- // | plane1 |
- // | |
- // |__________________|
-
- // The plane (zin) is calculated dividing row by HEIGHT_GEMM3D
- uint4 zin = ((uint4)(0, 1, 2, 3) + (uint4)(COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0))) / (uint4)HEIGHT_GEMM3D;
- zin = min(DEPTH_GEMM3D - 1, zin);
-
- // Add offset due to the cross plane paddings
- zin *= (src_cross_plane_pad * src0_stride_y);
-
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply src0_stride_z by DEPTH_GEMM3D
- src_addr.s0 += get_global_id(2) * src0_stride_z * DEPTH_GEMM3D;
-
-#else // defined(REINTERPRET_INPUT_AS_3D)
-
- // Add offset for batched GEMM
- src_addr.s0 += get_global_id(2) * src0_stride_z;
-
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
-#if defined(MATRIX_B_DEPTH)
- // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
- src_addr.s1 += (get_global_id(2) % MATRIX_B_DEPTH) * src1_stride_z;
-#else // defined(MATRIX_B_DEPTH)
- src_addr.s1 += get_global_id(2) * src1_stride_z;
-#endif // defined(MATRIX_B_DEPTH)
-
- float8 acc0 = 0.0h;
-#if M0 > 1
- float8 acc1 = 0.0h;
-#endif // M0 > 1
-#if M0 > 2
- float8 acc2 = 0.0h;
-#endif // M0 > 2
-#if M0 > 3
- float8 acc3 = 0.0h;
-#endif // M0 > 3
-
- int i = 0;
- for(; i <= ((int)K - 4); i += 4)
- {
-#if defined(REINTERPRET_INPUT_AS_3D)
- // Load values from matrix A
- LOAD_BLOCK(M0, 4, half, a, src0_ptr, src_addr.s0, src0_stride_y, zin.s);
-#else // defined(REINTERPRET_INPUT_AS_3D)
- // Load values from matrix A
- half4 a0 = vload4(0, (__global half *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
-#if M0 > 1
- half4 a1 = vload4(0, (__global half *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
-#endif // M0 > 1
-#if M0 > 2
- half4 a2 = vload4(0, (__global half *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
-#endif // M0 > 2
-#if M0 > 3
- half4 a3 = vload4(0, (__global half *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
-#endif // M0 > 3
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
- // Load values from matrix B
- float8 b0 = convert_float8(vload8(0, (__global half *)(src1_ptr + src_addr.s1)));
- src_addr.s1 += src1_stride_y;
-
- // Accumulate
- acc0 = fma(b0, (float8)a0.s0, acc0);
-#if M0 > 1
- acc1 = fma(b0, (float8)a1.s0, acc1);
-#endif // M0 > 1
-#if M0 > 2
- acc2 = fma(b0, (float8)a2.s0, acc2);
-#endif // M0 > 2
-#if M0 > 3
- acc3 = fma(b0, (float8)a3.s0, acc3);
-#endif // M0 > 3
-
- b0 = convert_float8(vload8(0, (__global half *)(src1_ptr + src_addr.s1)));
- src_addr.s1 += src1_stride_y;
- acc0 = fma(b0, (float8)a0.s1, acc0);
-#if M0 > 1
- acc1 = fma(b0, (float8)a1.s1, acc1);
-#endif // M0 > 1
-#if M0 > 2
- acc2 = fma(b0, (float8)a2.s1, acc2);
-#endif // M0 > 2
-#if M0 > 3
- acc3 = fma(b0, (float8)a3.s1, acc3);
-#endif // M0 > 3
-
- b0 = convert_float8(vload8(0, (__global half *)(src1_ptr + src_addr.s1)));
- src_addr.s1 += src1_stride_y;
- acc0 = fma(b0, (float8)a0.s2, acc0);
-#if M0 > 1
- acc1 = fma(b0, (float8)a1.s2, acc1);
-#endif // M0 > 1
-#if M0 > 2
- acc2 = fma(b0, (float8)a2.s2, acc2);
-#endif // M0 > 2
-#if M0 > 3
- acc3 = fma(b0, (float8)a3.s2, acc3);
-#endif // M0 > 3
-
- b0 = convert_float8(vload8(0, (__global half *)(src1_ptr + src_addr.s1)));
- src_addr.s1 += src1_stride_y;
- acc0 = fma(b0, (float8)a0.s3, acc0);
-#if M0 > 1
- acc1 = fma(b0, (float8)a1.s3, acc1);
-#endif // M0 > 1
-#if M0 > 2
- acc2 = fma(b0, (float8)a2.s3, acc2);
-#endif // M0 > 2
-#if M0 > 3
- acc3 = fma(b0, (float8)a3.s3, acc3);
-#endif // M0 > 3
-
- src_addr.s0 += 4 * sizeof(half);
- }
-
- for(; i < (int)K; ++i)
- {
-#if defined(REINTERPRET_INPUT_AS_3D)
- // Load values from matrix A
- half a0 = *((__global half *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y + zin.s0));
-#if M0 > 1
- half a1 = *((__global half *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y + zin.s1));
-#endif // M0 > 1
-#if M0 > 2
- half a2 = *((__global half *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y + zin.s2));
-#endif // M0 > 2
-#if M0 > 3
- half a3 = *((__global half *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y + zin.s3));
-#endif // M0 > 3
-#else // defined(REINTERPRET_INPUT_AS_3D)
- // Load values from matrix A
- half a0 = *((__global half *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
-#if M0 > 1
- half a1 = *((__global half *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
-#endif // M0 > 1
-#if M0 > 2
- half a2 = *((__global half *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
-#endif // M0 > 2
-#if M0 > 3
- half a3 = *((__global half *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
-#endif // M0 > 3
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
- // Load values from matrix B
- float8 b0 = convert_float8(vload8(0, (__global half *)(src1_ptr + src_addr.s1)));
-
- src_addr += (int2)(sizeof(half), src1_stride_y);
-
- // Accumulate
- acc0 = fma(b0, (float8)a0, acc0); // b0 * (half8)a0;
-#if M0 > 1
- acc1 = fma(b0, (float8)a1, acc1); // b0 * (half8)a1;
-#endif // M0 > 1
-#if M0 > 2
- acc2 = fma(b0, (float8)a2, acc2); // b0 * (half8)a2;
-#endif // M0 > 2
-#if M0 > 3
- acc3 = fma(b0, (float8)a3, acc3); // b0 * (half8)a3;
-#endif // M0 > 3
- }
-
- int z = get_global_id(2);
-
- // Compute dst address
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half)) + (COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0) * dst_stride_y);
-
- uint4 zout = 0;
-
-#if defined(REINTERPRET_OUTPUT_AS_3D)
-
- // Since we store a 2D output tile in a 3D tensor, we need to check when the plane changes across the z dimension
- // in order to take into account the presence of possible cross plane paddings
- //
- // | |
- // | plane0 |
- // | |
- // |__________________|
- // |******************|
- // | cross_plane_pad |
- // |******************|
- // | |
- // | plane1 |
- // | |
- // |__________________|
-
- // The plane (zout) is calculated dividing row by HEIGHT_GEMM3D
- zout = ((uint4)(0, 1, 2, 3) + (uint4)(COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0))) / (uint4)HEIGHT_GEMM3D;
- zout = min(DEPTH_GEMM3D - 1, zout);
-
- // Add offset due to the cross plane paddings
- zout *= (dst_cross_plane_pad * dst_stride_y);
-
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply dst_stride_z by DEPTH_GEMM3D
- dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
- // Add offset for batched GEMM
- dst_addr += z * dst_stride_z;
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-
- // Multiply by the weight of matrix-matrix product and store the result
-#if defined(ALPHA)
- SCALE_BLOCK(M0, float, acc, ALPHA);
-#endif // defined(ALPHA)
-
-#if defined(BETA)
- REPEAT_VAR_INIT_TO_CONST(M0, uint, zero, 0);
-
-#if defined(BROADCAST_BIAS)
- __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half));
-
- LOAD_BLOCK(1, 8, half, bias, src2_addr, 0, src2_stride_y, zero);
-
- float8 bias_f0 = convert_float8(bias0);
-
-#ifndef UNIT_BETA
- SCALE_BLOCK(1, float, bias_f, BETA);
-#endif // UNIT_BETA
-
- // acc = acc + bias[broadcasted]
- ADD_BLOCK_BROADCAST(M0, acc, bias_f0);
-
-#else // defined(BROADCAST_BIAS)
- __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half)) + (COMPUTE_M0_START_ROW(get_global_id(1), M0,
- PARTIAL_STORE_M0)
- * src2_stride_y)
- + z * src2_stride_z;
-
- LOAD_BLOCK(M0, 8, half, bias, src2_addr, 0, src2_stride_y, zero);
-
- float8 bias_f0 = convert_float8(bias0);
-#if M0 > 1
- float8 bias_f1 = convert_float8(bias1);
-#endif // M0 > 1
-#if M0 > 2
- float8 bias_f2 = convert_float8(bias2);
-#endif // M0 > 2
-#if M0 > 3
- float8 bias_f3 = convert_float8(bias3);
-#endif // M0 > 3
-
-#ifndef UNIT_BETA
- SCALE_BLOCK(M0, float, bias_f, BETA);
-#endif // UNIT_BETA
-
- // acc = acc + bias
- ADD_BLOCK(M0, acc, bias_f);
-
-#endif // defined(BROADCAST_BIAS)
-#endif // defined(BETA)
-
- half8 acc_h0 = convert_half8(acc0);
-#if M0 > 1
- half8 acc_h1 = convert_half8(acc1);
-#endif // M0 > 1
-#if M0 > 2
- half8 acc_h2 = convert_half8(acc2);
-#endif // M0 > 2
-#if M0 > 3
- half8 acc_h3 = convert_half8(acc3);
-#endif // M0 > 3
-
-#if defined(ACTIVATION_TYPE)
- ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, half, VEC_SIZE, acc_h, A_VAL, B_VAL);
-#endif // defined(ACTIVATION_TYPE)
-
- // Store the output block
- const bool cond_y = get_global_id(1) == 0;
- const bool cond_x = ((get_global_id(0) + 1) * 8 >= N);
- STORE_BLOCK_BOUNDARY_AWARE(M0, 8, half, acc_h, dst_addr, dst_stride_y, zout.s, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-}
-
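The "_acc32" variant above loads fp16 operands but keeps its accumulators in fp32, converting back to fp16 only when storing; the extra convert_float8/convert_half8 calls buy better numerical accuracy over long K. A minimal, self-contained OpenCL sketch of that pattern follows (kernel and argument names are illustrative, not part of this file):

    #pragma OPENCL EXTENSION cl_khr_fp16 : enable

    // Illustrative only: one row of A (fp16) times an 8-column slice of B (fp16),
    // accumulated in fp32 and narrowed to fp16 on the final store.
    __kernel void dot_row_f16_acc32(__global const half *a, // K elements
                                    __global const half *b, // K x 8 elements, row-major
                                    __global half *out,     // 8 elements
                                    int k)
    {
        float8 acc = 0.0f;
        for (int i = 0; i < k; ++i)
        {
            float8 b_row = convert_float8(vload8(i, b)); // widen the B row to fp32
            acc          = fma(b_row, (float8)a[i], acc); // fp32 multiply-accumulate
        }
        vstore8(convert_half8(acc), 0, out); // single fp16 rounding at the end
    }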
-/** This OpenCL kernel computes the matrix by matrix multiplication between the matrix A (src0) and matrix B (src1) in case both matrices have not been reshaped
- *
- * @note This OpenCL kernel works with the 16-bit floating point data type (half) and uses the fma units.
- * @note The number of elements processed along the x and y directions must be passed at compile time using -DN0 and -DM0.
- * @note This kernel processes a fixed number of elements along x: -DN0=8.
- * @note The number of columns of matrix A and the number of columns of the matrix B need to be passed at compile time using -DK and -DN
- * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note The optional alpha value needs to be passed at compile time using -DALPHA
- * @note In case the matrix B has 3 dimensions and the matrix A more than 3, in order to avoid out-of-bounds reads, the number of channels of matrix B must be passed at compile time using MATRIX_B_DEPTH (e.g. -DMATRIX_B_DEPTH=16)
- * This case can happen when GEMM is used to perform the element-wise multiplication through a batched matrix multiplication (2D Winograd) and we have multiple inputs (e.g. a = [K, M, 16, Batches], b = [N, K, 16])
- *
- * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions should also be passed at compile time using -DA_VAL= and -DB_VAL= respectively.
- * The activation function is performed after the bias addition
- * @note In case the input or output have to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
- * -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
- * -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
- * -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
- * -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
- * (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- *
- * @param[in] src0_ptr Pointer to the source matrix. Supported data types: F16
- * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src0_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src2_ptr (Optional) Pointer to the bias matrix. Supported data type: same as @p src0_ptr
- * @param[in] src2_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes)
- * @param[in] src2_step_x (Optional) src2_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src2_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes)
- * @param[in] src2_step_y (Optional) src2_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src2_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
- * @param[out] dst_ptr Pointer to the destination matrix. Supported data types: same as @p src0_ptr
- * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- * @param[in] src0_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] src1_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] src2_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] src_cross_plane_pad (Optional) Bottom paddings in unit of elements for the input tensor (only if defined REINTERPRET_INPUT_AS_3D)
- * @param[in] dst_cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
- */
-__kernel void gemm_mm_floating_point_f16_bifrost(IMAGE_DECLARATION(src0),
- IMAGE_DECLARATION(src1),
-#if defined(BETA)
- IMAGE_DECLARATION(src2),
-#endif // defined(BETA)
- IMAGE_DECLARATION(dst),
- uint src0_stride_z,
- uint src1_stride_z,
-#if defined(BETA)
- uint src2_stride_z,
-#endif //defined(BETA)
- uint dst_stride_z
-#if defined(REINTERPRET_INPUT_AS_3D)
- ,
- uint src_cross_plane_pad
-#endif // REINTERPRET_INPUT_AS_3D
-#if defined(REINTERPRET_OUTPUT_AS_3D)
- ,
- uint dst_cross_plane_pad
-#endif // REINTERPRET_OUTPUT_AS_3D
- )
-{
- int idx = get_global_id(0) * N0;
-
- // Compute starting address for matrix A and Matrix B
- int2 src_addr = ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
-
- // Update address for the matrix A
- src_addr.s0 += COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0) * src0_stride_y;
-
- // Update address for the matrix B
- src_addr.s1 += idx * sizeof(half);
-
-#if defined(REINTERPRET_INPUT_AS_3D)
- // Since we load a 2D input tile from a 3D tensor, we need to check when the plane changes across the z dimension
- // in order to take into account the presence of possible cross plane paddings
- //
- // | |
- // | plane0 |
- // | |
- // |__________________|
- // |******************|
- // | cross_plane_pad |
- // |******************|
- // | |
- // | plane1 |
- // | |
- // |__________________|
-
- // The plane (zin) is calculated dividing row by HEIGHT_GEMM3D
- uint4 zin = ((uint4)(0, 1, 2, 3) + (uint4)(COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0))) / (uint4)HEIGHT_GEMM3D;
- zin = min(DEPTH_GEMM3D - 1, zin);
-
- // Add offset due to the cross plane paddings
- zin *= (src_cross_plane_pad * src0_stride_y);
-
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply src0_stride_z by DEPTH_GEMM3D
- src_addr.s0 += get_global_id(2) * src0_stride_z * DEPTH_GEMM3D;
-
-#else // defined(REINTERPRET_INPUT_AS_3D)
-
- // Add offset for batched GEMM
- src_addr.s0 += get_global_id(2) * src0_stride_z;
-
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
-#if defined(MATRIX_B_DEPTH)
- // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
- src_addr.s1 += (get_global_id(2) % MATRIX_B_DEPTH) * src1_stride_z;
-#else // defined(MATRIX_B_DEPTH)
- src_addr.s1 += get_global_id(2) * src1_stride_z;
-#endif // defined(MATRIX_B_DEPTH)
-
- half8 acc0 = 0.0h;
-#if M0 > 1
- half8 acc1 = 0.0h;
-#endif // M0 > 1
-#if M0 > 2
- half8 acc2 = 0.0h;
-#endif // M0 > 2
-#if M0 > 3
- half8 acc3 = 0.0h;
-#endif // M0 > 3
-
- int i = 0;
- for(; i <= ((int)K - 4); i += 4)
- {
-#if defined(REINTERPRET_INPUT_AS_3D)
- // Load values from matrix A
- LOAD_BLOCK(M0, 4, half, a, src0_ptr, src_addr.s0, src0_stride_y, zin.s);
-#else // defined(REINTERPRET_INPUT_AS_3D)
- // Load values from matrix A
- half4 a0 = vload4(0, (__global half *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
-#if M0 > 1
- half4 a1 = vload4(0, (__global half *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
-#endif // M0 > 1
-#if M0 > 2
- half4 a2 = vload4(0, (__global half *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
-#endif // M0 > 2
-#if M0 > 3
- half4 a3 = vload4(0, (__global half *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
-#endif // M0 > 3
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
- // Load values from matrix B
- half8 b0 = vload8(0, (__global half *)(src1_ptr + src_addr.s1));
- src_addr.s1 += src1_stride_y;
-
- // Accumulate
- acc0 = fma(b0, (half8)a0.s0, acc0);
-#if M0 > 1
- acc1 = fma(b0, (half8)a1.s0, acc1);
-#endif // M0 > 1
-#if M0 > 2
- acc2 = fma(b0, (half8)a2.s0, acc2);
-#endif // M0 > 2
-#if M0 > 3
- acc3 = fma(b0, (half8)a3.s0, acc3);
-#endif // M0 > 3
-
- b0 = vload8(0, (__global half *)(src1_ptr + src_addr.s1));
- src_addr.s1 += src1_stride_y;
- acc0 = fma(b0, (half8)a0.s1, acc0);
-#if M0 > 1
- acc1 = fma(b0, (half8)a1.s1, acc1);
-#endif // M0 > 1
-#if M0 > 2
- acc2 = fma(b0, (half8)a2.s1, acc2);
-#endif // M0 > 2
-#if M0 > 3
- acc3 = fma(b0, (half8)a3.s1, acc3);
-#endif // M0 > 3
-
- b0 = vload8(0, (__global half *)(src1_ptr + src_addr.s1));
- src_addr.s1 += src1_stride_y;
- acc0 = fma(b0, (half8)a0.s2, acc0);
-#if M0 > 1
- acc1 = fma(b0, (half8)a1.s2, acc1);
-#endif // M0 > 1
-#if M0 > 2
- acc2 = fma(b0, (half8)a2.s2, acc2);
-#endif // M0 > 2
-#if M0 > 3
- acc3 = fma(b0, (half8)a3.s2, acc3);
-#endif // M0 > 3
-
- b0 = vload8(0, (__global half *)(src1_ptr + src_addr.s1));
- src_addr.s1 += src1_stride_y;
- acc0 = fma(b0, (half8)a0.s3, acc0);
-#if M0 > 1
- acc1 = fma(b0, (half8)a1.s3, acc1);
-#endif // M0 > 1
-#if M0 > 2
- acc2 = fma(b0, (half8)a2.s3, acc2);
-#endif // M0 > 2
-#if M0 > 3
- acc3 = fma(b0, (half8)a3.s3, acc3);
-#endif // M0 > 3
-
- src_addr.s0 += 4 * sizeof(half);
- }
-
- for(; i < (int)K; ++i)
- {
-#if defined(REINTERPRET_INPUT_AS_3D)
- // Load values from matrix A
- half a0 = *((__global half *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y + zin.s0));
-#if M0 > 1
- half a1 = *((__global half *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y + zin.s1));
-#endif // M0 > 1
-#if M0 > 2
- half a2 = *((__global half *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y + zin.s2));
-#endif // M0 > 2
-#if M0 > 3
- half a3 = *((__global half *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y + zin.s3));
-#endif // M0 > 3
-#else // defined(REINTERPRET_INPUT_AS_3D)
- // Load values from matrix A
- half a0 = *((__global half *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
-#if M0 > 1
- half a1 = *((__global half *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
-#endif // M0 > 1
-#if M0 > 2
- half a2 = *((__global half *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
-#endif // M0 > 2
-#if M0 > 3
- half a3 = *((__global half *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
-#endif // M0 > 3
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
- // Load values from matrix B
- half8 b0 = vload8(0, (__global half *)(src1_ptr + src_addr.s1));
-
- src_addr += (int2)(sizeof(half), src1_stride_y);
-
- // Accumulate
- acc0 = fma(b0, (half8)a0, acc0); // b0 * (half8)a0;
-#if M0 > 1
- acc1 = fma(b0, (half8)a1, acc1); // b0 * (half8)a1;
-#endif // M0 > 1
-#if M0 > 2
- acc2 = fma(b0, (half8)a2, acc2); // b0 * (half8)a2;
-#endif // M0 > 2
-#if M0 > 3
- acc3 = fma(b0, (half8)a3, acc3); // b0 * (half8)a3;
-#endif // M0 > 3
- }
-
- int z = get_global_id(2);
-
- // Compute dst address
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half)) + (COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0) * dst_stride_y);
-
- uint4 zout = 0;
-
-#if defined(REINTERPRET_OUTPUT_AS_3D)
-
- // Since we store a 2D output tile in a 3D tensor, we need to check when the plane changes across the z dimension
- // in order to take into account the presence of possible cross plane paddings
- //
- // | |
- // | plane0 |
- // | |
- // |__________________|
- // |******************|
- // | cross_plane_pad |
- // |******************|
- // | |
- // | plane1 |
- // | |
- // |__________________|
-
- // The plane (zout) is calculated dividing row by HEIGHT_GEMM3D
- zout = ((uint4)(0, 1, 2, 3) + (uint4)(COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0))) / (uint4)HEIGHT_GEMM3D;
- zout = min(DEPTH_GEMM3D - 1, zout);
-
- // Add offset due to the cross plane paddings
- zout *= (dst_cross_plane_pad * dst_stride_y);
-
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply dst_stride_z by DEPTH_GEMM3D
- dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
- // Add offset for batched GEMM
- dst_addr += z * dst_stride_z;
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-
- // Multiply by the weight of matrix-matrix product and store the result
-#if defined(ALPHA)
- SCALE_BLOCK(M0, half, acc, ALPHA);
-#endif // defined(ALPHA)
-
- // Add beta*bias
-#if defined(BETA)
- REPEAT_VAR_INIT_TO_CONST(M0, uint, zero, 0);
-
-#if defined(BROADCAST_BIAS)
- __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half));
-
- LOAD_BLOCK(1, 8, half, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
- SCALE_BLOCK(1, half, bias, BETA);
-#endif // UNIT_BETA
-
- // acc = acc + bias[broadcasted]
- ADD_BLOCK_BROADCAST(M0, acc, bias0);
-
-#else // defined(BROADCAST_BIAS)
- __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half)) + (COMPUTE_M0_START_ROW(get_global_id(1), M0,
- PARTIAL_STORE_M0)
- * src2_stride_y)
- + z * src2_stride_z;
-
- LOAD_BLOCK(M0, 8, half, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
- SCALE_BLOCK(M0, half, bias, BETA);
-#endif // UNIT_BETA
-
- // acc = acc + bias
- ADD_BLOCK(M0, acc, bias);
-
-#endif // defined(BROADCAST_BIAS)
-#endif // defined(BETA)
-
-#if defined(ACTIVATION_TYPE)
- ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, half, VEC_SIZE, acc, A_VAL, B_VAL);
-#endif // defined(ACTIVATION_TYPE)
-
- // Store the output block
- const bool cond_y = get_global_id(1) == 0;
- const bool cond_x = ((get_global_id(0) + 1) * 8 >= N);
- STORE_BLOCK_BOUNDARY_AWARE(M0, 8, half, acc, dst_addr, dst_stride_y, zout.s, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-}
-#endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED)
-
-#endif // defined(N) && defined(K) && defined(M0) && defined(N0) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
\ No newline at end of file
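All of the -D options referenced in the doc comments of the removed kernels (M0, N0=8, K, N, PARTIAL_STORE_M0/N0, plus the optional ALPHA, BETA, MATRIX_B_DEPTH and ACTIVATION_TYPE) arrive as OpenCL program build options. A hedged host-side C sketch of such a build step, with made-up example values rather than the library's actual option assembly:

    #include <CL/cl.h>
    #include <stdio.h>

    /* Illustrative only: compile a GEMM kernel source with the compile-time
     * definitions these kernels expect. The values are examples, not derived here. */
    static cl_program build_fp16_gemm(cl_context ctx, cl_device_id dev,
                                      const char *source, size_t source_len)
    {
        cl_int     err  = CL_SUCCESS;
        cl_program prog = clCreateProgramWithSource(ctx, 1, &source, &source_len, &err);

        const char *opts =
            "-DARM_COMPUTE_OPENCL_FP16_ENABLED "
            "-DM0=4 -DN0=8 "                             /* block per work-item; N0 is fixed to 8 */
            "-DK=320 -DN=121 "                           /* example problem sizes                 */
            "-DPARTIAL_STORE_M0=1 -DPARTIAL_STORE_N0=1"; /* leftover block sizes (example)        */

        err = clBuildProgram(prog, 1, &dev, opts, NULL, NULL);
        if (err != CL_SUCCESS)
            fprintf(stderr, "clBuildProgram failed: %d\n", err);
        return prog;
    }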
diff --git a/src/core/CL/cl_kernels/helpers.h b/src/core/CL/cl_kernels/helpers.h
index d5e8352438..6e05a513ec 100644
--- a/src/core/CL/cl_kernels/helpers.h
+++ b/src/core/CL/cl_kernels/helpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2021 Arm Limited.
+ * Copyright (c) 2016-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_HELPER_H
-#define ARM_COMPUTE_HELPER_H
+#ifndef ACL_SRC_CORE_CL_CL_KERNELS_HELPERS_H
+#define ACL_SRC_CORE_CL_CL_KERNELS_HELPERS_H
#include "load_store_utility.h"
@@ -44,6 +44,7 @@
#define GPU_ARCH_MIDGARD 0x100
#define GPU_ARCH_BIFROST 0x200
+#define GPU_ARCH_VALHALL 0x300
/** Concatenate two inputs.
*
@@ -80,11 +81,11 @@
* @return The reversed vector
* @{
*/
-#define REV1(x) ((x))
-#define REV2(x) ((x).s10)
-#define REV3(x) ((x).s210)
-#define REV4(x) ((x).s3210)
-#define REV8(x) ((x).s76543210)
+#define REV1(x) ((x))
+#define REV2(x) ((x).s10)
+#define REV3(x) ((x).s210)
+#define REV4(x) ((x).s3210)
+#define REV8(x) ((x).s76543210)
#define REV16(x) ((x).sFEDCBA9876543210)
/** @} */ // end of group REVn
@@ -98,7 +99,7 @@
* @{
*/
#define REVERSE_STR(x, s) REV##s((x))
-#define REVERSE(x, s) REVERSE_STR(x, s)
+#define REVERSE(x, s) REVERSE_STR(x, s)
/** @} */ // end of group REVERSE
/** Circular-right-shift (rotate-right) the vector of size s by the amount of n.
@@ -137,16 +138,16 @@
#define ROT8_7(x) ((x).s12345670)
#define ROT8_8(x) ((x))
-#define ROT16_0(x) ((x))
-#define ROT16_1(x) ((x).sF0123456789ABCDE)
-#define ROT16_2(x) ((x).sEF0123456789ABCD)
-#define ROT16_3(x) ((x).sDEF0123456789ABC)
-#define ROT16_4(x) ((x).sCDEF0123456789AB)
-#define ROT16_5(x) ((x).sBCDEF0123456789A)
-#define ROT16_6(x) ((x).sABCDEF0123456789)
-#define ROT16_7(x) ((x).s9ABCDEF012345678)
-#define ROT16_8(x) ((x).s89ABCDEF01234567)
-#define ROT16_9(x) ((x).s789ABCDEF0123456)
+#define ROT16_0(x) ((x))
+#define ROT16_1(x) ((x).sF0123456789ABCDE)
+#define ROT16_2(x) ((x).sEF0123456789ABCD)
+#define ROT16_3(x) ((x).sDEF0123456789ABC)
+#define ROT16_4(x) ((x).sCDEF0123456789AB)
+#define ROT16_5(x) ((x).sBCDEF0123456789A)
+#define ROT16_6(x) ((x).sABCDEF0123456789)
+#define ROT16_7(x) ((x).s9ABCDEF012345678)
+#define ROT16_8(x) ((x).s89ABCDEF01234567)
+#define ROT16_9(x) ((x).s789ABCDEF0123456)
#define ROT16_10(x) ((x).s6789ABCDEF012345)
#define ROT16_11(x) ((x).s56789ABCDEF01234)
#define ROT16_12(x) ((x).s456789ABCDEF0123)
@@ -167,7 +168,7 @@
* @{
*/
#define ROTATE_STR(x, s, n) ROT##s##_##n(x)
-#define ROTATE(x, s, n) ROTATE_STR(x, s, n)
+#define ROTATE(x, s, n) ROTATE_STR(x, s, n)
/** @} */ // end of group ROTATE
/** Creates a vector of size n filled with offset values corresponding to the location of each element.
@@ -178,11 +179,11 @@
* @return The vector filled with offset values
* @{
*/
-#define V_OFFS1(dt) (dt##1)(0)
-#define V_OFFS2(dt) (dt##2)(0, 1)
-#define V_OFFS3(dt) (dt##3)(0, 1, 2)
-#define V_OFFS4(dt) (dt##4)(0, 1, 2, 3)
-#define V_OFFS8(dt) (dt##8)(0, 1, 2, 3, 4, 5, 6, 7)
+#define V_OFFS1(dt) (dt##1)(0)
+#define V_OFFS2(dt) (dt##2)(0, 1)
+#define V_OFFS3(dt) (dt##3)(0, 1, 2)
+#define V_OFFS4(dt) (dt##4)(0, 1, 2, 3)
+#define V_OFFS8(dt) (dt##8)(0, 1, 2, 3, 4, 5, 6, 7)
#define V_OFFS16(dt) (dt##16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)
/** @} */ // end of group V_OFFSn
@@ -196,14 +197,216 @@
* @{
*/
#define VEC_OFFS_STR(dt, s) V_OFFS##s(dt)
-#define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s)
+#define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s)
/** @} */ // end of group VEC_OFFS
#define VLOAD_STR(size) vload##size
-#define VLOAD(size) VLOAD_STR(size)
+#define VLOAD(size) VLOAD_STR(size)
-#define PIXEL_UNIT4 1
-#define PIXEL_UNIT8 2
+/** Extended partial vload that correctly handles scalar values as well.
+ * Load the **lower** 0 to (n-1)th elements of the given vector while minimising the amount of load ops
+ * @name VLOAD_PARTIAL
+ *
+ * @note With this macro, the passed data can be both a vector and a scalar
+ * @note @p load_size needs to be <= @p size
+ * eg 1: Valid
+ * VLOAD_PARTIAL(16, 15) ...;
+ * eg 2: Invalid
+ * VLOAD_PARTIAL(4, 7) ...;
+ *
+ * @param[in] size The width of @p DATA. Supported values: 1(scalar), 2, 3, 4, 8, 16
+ * @param[in] load_size The number of lower elements to load. Supported values: 1-16, but has to be <= @p size
+ * @{
+ */
+#define VLOAD_PARTIAL_STR(size, load_size) vload_partial_##size##_##load_size
+#define VLOAD_PARTIAL(size, load_size) VLOAD_PARTIAL_STR(size, load_size)
+
+#define NO_LOAD(data, offs, ptr) \
+ { \
+ }
+
+// Size == 1 (scalar)
+#define vload_partial_1_0 NO_LOAD
+#define vload_partial_1_1 vload1
+#define vload_partial_1_2 NO_LOAD
+#define vload_partial_1_3 NO_LOAD
+#define vload_partial_1_4 NO_LOAD
+#define vload_partial_1_5 NO_LOAD
+#define vload_partial_1_6 NO_LOAD
+#define vload_partial_1_7 NO_LOAD
+#define vload_partial_1_8 NO_LOAD
+#define vload_partial_1_9 NO_LOAD
+#define vload_partial_1_10 NO_LOAD
+#define vload_partial_1_11 NO_LOAD
+#define vload_partial_1_12 NO_LOAD
+#define vload_partial_1_13 NO_LOAD
+#define vload_partial_1_14 NO_LOAD
+#define vload_partial_1_15 NO_LOAD
+#define vload_partial_1_16 NO_LOAD
+// Size == 2
+#define vload_partial_2_0 NO_LOAD
+#define vload_partial_2_1 vload_partial_1
+#define vload_partial_2_2 vload_partial_2
+#define vload_partial_2_3 NO_LOAD
+#define vload_partial_2_4 NO_LOAD
+#define vload_partial_2_5 NO_LOAD
+#define vload_partial_2_6 NO_LOAD
+#define vload_partial_2_7 NO_LOAD
+#define vload_partial_2_8 NO_LOAD
+#define vload_partial_2_9 NO_LOAD
+#define vload_partial_2_10 NO_LOAD
+#define vload_partial_2_11 NO_LOAD
+#define vload_partial_2_12 NO_LOAD
+#define vload_partial_2_13 NO_LOAD
+#define vload_partial_2_14 NO_LOAD
+#define vload_partial_2_15 NO_LOAD
+#define vload_partial_2_16 NO_LOAD
+// Size == 3
+#define vload_partial_3_0 NO_LOAD
+#define vload_partial_3_1 vload_partial_1
+#define vload_partial_3_2 vload_partial_2
+#define vload_partial_3_3 vload_partial_3
+#define vload_partial_3_4 NO_LOAD
+#define vload_partial_3_5 NO_LOAD
+#define vload_partial_3_6 NO_LOAD
+#define vload_partial_3_7 NO_LOAD
+#define vload_partial_3_8 NO_LOAD
+#define vload_partial_3_9 NO_LOAD
+#define vload_partial_3_10 NO_LOAD
+#define vload_partial_3_11 NO_LOAD
+#define vload_partial_3_12 NO_LOAD
+#define vload_partial_3_13 NO_LOAD
+#define vload_partial_3_14 NO_LOAD
+#define vload_partial_3_15 NO_LOAD
+#define vload_partial_3_16 NO_LOAD
+// Size == 4
+#define vload_partial_4_0 NO_LOAD
+#define vload_partial_4_1 vload_partial_1
+#define vload_partial_4_2 vload_partial_2
+#define vload_partial_4_3 vload_partial_3
+#define vload_partial_4_4 vload_partial_4
+#define vload_partial_4_5 NO_LOAD
+#define vload_partial_4_6 NO_LOAD
+#define vload_partial_4_7 NO_LOAD
+#define vload_partial_4_8 NO_LOAD
+#define vload_partial_4_9 NO_LOAD
+#define vload_partial_4_10 NO_LOAD
+#define vload_partial_4_11 NO_LOAD
+#define vload_partial_4_12 NO_LOAD
+#define vload_partial_4_13 NO_LOAD
+#define vload_partial_4_14 NO_LOAD
+#define vload_partial_4_15 NO_LOAD
+#define vload_partial_4_16 NO_LOAD
+// Size == 8
+#define vload_partial_8_0 NO_LOAD
+#define vload_partial_8_1 vload_partial_1
+#define vload_partial_8_2 vload_partial_2
+#define vload_partial_8_3 vload_partial_3
+#define vload_partial_8_4 vload_partial_4
+#define vload_partial_8_5 vload_partial_5
+#define vload_partial_8_6 vload_partial_6
+#define vload_partial_8_7 vload_partial_7
+#define vload_partial_8_8 vload_partial_8
+#define vload_partial_8_9 NO_LOAD
+#define vload_partial_8_10 NO_LOAD
+#define vload_partial_8_11 NO_LOAD
+#define vload_partial_8_12 NO_LOAD
+#define vload_partial_8_13 NO_LOAD
+#define vload_partial_8_14 NO_LOAD
+#define vload_partial_8_15 NO_LOAD
+#define vload_partial_8_16 NO_LOAD
+// Size == 16
+#define vload_partial_16_0 NO_LOAD
+#define vload_partial_16_1 vload_partial_1
+#define vload_partial_16_2 vload_partial_2
+#define vload_partial_16_3 vload_partial_3
+#define vload_partial_16_4 vload_partial_4
+#define vload_partial_16_5 vload_partial_5
+#define vload_partial_16_6 vload_partial_6
+#define vload_partial_16_7 vload_partial_7
+#define vload_partial_16_8 vload_partial_8
+#define vload_partial_16_9 vload_partial_9
+#define vload_partial_16_10 vload_partial_10
+#define vload_partial_16_11 vload_partial_11
+#define vload_partial_16_12 vload_partial_12
+#define vload_partial_16_13 vload_partial_13
+#define vload_partial_16_14 vload_partial_14
+#define vload_partial_16_15 vload_partial_15
+#define vload_partial_16_16 vload_partial_16
+
+/** Partial vload. Load the **lower** 0 to (n-1)th elements of the given vector while minimising the amount of vload ops
+ * @name vload_partial_n
+ *
+ * @note @p DATA needs to be a vector not a scalar
+ * @note n needs to be <= the vector width of the input variable @p DATA
+ * eg 1: Valid
+ * vload_partial_15(var:float16, 0, 0xabcd);
+ * eg 2: Invalid
+ * vload_partial_7(var:float4, 0, 0xabcd);
+ *
+ * @note in cases n == 1, 2, 3, 4, 8, 16, no extra vload is invoked, thus there's no performance penalty.
+ *
+ * @param[in] DATA The name of the variable where to load the values
+ * @param[in] OFFSET Offset in n
+ * @param[in] PTR The base pointer
+ * @{
+ */
+#define vload_partial_1(DATA, OFFSET, PTR) DATA.s0 = vload1(OFFSET, PTR);
+
+#define vload_partial_2(DATA, OFFSET, PTR) DATA.s01 = vload2(OFFSET, PTR);
+
+#define vload_partial_3(DATA, OFFSET, PTR) DATA.s012 = vload3(OFFSET, PTR);
+
+#define vload_partial_4(DATA, OFFSET, PTR) DATA.s0123 = vload4(OFFSET, PTR);
+
+#define vload_partial_5(DATA, OFFSET, PTR) \
+ vload_partial_4(DATA.s0123, OFFSET, PTR); \
+ DATA.s4 = vload1(OFFSET, PTR + 4);
+
+#define vload_partial_6(DATA, OFFSET, PTR) \
+ vload_partial_4(DATA.s0123, OFFSET, PTR); \
+ vload_partial_2(DATA.s45, OFFSET, PTR + 4);
+
+#define vload_partial_7(DATA, OFFSET, PTR) \
+ vload_partial_4(DATA.s0123, OFFSET, PTR); \
+ vload_partial_3(DATA.s456, OFFSET, PTR + 4);
+
+#define vload_partial_8(DATA, OFFSET, PTR) DATA.s01234567 = vload8(OFFSET, PTR);
+
+#define vload_partial_9(DATA, OFFSET, PTR) \
+ vload_partial_8(DATA.s01234567, OFFSET, PTR); \
+ DATA.s8 = vload1(OFFSET, PTR + 8);
+
+#define vload_partial_10(DATA, OFFSET, PTR) \
+ vload_partial_8(DATA.s01234567, OFFSET, PTR); \
+ vload_partial_2(DATA.s89, OFFSET, PTR + 8);
+
+#define vload_partial_11(DATA, OFFSET, PTR) \
+ vload_partial_8(DATA.s01234567, OFFSET, PTR); \
+ vload_partial_3(DATA.s89A, OFFSET, PTR + 8);
+
+#define vload_partial_12(DATA, OFFSET, PTR) \
+ vload_partial_8(DATA.s01234567, OFFSET, PTR); \
+ vload_partial_4(DATA.s89AB, OFFSET, PTR + 8);
+// For vload_partial_{13,14,15}, an 8-element swizzle is passed for the remaining elements because vector sizes of 5, 6 and 7 are not supported
+#define vload_partial_13(DATA, OFFSET, PTR) \
+ vload_partial_8(DATA.s01234567, OFFSET, PTR); \
+ vload_partial_5(DATA.s89ABCDEF, OFFSET, PTR + 8);
+
+#define vload_partial_14(DATA, OFFSET, PTR) \
+ vload_partial_8(DATA.s01234567, OFFSET, PTR); \
+ vload_partial_6(DATA.s89ABCDEF, OFFSET, PTR + 8);
+
+#define vload_partial_15(DATA, OFFSET, PTR) \
+ vload_partial_8(DATA.s01234567, OFFSET, PTR); \
+ vload_partial_7(DATA.s89ABCDEF, OFFSET, PTR + 8);
+
+#define vload_partial_16(DATA, OFFSET, PTR) DATA = vload16(OFFSET, PTR);
+/** @} */ // end of group vload_partial_n
+/** @} */ // end of group VLOAD_PARTIAL
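VLOAD_PARTIAL is the load-side counterpart of the existing VSTORE_PARTIAL: the two-level dispatch above resolves, at preprocessing time, to the smallest combination of vloadN calls that fills the lower load_size lanes. A small illustrative use (kernel and variable names are made up):

    // Illustrative only: move the lower 5 lanes of an 8-wide block between buffers.
    __kernel void copy_lower_5(__global const float *src, __global float *dst)
    {
        float8 data = 0;
        VLOAD_PARTIAL(8, 5)
        (data, 0, src); // resolves to vload_partial_5: a vload4 plus a vload1
        VSTORE_PARTIAL(8, 5)
        (data, 0, dst); // symmetric partial store of the same 5 lanes
    }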
+
+#define PIXEL_UNIT4 1
+#define PIXEL_UNIT8 2
#define PIXEL_UNIT16 4
/** Utility macro to convert a vector size in pixel unit.
@@ -216,17 +419,45 @@
* @{
*/
#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) PIXEL_UNIT##vec_size
-#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(vec_size) CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size)
+#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(vec_size) CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size)
/** @} */ // end of group CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT
#define read_image2d_floatx1(img, x_coord, y_coord) (float4)(read_imagef(img, (int2)(x_coord, y_coord)));
-#define read_image2d_floatx2(img, x_coord, y_coord) (float8)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)));
-#define read_image2d_floatx4(img, x_coord, y_coord) (float16)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)), read_imagef(img, (int2)(x_coord + 2, y_coord)), read_imagef(img, (int2)(x_coord + 3, y_coord)));
+#define read_image2d_floatx2(img, x_coord, y_coord) \
+ (float8)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)));
+#define read_image2d_floatx4(img, x_coord, y_coord) \
+ (float16)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)), \
+ read_imagef(img, (int2)(x_coord + 2, y_coord)), read_imagef(img, (int2)(x_coord + 3, y_coord)));
#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#define read_image2d_halfx1(img, x_coord, y_coord) (half4)(read_imageh(img, (int2)(x_coord, y_coord)));
-#define read_image2d_halfx2(img, x_coord, y_coord) (half8)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)));
-#define read_image2d_halfx4(img, x_coord, y_coord) (half16)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)), read_imageh(img, (int2)(x_coord + 2, y_coord)), read_imageh(img, (int2)(x_coord + 3, y_coord)));
+#define read_image2d_halfx2(img, x_coord, y_coord) \
+ (half8)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)));
+#define read_image2d_halfx4(img, x_coord, y_coord) \
+ (half16)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)), \
+ read_imageh(img, (int2)(x_coord + 2, y_coord)), read_imageh(img, (int2)(x_coord + 3, y_coord)));
+#endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
+
+#define write_image2d_floatx1(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values));
+#define write_image2d_floatx2(img, x_coord, y_coord, values) \
+ (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), \
+ write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567));
+#define write_image2d_floatx4(img, x_coord, y_coord, values) \
+ (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), \
+ write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567), \
+ write_imagef(img, (int2)(x_coord + 2, y_coord), values.s89AB), \
+ write_imagef(img, (int2)(x_coord + 3, y_coord), values.sCDEF));
+
+#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
+#define write_image2d_halfx1(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values));
+#define write_image2d_halfx2(img, x_coord, y_coord, values) \
+ (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), \
+ write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567));
+#define write_image2d_halfx4(img, x_coord, y_coord, values) \
+ (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), \
+ write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567), \
+ write_imageh(img, (int2)(x_coord + 2, y_coord), values.s89AB), \
+ write_imageh(img, (int2)(x_coord + 3, y_coord), values.sCDEF));
#endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
/** Utility macro to read a 2D OpenCL image object.
@@ -243,24 +474,44 @@
* @{
*/
#define READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) read_image2d_##data_type##x##n0(img, x_coord, y_coord)
-#define READ_IMAGE2D(data_type, n0, img, x_coord, y_coord) READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord)
+#define READ_IMAGE2D(data_type, n0, img, x_coord, y_coord) READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord)
+/** @} */
+
+/** Utility macro to write a 2D OpenCL image object.
+ *
+ * @note Coordinates are not normalized
+ *
+ * @param[in] data_type Data type
+ * @param[in] n0 Number of pixel to write. Only 1,2 and 4 is supported
+ * @param[in] img OpenCL image object
+ * @param[in] x_coord The x coordinate for the top-left pixel
+ * @param[in] y_coord The y coordinate for the top-left pixel
+ * @param[in] values Values to write
+ *
+ * @{
+ */
+#define WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values) \
+ write_image2d_##data_type##x##n0(img, x_coord, y_coord, values)
+#define WRITE_IMAGE2D(data_type, n0, img, x_coord, y_coord, values) \
+ WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values)
+/** @} */
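Together with READ_IMAGE2D, the new WRITE_IMAGE2D macro gives a symmetric path between registers and an image object; n0 counts pixels, and each pixel carries four values. A hedged sketch of using both (kernel name and arguments are illustrative):

    // Illustrative only: copy 4 pixels (one float16) per work-item between images.
    __kernel void copy_block(__read_only image2d_t src_img, __write_only image2d_t dst_img)
    {
        const int x = get_global_id(0) * 4; // x/y are pixel coordinates, not normalised
        const int y = get_global_id(1);

        float16 v = READ_IMAGE2D(float, 4, src_img, x, y);
        WRITE_IMAGE2D(float, 4, dst_img, x, y, v);
    }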
#define VSTORE_STR(size) vstore##size
-#define VSTORE(size) VSTORE_STR(size)
+#define VSTORE(size) VSTORE_STR(size)
-#define float1 float
-#define half1 half
-#define char1 char
-#define uchar1 uchar
-#define short1 short
+#define float1 float
+#define half1 half
+#define char1 char
+#define uchar1 uchar
+#define short1 short
#define ushort1 ushort
-#define int1 int
-#define uint1 uint
-#define long1 long
-#define ulong1 ulong
+#define int1 int
+#define uint1 uint
+#define long1 long
+#define ulong1 ulong
#define double1 double
-#define vload1(OFFSET, PTR) *(OFFSET + PTR)
+#define vload1(OFFSET, PTR) *(OFFSET + PTR)
#define vstore1(DATA, OFFSET, PTR) *(OFFSET + PTR) = DATA
/** Extended partial vstore that correctly handles scalar values as well.
@@ -279,23 +530,23 @@
* @{
*/
#define VSTORE_PARTIAL_STR(size, store_size) vstore_partial_##size##_##store_size
-#define VSTORE_PARTIAL(size, store_size) VSTORE_PARTIAL_STR(size, store_size)
+#define VSTORE_PARTIAL(size, store_size) VSTORE_PARTIAL_STR(size, store_size)
#define NO_STORE(data, offs, ptr) \
{ \
}
// Size == 1 (scalar)
-#define vstore_partial_1_0 NO_STORE
-#define vstore_partial_1_1 vstore1
-#define vstore_partial_1_2 NO_STORE
-#define vstore_partial_1_3 NO_STORE
-#define vstore_partial_1_4 NO_STORE
-#define vstore_partial_1_5 NO_STORE
-#define vstore_partial_1_6 NO_STORE
-#define vstore_partial_1_7 NO_STORE
-#define vstore_partial_1_8 NO_STORE
-#define vstore_partial_1_9 NO_STORE
+#define vstore_partial_1_0 NO_STORE
+#define vstore_partial_1_1 vstore1
+#define vstore_partial_1_2 NO_STORE
+#define vstore_partial_1_3 NO_STORE
+#define vstore_partial_1_4 NO_STORE
+#define vstore_partial_1_5 NO_STORE
+#define vstore_partial_1_6 NO_STORE
+#define vstore_partial_1_7 NO_STORE
+#define vstore_partial_1_8 NO_STORE
+#define vstore_partial_1_9 NO_STORE
#define vstore_partial_1_10 NO_STORE
#define vstore_partial_1_11 NO_STORE
#define vstore_partial_1_12 NO_STORE
@@ -304,16 +555,16 @@
#define vstore_partial_1_15 NO_STORE
#define vstore_partial_1_16 NO_STORE
// Size == 2
-#define vstore_partial_2_0 NO_STORE
-#define vstore_partial_2_1 vstore_partial_1
-#define vstore_partial_2_2 vstore_partial_2
-#define vstore_partial_2_3 NO_STORE
-#define vstore_partial_2_4 NO_STORE
-#define vstore_partial_2_5 NO_STORE
-#define vstore_partial_2_6 NO_STORE
-#define vstore_partial_2_7 NO_STORE
-#define vstore_partial_2_8 NO_STORE
-#define vstore_partial_2_9 NO_STORE
+#define vstore_partial_2_0 NO_STORE
+#define vstore_partial_2_1 vstore_partial_1
+#define vstore_partial_2_2 vstore_partial_2
+#define vstore_partial_2_3 NO_STORE
+#define vstore_partial_2_4 NO_STORE
+#define vstore_partial_2_5 NO_STORE
+#define vstore_partial_2_6 NO_STORE
+#define vstore_partial_2_7 NO_STORE
+#define vstore_partial_2_8 NO_STORE
+#define vstore_partial_2_9 NO_STORE
#define vstore_partial_2_10 NO_STORE
#define vstore_partial_2_11 NO_STORE
#define vstore_partial_2_12 NO_STORE
@@ -322,16 +573,16 @@
#define vstore_partial_2_15 NO_STORE
#define vstore_partial_2_16 NO_STORE
// Size == 3
-#define vstore_partial_3_0 NO_STORE
-#define vstore_partial_3_1 vstore_partial_1
-#define vstore_partial_3_2 vstore_partial_2
-#define vstore_partial_3_3 vstore_partial_3
-#define vstore_partial_3_4 NO_STORE
-#define vstore_partial_3_5 NO_STORE
-#define vstore_partial_3_6 NO_STORE
-#define vstore_partial_3_7 NO_STORE
-#define vstore_partial_3_8 NO_STORE
-#define vstore_partial_3_9 NO_STORE
+#define vstore_partial_3_0 NO_STORE
+#define vstore_partial_3_1 vstore_partial_1
+#define vstore_partial_3_2 vstore_partial_2
+#define vstore_partial_3_3 vstore_partial_3
+#define vstore_partial_3_4 NO_STORE
+#define vstore_partial_3_5 NO_STORE
+#define vstore_partial_3_6 NO_STORE
+#define vstore_partial_3_7 NO_STORE
+#define vstore_partial_3_8 NO_STORE
+#define vstore_partial_3_9 NO_STORE
#define vstore_partial_3_10 NO_STORE
#define vstore_partial_3_11 NO_STORE
#define vstore_partial_3_12 NO_STORE
@@ -340,16 +591,16 @@
#define vstore_partial_3_15 NO_STORE
#define vstore_partial_3_16 NO_STORE
// Size == 4
-#define vstore_partial_4_0 NO_STORE
-#define vstore_partial_4_1 vstore_partial_1
-#define vstore_partial_4_2 vstore_partial_2
-#define vstore_partial_4_3 vstore_partial_3
-#define vstore_partial_4_4 vstore_partial_4
-#define vstore_partial_4_5 NO_STORE
-#define vstore_partial_4_6 NO_STORE
-#define vstore_partial_4_7 NO_STORE
-#define vstore_partial_4_8 NO_STORE
-#define vstore_partial_4_9 NO_STORE
+#define vstore_partial_4_0 NO_STORE
+#define vstore_partial_4_1 vstore_partial_1
+#define vstore_partial_4_2 vstore_partial_2
+#define vstore_partial_4_3 vstore_partial_3
+#define vstore_partial_4_4 vstore_partial_4
+#define vstore_partial_4_5 NO_STORE
+#define vstore_partial_4_6 NO_STORE
+#define vstore_partial_4_7 NO_STORE
+#define vstore_partial_4_8 NO_STORE
+#define vstore_partial_4_9 NO_STORE
#define vstore_partial_4_10 NO_STORE
#define vstore_partial_4_11 NO_STORE
#define vstore_partial_4_12 NO_STORE
@@ -358,16 +609,16 @@
#define vstore_partial_4_15 NO_STORE
#define vstore_partial_4_16 NO_STORE
// Size == 8
-#define vstore_partial_8_0 NO_STORE
-#define vstore_partial_8_1 vstore_partial_1
-#define vstore_partial_8_2 vstore_partial_2
-#define vstore_partial_8_3 vstore_partial_3
-#define vstore_partial_8_4 vstore_partial_4
-#define vstore_partial_8_5 vstore_partial_5
-#define vstore_partial_8_6 vstore_partial_6
-#define vstore_partial_8_7 vstore_partial_7
-#define vstore_partial_8_8 vstore_partial_8
-#define vstore_partial_8_9 NO_STORE
+#define vstore_partial_8_0 NO_STORE
+#define vstore_partial_8_1 vstore_partial_1
+#define vstore_partial_8_2 vstore_partial_2
+#define vstore_partial_8_3 vstore_partial_3
+#define vstore_partial_8_4 vstore_partial_4
+#define vstore_partial_8_5 vstore_partial_5
+#define vstore_partial_8_6 vstore_partial_6
+#define vstore_partial_8_7 vstore_partial_7
+#define vstore_partial_8_8 vstore_partial_8
+#define vstore_partial_8_9 NO_STORE
#define vstore_partial_8_10 NO_STORE
#define vstore_partial_8_11 NO_STORE
#define vstore_partial_8_12 NO_STORE
@@ -376,16 +627,16 @@
#define vstore_partial_8_15 NO_STORE
#define vstore_partial_8_16 NO_STORE
// Size == 16
-#define vstore_partial_16_0 NO_STORE
-#define vstore_partial_16_1 vstore_partial_1
-#define vstore_partial_16_2 vstore_partial_2
-#define vstore_partial_16_3 vstore_partial_3
-#define vstore_partial_16_4 vstore_partial_4
-#define vstore_partial_16_5 vstore_partial_5
-#define vstore_partial_16_6 vstore_partial_6
-#define vstore_partial_16_7 vstore_partial_7
-#define vstore_partial_16_8 vstore_partial_8
-#define vstore_partial_16_9 vstore_partial_9
+#define vstore_partial_16_0 NO_STORE
+#define vstore_partial_16_1 vstore_partial_1
+#define vstore_partial_16_2 vstore_partial_2
+#define vstore_partial_16_3 vstore_partial_3
+#define vstore_partial_16_4 vstore_partial_4
+#define vstore_partial_16_5 vstore_partial_5
+#define vstore_partial_16_6 vstore_partial_6
+#define vstore_partial_16_7 vstore_partial_7
+#define vstore_partial_16_8 vstore_partial_8
+#define vstore_partial_16_9 vstore_partial_9
#define vstore_partial_16_10 vstore_partial_10
#define vstore_partial_16_11 vstore_partial_11
#define vstore_partial_16_12 vstore_partial_12
@@ -411,17 +662,13 @@
* @param[in] PTR The base pointer
* @{
*/
-#define vstore_partial_1(DATA, OFFSET, PTR) \
- vstore1(DATA.s0, OFFSET, PTR);
+#define vstore_partial_1(DATA, OFFSET, PTR) vstore1(DATA.s0, OFFSET, PTR);
-#define vstore_partial_2(DATA, OFFSET, PTR) \
- vstore2(DATA.s01, OFFSET, PTR);
+#define vstore_partial_2(DATA, OFFSET, PTR) vstore2(DATA.s01, OFFSET, PTR);
-#define vstore_partial_3(DATA, OFFSET, PTR) \
- vstore3(DATA.s012, OFFSET, PTR);
+#define vstore_partial_3(DATA, OFFSET, PTR) vstore3(DATA.s012, OFFSET, PTR);
-#define vstore_partial_4(DATA, OFFSET, PTR) \
- vstore4(DATA.s0123, OFFSET, PTR);
+#define vstore_partial_4(DATA, OFFSET, PTR) vstore4(DATA.s0123, OFFSET, PTR);
#define vstore_partial_5(DATA, OFFSET, PTR) \
vstore_partial_4(DATA.s0123, OFFSET, PTR); \
@@ -435,8 +682,7 @@
vstore_partial_4(DATA.s0123, OFFSET, PTR); \
vstore_partial_3(DATA.s456, OFFSET, PTR + 4);
-#define vstore_partial_8(DATA, OFFSET, PTR) \
- vstore8(DATA.s01234567, OFFSET, PTR);
+#define vstore_partial_8(DATA, OFFSET, PTR) vstore8(DATA.s01234567, OFFSET, PTR);
#define vstore_partial_9(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
@@ -466,147 +712,156 @@
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_7(DATA.s89abcdef, OFFSET, PTR + 8);
-#define vstore_partial_16(DATA, OFFSET, PTR) \
- vstore16(DATA, OFFSET, PTR);
+#define vstore_partial_16(DATA, OFFSET, PTR) vstore16(DATA, OFFSET, PTR);
/** @} */ // end of group vstore_partial_n
/** @} */ // end of group VSTORE_PARTIAL
// Convert built-in functions with _sat modifier are not supported in floating point so we create defines
// without _sat to overcome this issue
-#define convert_float_sat convert_float
-#define convert_float1_sat convert_float
-#define convert_float2_sat convert_float2
-#define convert_float3_sat convert_float3
-#define convert_float4_sat convert_float4
-#define convert_float8_sat convert_float8
+#define convert_float_sat convert_float
+#define convert_float1_sat convert_float
+#define convert_float2_sat convert_float2
+#define convert_float3_sat convert_float3
+#define convert_float4_sat convert_float4
+#define convert_float8_sat convert_float8
#define convert_float16_sat convert_float16
-#define convert_half_sat convert_float
-#define convert_half1_sat convert_half
-#define convert_half2_sat convert_half2
-#define convert_half3_sat convert_half3
-#define convert_half4_sat convert_half4
-#define convert_half8_sat convert_half8
-#define convert_half16_sat convert_half16
-
-#define convert_float1 convert_float
-#define convert_half1 convert_half
-#define convert_char1 convert_char
-#define convert_uchar1 convert_uchar
-#define convert_short1 convert_short
+#define convert_half_sat convert_float
+#define convert_half1_sat convert_half
+#define convert_half2_sat convert_half2
+#define convert_half3_sat convert_half3
+#define convert_half4_sat convert_half4
+#define convert_half8_sat convert_half8
+#define convert_half16_sat convert_half16
+
+#define convert_float1 convert_float
+#define convert_half1 convert_half
+#define convert_char1 convert_char
+#define convert_uchar1 convert_uchar
+#define convert_short1 convert_short
#define convert_ushort1 convert_ushort
-#define convert_int1 convert_int
-#define convert_uint1 convert_uint
-#define convert_long1 convert_long
-#define convert_ulong1 convert_ulong
+#define convert_int1 convert_int
+#define convert_uint1 convert_uint
+#define convert_long1 convert_long
+#define convert_ulong1 convert_ulong
#define convert_double1 convert_double
-#define convert_char1_sat convert_char_sat
-#define convert_uchar1_sat convert_uchar_sat
-#define convert_short1_sat convert_short_sat
+#define convert_char1_sat convert_char_sat
+#define convert_uchar1_sat convert_uchar_sat
+#define convert_uchar2_sat convert_uchar2_sat
+#define convert_uchar3_sat convert_uchar3_sat
+#define convert_uchar4_sat convert_uchar4_sat
+#define convert_uchar8_sat convert_uchar8_sat
+#define convert_uchar16_sat convert_uchar16_sat
+#define convert_short1_sat convert_short_sat
#define convert_ushort1_sat convert_ushort_sat
-#define convert_int1_sat convert_int_sat
-#define convert_uint1_sat convert_uint_sat
-#define convert_long1_sat convert_long_sat
-#define convert_ulong1_sat convert_ulong_sat
+#define convert_int1_sat convert_int_sat
+#define convert_uint1_sat convert_uint_sat
+#define convert_long1_sat convert_long_sat
+#define convert_ulong1_sat convert_ulong_sat
#define convert_double1_sat convert_double_sat
#define VEC_DATA_TYPE_STR(type, size) type##size
-#define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size)
+#define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size)
#define CONVERT_STR(x, type) (convert_##type((x)))
-#define CONVERT(x, type) CONVERT_STR(x, type)
+#define CONVERT(x, type) CONVERT_STR(x, type)
#define CONVERT_SAT_STR(x, type) (convert_##type##_sat((x)))
-#define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type)
+#define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type)
#define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x)))
-#define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round)
+#define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round)
-#define select_vec_dt_uchar(size) uchar##size
-#define select_vec_dt_char(size) char##size
+#define select_vec_dt_uchar(size) uchar##size
+#define select_vec_dt_char(size) char##size
#define select_vec_dt_ushort(size) ushort##size
-#define select_vec_dt_short(size) short##size
-#define select_vec_dt_half(size) short##size
-#define select_vec_dt_uint(size) uint##size
-#define select_vec_dt_int(size) int##size
-#define select_vec_dt_float(size) int##size
-#define select_vec_dt_ulong(size) ulong##size
-#define select_vec_dt_long(size) long##size
+#define select_vec_dt_short(size) short##size
+#define select_vec_dt_half(size) short##size
+#define select_vec_dt_uint(size) uint##size
+#define select_vec_dt_int(size) int##size
+#define select_vec_dt_float(size) int##size
+#define select_vec_dt_ulong(size) ulong##size
+#define select_vec_dt_long(size) long##size
#define SELECT_VEC_DATA_TYPE_STR(type, size) select_vec_dt_##type(size)
-#define SELECT_VEC_DATA_TYPE(type, size) SELECT_VEC_DATA_TYPE_STR(type, size)
-#define SELECT_DATA_TYPE(type) SELECT_VEC_DATA_TYPE_STR(type, 1)
+#define SELECT_VEC_DATA_TYPE(type, size) SELECT_VEC_DATA_TYPE_STR(type, size)
+#define SELECT_DATA_TYPE(type) SELECT_VEC_DATA_TYPE_STR(type, 1)
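SELECT_VEC_DATA_TYPE exists because OpenCL's select() expects an integer mask whose element width matches the data, which is why float maps to int and half to short above. An illustrative use of the mapping, assuming this header is included (RELU_SKETCH is a made-up name, not the library's activation definition):

    // Illustrative only: zero out negative lanes for any supported element type.
    #define RELU_SKETCH(x, type, size)                         \
        select((VEC_DATA_TYPE(type, size))0, (x),              \
               CONVERT((x) > (VEC_DATA_TYPE(type, size))0,     \
                       SELECT_VEC_DATA_TYPE(type, size)))

    // e.g. half8 y = RELU_SKETCH(v, half, 8); works unchanged for fp16 vectors.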
-#define signed_int_vec_dt_uchar(size) char##size
-#define signed_int_vec_dt_char(size) char##size
+#define signed_int_vec_dt_uchar(size) char##size
+#define signed_int_vec_dt_char(size) char##size
#define signed_int_vec_dt_ushort(size) short##size
-#define signed_int_vec_dt_short(size) short##size
-#define signed_int_vec_dt_half(size) short##size
-#define signed_int_vec_dt_uint(size) int##size
-#define signed_int_vec_dt_int(size) int##size
-#define signed_int_vec_dt_float(size) int##size
-#define signed_int_vec_dt_ulong(size) long##size
-#define signed_int_vec_dt_long(size) long##size
+#define signed_int_vec_dt_short(size) short##size
+#define signed_int_vec_dt_half(size) short##size
+#define signed_int_vec_dt_uint(size) int##size
+#define signed_int_vec_dt_int(size) int##size
+#define signed_int_vec_dt_float(size) int##size
+#define signed_int_vec_dt_ulong(size) long##size
+#define signed_int_vec_dt_long(size) long##size
#define SIGNED_INT_VEC_DATA_TYPE_STR(type, size) signed_int_vec_dt_##type(size)
-#define SIGNED_INT_VEC_DATA_TYPE(type, size) SIGNED_INT_VEC_DATA_TYPE_STR(type, size)
-#define SIGNED_INT_DATA_TYPE(type) SIGNED_INT_VEC_DATA_TYPE_STR(type, 1)
-
-#define sum_reduce_1(x) (x)
-#define sum_reduce_2(x) ((x).s0) + ((x).s1)
-#define sum_reduce_3(x) sum_reduce_2((x).s01) + ((x).s2)
-#define sum_reduce_4(x) sum_reduce_2((x).s01) + sum_reduce_2((x).s23)
-#define sum_reduce_8(x) sum_reduce_4((x).s0123) + sum_reduce_4((x).s4567)
+#define SIGNED_INT_VEC_DATA_TYPE(type, size) SIGNED_INT_VEC_DATA_TYPE_STR(type, size)
+#define SIGNED_INT_DATA_TYPE(type) SIGNED_INT_VEC_DATA_TYPE_STR(type, 1)
+
+#define sum_reduce_1(x) (x)
+#define sum_reduce_2(x) ((x).s0) + ((x).s1)
+#define sum_reduce_3(x) sum_reduce_2((x).s01) + ((x).s2)
+#define sum_reduce_4(x) sum_reduce_2((x).s01) + sum_reduce_2((x).s23)
+#define sum_reduce_8(x) sum_reduce_4((x).s0123) + sum_reduce_4((x).s4567)
#define sum_reduce_16(x) sum_reduce_8((x).s01234567) + sum_reduce_8((x).s89ABCDEF)
#define SUM_REDUCE_STR(x, size) sum_reduce_##size(x)
-#define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size)
-
-#define max_reduce_1(x) (x)
-#define max_reduce_2(x) max(((x).s0), ((x).s1))
-#define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2))
-#define max_reduce_4(x) max(max_reduce_2((x).s01), max_reduce_2((x).s23))
-#define max_reduce_8(x) max(max_reduce_4((x).s0123), max_reduce_4((x).s4567))
+#define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size)
+
+#define prod_reduce_1(x) (x)
+#define prod_reduce_2(x) ((x).s0) * ((x).s1)
+#define prod_reduce_3(x) prod_reduce_2((x).s01) * ((x).s2)
+#define prod_reduce_4(x) prod_reduce_2((x).s01) * prod_reduce_2((x).s23)
+#define prod_reduce_8(x) prod_reduce_4((x).s0123) * prod_reduce_4((x).s4567)
+#define prod_reduce_16(x) prod_reduce_8((x).s01234567) * prod_reduce_8((x).s89ABCDEF)
+
+#define PROD_REDUCE_STR(x, size) prod_reduce_##size(x)
+#define PROD_REDUCE(x, size) PROD_REDUCE_STR(x, size)
+
+#define max_reduce_1(x) (x)
+#define max_reduce_2(x) max(((x).s0), ((x).s1))
+#define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2))
+#define max_reduce_4(x) max(max_reduce_2((x).s01), max_reduce_2((x).s23))
+#define max_reduce_8(x) max(max_reduce_4((x).s0123), max_reduce_4((x).s4567))
#define max_reduce_16(x) max(max_reduce_8((x).s01234567), max_reduce_8((x).s89ABCDEF))
#define MAX_REDUCE_STR(x, size) max_reduce_##size(x)
-#define MAX_REDUCE(x, size) MAX_REDUCE_STR(x, size)
-
-#define VECTOR_DECLARATION(name) \
- __global uchar *name##_ptr, \
- uint name##_stride_x, \
- uint name##_step_x, \
- uint name##_offset_first_element_in_bytes
-
-#define IMAGE_DECLARATION(name) \
- __global uchar *name##_ptr, \
- uint name##_stride_x, \
- uint name##_step_x, \
- uint name##_stride_y, \
- uint name##_step_y, \
- uint name##_offset_first_element_in_bytes
-
-#define TENSOR3D_DECLARATION(name) \
- __global uchar *name##_ptr, \
- uint name##_stride_x, \
- uint name##_step_x, \
- uint name##_stride_y, \
- uint name##_step_y, \
- uint name##_stride_z, \
- uint name##_step_z, \
- uint name##_offset_first_element_in_bytes
-
-#define TENSOR4D_DECLARATION(name) \
- __global uchar *name##_ptr, \
- uint name##_stride_x, \
- uint name##_step_x, \
- uint name##_stride_y, \
- uint name##_step_y, \
- uint name##_stride_z, \
- uint name##_step_z, \
- uint name##_stride_w, \
- uint name##_step_w, \
- uint name##_offset_first_element_in_bytes
+#define MAX_REDUCE(x, size) MAX_REDUCE_STR(x, size)
+
+#define min_reduce_1(x) (x)
+#define min_reduce_2(x) min(((x).s0), ((x).s1))
+#define min_reduce_3(x) min(min_reduce_2((x).s01), ((x).s2))
+#define min_reduce_4(x) min(min_reduce_2((x).s01), min_reduce_2((x).s23))
+#define min_reduce_8(x) min(min_reduce_4((x).s0123), min_reduce_4((x).s4567))
+#define min_reduce_16(x) min(min_reduce_8((x).s01234567), min_reduce_8((x).s89ABCDEF))
+
+#define MIN_REDUCE_STR(x, size) min_reduce_##size(x)
+#define MIN_REDUCE(x, size) MIN_REDUCE_STR(x, size)
+
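+// Illustrative sketch of how the reduce helpers fold a vector accumulator into a
+// scalar (the variable names below are only examples):
+//   float8 vals  = vload8(0, (__global float *)src.ptr);
+//   float  total = SUM_REDUCE(vals, 8);  // nested pairwise additions
+//   float  prod  = PROD_REDUCE(vals, 8); // nested pairwise multiplications
+//   float  hi    = MAX_REDUCE(vals, 8);
+//   float  lo    = MIN_REDUCE(vals, 8);
+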
+#define VECTOR_DECLARATION(name) \
+ __global uchar *name##_ptr, uint name##_stride_x, uint name##_step_x, uint name##_offset_first_element_in_bytes
+
+#define IMAGE_DECLARATION(name) \
+ __global uchar *name##_ptr, uint name##_stride_x, uint name##_step_x, uint name##_stride_y, uint name##_step_y, \
+ uint name##_offset_first_element_in_bytes
+
+#define TENSOR3D_DECLARATION(name) \
+ __global uchar *name##_ptr, uint name##_stride_x, uint name##_step_x, uint name##_stride_y, uint name##_step_y, \
+ uint name##_stride_z, uint name##_step_z, uint name##_offset_first_element_in_bytes
+
+#define TENSOR4D_DECLARATION(name) \
+ __global uchar *name##_ptr, uint name##_stride_x, uint name##_step_x, uint name##_stride_y, uint name##_step_y, \
+ uint name##_stride_z, uint name##_step_z, uint name##_stride_w, uint name##_step_w, \
+ uint name##_offset_first_element_in_bytes
+
+#define TENSOR5D_DECLARATION(name) \
+ __global uchar *name##_ptr, uint name##_stride_x, uint name##_step_x, uint name##_stride_y, uint name##_step_y, \
+ uint name##_stride_z, uint name##_step_z, uint name##_stride_w, uint name##_step_w, uint name##_stride_v, \
+ uint name##_step_v, uint name##_offset_first_element_in_bytes
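+// Illustrative sketch: in a kernel signature each *_DECLARATION(name) expands to the
+// full pointer/stride/step/offset argument list for that rank, so a hypothetical
+// element-wise copy over 2D tensors could be declared as:
+//   __kernel void copy_plane(IMAGE_DECLARATION(src), IMAGE_DECLARATION(dst));
+// which is equivalent to passing src_ptr, src_stride_x, src_step_x, src_stride_y,
+// src_step_y, src_offset_first_element_in_bytes (and likewise for dst).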
#define CONVERT_TO_VECTOR_STRUCT(name) \
update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x)
@@ -614,38 +869,47 @@
#define CONVERT_TO_VECTOR_STRUCT_NO_STEP(name) \
update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0)
-#define CONVERT_TO_IMAGE_STRUCT(name) \
- update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y)
+#define CONVERT_TO_IMAGE_STRUCT(name) \
+ update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, \
+ name##_stride_y, name##_step_y)
#define CONVERT_TO_IMAGE_STRUCT_NO_STEP(name) \
update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0)
-#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \
- update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z)
+#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \
+ update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, \
+ name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, \
+ name##_step_z)
-#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name) \
- update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, name##_step_z)
+#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name) \
+ update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, \
+ name##_stride_y, 0, name##_stride_z, name##_step_z)
-#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \
- update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z)
+#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \
+ update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, \
+ name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, \
+ name##_step_z)
-#define CONVERT_TO_TENSOR3D_STRUCT(name) \
- update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
- name##_stride_z, name##_step_z)
+#define CONVERT_TO_TENSOR3D_STRUCT(name) \
+ update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, \
+ name##_stride_y, name##_step_y, name##_stride_z, name##_step_z)
-#define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name) \
- update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0)
+#define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name) \
+ update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, \
+ name##_stride_y, 0, name##_stride_z, 0)
-#define CONVERT_TO_TENSOR4D_STRUCT(name, mod_size) \
- update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
- name##_stride_z, name##_step_z, name##_stride_w, name##_step_w, mod_size)
+#define CONVERT_TO_TENSOR4D_STRUCT(name, mod_size) \
+ update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, \
+ name##_stride_y, name##_step_y, name##_stride_z, name##_step_z, name##_stride_w, \
+ name##_step_w, mod_size)
-#define CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(name, mod_size) \
- update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0, name##_stride_w, 0, mod_size)
+#define CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(name) \
+ update_tensor4D_workitem_no_step_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, \
+ name##_stride_y, name##_stride_z, name##_stride_w)
-#define CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(name) \
- tensor3D_ptr_no_update(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
- name##_stride_z, name##_step_z)
+#define CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(name) \
+ tensor3D_ptr_no_update(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, \
+ name##_stride_y, name##_step_y, name##_stride_z, name##_step_z)
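+// Illustrative sketch of the usual pattern inside a kernel body (hypothetical kernel;
+// DATA_TYPE is assumed to be defined at compile time):
+//   Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
+//   Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
+//   *((__global DATA_TYPE *)dst.ptr) = *((__global DATA_TYPE *)src.ptr);
+// Each CONVERT_TO_*_STRUCT macro advances the struct's ptr to the element owned by
+// this work-item; the *_NO_STEP variants pass 0 for the steps so the pointer is only
+// advanced by the first-element offset, and *_NO_UPDATE_PTR fills the struct without
+// moving the pointer at all.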
/** Structure to hold Vector information */
typedef struct Vector
@@ -694,10 +958,10 @@ typedef struct Tensor4D
*
* @return An image object
*/
-inline Vector update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x)
+inline Vector
+update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x)
{
- Vector vector =
- {
+ Vector vector = {
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
@@ -717,15 +981,13 @@ inline Vector update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_
*
* @return An image object
*/
-inline Image update_image_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y)
+inline Image update_image_workitem_ptr(
+ __global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y)
{
- Image img =
- {
- .ptr = ptr,
- .offset_first_element_in_bytes = offset_first_element_in_bytes,
- .stride_x = stride_x,
- .stride_y = stride_y
- };
+ Image img = {.ptr = ptr,
+ .offset_first_element_in_bytes = offset_first_element_in_bytes,
+ .stride_x = stride_x,
+ .stride_y = stride_y};
img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y;
return img;
}
@@ -743,16 +1005,21 @@ inline Image update_image_workitem_ptr(__global uchar *ptr, uint offset_first_el
*
* @return A 3D tensor object
*/
-inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
+inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr,
+ uint offset_first_element_in_bytes,
+ uint stride_x,
+ uint step_x,
+ uint stride_y,
+ uint step_y,
+ uint stride_z,
+ uint step_z)
{
- Image img =
- {
- .ptr = ptr,
- .offset_first_element_in_bytes = offset_first_element_in_bytes,
- .stride_x = stride_x,
- .stride_y = stride_y
- };
- img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
+ Image img = {.ptr = ptr,
+ .offset_first_element_in_bytes = offset_first_element_in_bytes,
+ .stride_x = stride_x,
+ .stride_y = stride_y};
+ img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y +
+ get_global_id(2) * step_z;
return img;
}
@@ -769,17 +1036,22 @@ inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr, uint o
*
* @return A 3D tensor object
*/
-inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
+inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr,
+ uint offset_first_element_in_bytes,
+ uint stride_x,
+ uint step_x,
+ uint stride_y,
+ uint step_y,
+ uint stride_z,
+ uint step_z)
{
- Tensor3D tensor =
- {
- .ptr = ptr,
- .offset_first_element_in_bytes = offset_first_element_in_bytes,
- .stride_x = stride_x,
- .stride_y = stride_y,
- .stride_z = stride_z
- };
- tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
+ Tensor3D tensor = {.ptr = ptr,
+ .offset_first_element_in_bytes = offset_first_element_in_bytes,
+ .stride_x = stride_x,
+ .stride_y = stride_y,
+ .stride_z = stride_z};
+ tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y +
+ get_global_id(2) * step_z;
return tensor;
}
@@ -796,34 +1068,58 @@ inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_fi
*
* @return A 3D tensor object
*/
-inline Tensor3D tensor3D_ptr_no_update(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
+inline Tensor3D tensor3D_ptr_no_update(__global uchar *ptr,
+ uint offset_first_element_in_bytes,
+ uint stride_x,
+ uint step_x,
+ uint stride_y,
+ uint step_y,
+ uint stride_z,
+ uint step_z)
{
- Tensor3D tensor =
- {
- .ptr = ptr,
- .offset_first_element_in_bytes = offset_first_element_in_bytes,
- .stride_x = stride_x,
- .stride_y = stride_y,
- .stride_z = stride_z
- };
+ Tensor3D tensor = {.ptr = ptr,
+ .offset_first_element_in_bytes = offset_first_element_in_bytes,
+ .stride_x = stride_x,
+ .stride_y = stride_y,
+ .stride_z = stride_z};
return tensor;
}
-inline Tensor4D update_tensor4D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z, uint stride_w,
- uint step_w,
- uint mod_size)
+inline Tensor4D update_tensor4D_workitem_ptr(__global uchar *ptr,
+ uint offset_first_element_in_bytes,
+ uint stride_x,
+ uint step_x,
+ uint stride_y,
+ uint step_y,
+ uint stride_z,
+ uint step_z,
+ uint stride_w,
+ uint step_w,
+ uint mod_size)
{
- Tensor4D tensor =
- {
- .ptr = ptr,
- .offset_first_element_in_bytes = offset_first_element_in_bytes,
- .stride_x = stride_x,
- .stride_y = stride_y,
- .stride_z = stride_z,
- .stride_w = stride_w
- };
+ Tensor4D tensor = {.ptr = ptr,
+ .offset_first_element_in_bytes = offset_first_element_in_bytes,
+ .stride_x = stride_x,
+ .stride_y = stride_y,
+ .stride_z = stride_z,
+ .stride_w = stride_w};
+
+ tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y +
+ (get_global_id(2) % mod_size) * step_z + (get_global_id(2) / mod_size) * step_w;
+ return tensor;
+}
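+// Worked example (sketch): when a 3D NDRange is launched over a 4D tensor, mod_size is
+// the size of the collapsed z dimension, so a work-item with get_global_id(2) == g
+// addresses z = g % mod_size and w (batch) = g / mod_size.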
- tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + (get_global_id(2) % mod_size) * step_z + (get_global_id(2) / mod_size) * step_w;
+inline Tensor4D update_tensor4D_workitem_no_step_ptr(
+ __global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint stride_y, uint stride_z, uint stride_w)
+{
+ Tensor4D tensor = {.ptr = ptr,
+ .offset_first_element_in_bytes = offset_first_element_in_bytes,
+ .stride_x = stride_x,
+ .stride_y = stride_y,
+ .stride_z = stride_z,
+ .stride_w = stride_w};
+
+ tensor.ptr += tensor.offset_first_element_in_bytes;
return tensor;
}
@@ -895,7 +1191,8 @@ inline __global const uchar *tensor3D_index2ptr(const Tensor3D *tensor, uint wid
const uint x = index;
- return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + tensor->offset_first_element_in_bytes;
+ return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z +
+ tensor->offset_first_element_in_bytes;
}
-#endif // _HELPER_H
+#endif // ACL_SRC_CORE_CL_CL_KERNELS_HELPERS_H
diff --git a/src/core/CL/cl_kernels/helpers_asymm.h b/src/core/CL/cl_kernels/helpers_asymm.h
index eea4458170..166260a3c0 100644
--- a/src/core/CL/cl_kernels/helpers_asymm.h
+++ b/src/core/CL/cl_kernels/helpers_asymm.h
@@ -34,7 +34,7 @@
* @return The converted vector
*/
#define CONVERT_DOWN_RTE_STR(x, type) (convert_##type##_rte((x)))
-#define CONVERT_DOWN_RTE(x, type) CONVERT_DOWN_RTE_STR(x, type)
+#define CONVERT_DOWN_RTE(x, type) CONVERT_DOWN_RTE_STR(x, type)
/** Quantize a floating-point scalar value to 8-bit asymmetric
*
@@ -84,14 +84,15 @@ inline float dequantize_qasymm8_signed(char input, float offset, float scale)
*
* @return quantized values
*/
-#define QUANTIZE_IMPL(type, size) \
- inline VEC_DATA_TYPE(type, size) quantize_##type##size(VEC_DATA_TYPE(float, size) input, float offset, float scale) \
- { \
- VEC_DATA_TYPE(float, size) \
- out_f32 = input / (VEC_DATA_TYPE(float, size))(scale) + (VEC_DATA_TYPE(float, size))(offset); \
- VEC_DATA_TYPE(type, size) \
- res = CONVERT_SAT(CONVERT_DOWN_RTE(out_f32, VEC_DATA_TYPE(int, size)), VEC_DATA_TYPE(type, size)); \
- return res; \
+#define QUANTIZE_IMPL(type, size) \
+ inline VEC_DATA_TYPE(type, size) \
+ quantize_##type##size(VEC_DATA_TYPE(float, size) input, float offset, float scale) \
+ { \
+ VEC_DATA_TYPE(float, size) \
+ out_f32 = input / (VEC_DATA_TYPE(float, size))(scale) + (VEC_DATA_TYPE(float, size))(offset); \
+ VEC_DATA_TYPE(type, size) \
+ res = CONVERT_SAT(CONVERT_DOWN_RTE(out_f32, VEC_DATA_TYPE(int, size)), VEC_DATA_TYPE(type, size)); \
+ return res; \
}
/** Dequantize a vector of values to floating-point
@@ -101,10 +102,11 @@ inline float dequantize_qasymm8_signed(char input, float offset, float scale)
*
* @return dequantized values in floating point
*/
-#define DEQUANTIZE_IMPL(type, size) \
- inline VEC_DATA_TYPE(float, size) dequantize_##type##size(VEC_DATA_TYPE(type, size) input, float offset, float scale) \
- { \
- return (CONVERT(input, VEC_DATA_TYPE(float, size)) - offset) * scale; \
+#define DEQUANTIZE_IMPL(type, size) \
+ inline VEC_DATA_TYPE(float, size) \
+ dequantize_##type##size(VEC_DATA_TYPE(type, size) input, float offset, float scale) \
+ { \
+ return (CONVERT(input, VEC_DATA_TYPE(float, size)) - offset) * scale; \
}
/** Correctly-rounded-to-nearest division by a power-of-two.
@@ -113,18 +115,17 @@ inline float dequantize_qasymm8_signed(char input, float offset, float scale)
*
* @return Correctly-rounded-to-nearest division by a power-of-two.
*/
-#define ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(size) \
- inline VEC_DATA_TYPE(int, size) asymm_rounding_divide_by_POW2_##size(VEC_DATA_TYPE(int, size) x, VEC_DATA_TYPE(int, size) exponent) \
- { \
- const VEC_DATA_TYPE(int, size) \
- zero = (VEC_DATA_TYPE(int, size))0; \
- const VEC_DATA_TYPE(int, size) \
- one = (VEC_DATA_TYPE(int, size))1; \
- VEC_DATA_TYPE(int, size) \
- mask = (one << exponent) - one; \
- VEC_DATA_TYPE(int, size) \
- threshold = (mask >> 1) + select(zero, one, (SELECT_VEC_DATA_TYPE(int, size))(x < 0)); \
- return (x >> exponent) + select(zero, one, (SELECT_VEC_DATA_TYPE(int, size))((x & mask) > threshold)); \
+#define ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(size) \
+ inline VEC_DATA_TYPE(int, size) \
+ asymm_rounding_divide_by_POW2_##size(VEC_DATA_TYPE(int, size) x, VEC_DATA_TYPE(int, size) exponent) \
+ { \
+ const VEC_DATA_TYPE(int, size) zero = (VEC_DATA_TYPE(int, size))0; \
+ const VEC_DATA_TYPE(int, size) one = (VEC_DATA_TYPE(int, size))1; \
+ VEC_DATA_TYPE(int, size) \
+ mask = (one << exponent) - one; \
+ VEC_DATA_TYPE(int, size) \
+ threshold = (mask >> 1) + select(zero, one, (SELECT_VEC_DATA_TYPE(int, size))(x < 0)); \
+ return (x >> exponent) + select(zero, one, (SELECT_VEC_DATA_TYPE(int, size))((x & mask) > threshold)); \
}
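+// Worked example (sketch): asymm_rounding_divide_by_POW2_1(7, 1) gives mask = 1 and
+// threshold = 0, so the result is (7 >> 1) + 1 = 4, i.e. 7 / 2 rounded to the nearest
+// integer.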
/** Product of two numbers, interpreting them as fixed-point values in the interval [-1, 1),
@@ -167,41 +168,44 @@ inline float dequantize_qasymm8_signed(char input, float offset, float scale)
*
* @return Result in fixed-point format Q0.
*/
-#define ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(size) \
- inline VEC_DATA_TYPE(int, size) asymm_exp_on_interval_between_negative_one_quarter_and_0_excl##size(VEC_DATA_TYPE(int, size) a) \
- { \
- const VEC_DATA_TYPE(int, size) constant_term = 1895147668; \
- const VEC_DATA_TYPE(int, size) constant_1_over_3 = 715827883; \
- const int k_fractional_bits = 31; \
- VEC_DATA_TYPE(int, size) \
- x = a + (1 << (k_fractional_bits - 3)); \
- VEC_DATA_TYPE(int, size) \
- x2 = ASYMM_MULT(x, x, size); \
- VEC_DATA_TYPE(int, size) \
- x3 = ASYMM_MULT(x2, x, size); \
- VEC_DATA_TYPE(int, size) \
- x4 = ASYMM_MULT(x2, x2, size); \
- VEC_DATA_TYPE(int, size) \
- x4_over_4 = ASYMM_ROUNDING_DIVIDE_BY_POW2(x4, 2, size); \
- VEC_DATA_TYPE(int, size) \
- x4_over_24_plus_x3_over_6_plus_x2 = ASYMM_MULT((x4_over_4 + x3), constant_1_over_3, size) + x2; \
- VEC_DATA_TYPE(int, size) \
- x4_over_24_plus_x3_over_6_plus_x2_over_2 = ASYMM_ROUNDING_DIVIDE_BY_POW2(x4_over_24_plus_x3_over_6_plus_x2, 1, size); \
- return constant_term + ASYMM_MULT(constant_term, x + x4_over_24_plus_x3_over_6_plus_x2_over_2, size); \
+#define ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(size) \
+ inline VEC_DATA_TYPE(int, size) \
+ asymm_exp_on_interval_between_negative_one_quarter_and_0_excl##size(VEC_DATA_TYPE(int, size) a) \
+ { \
+ const VEC_DATA_TYPE(int, size) constant_term = 1895147668; \
+ const VEC_DATA_TYPE(int, size) constant_1_over_3 = 715827883; \
+ const int k_fractional_bits = 31; \
+ VEC_DATA_TYPE(int, size) \
+ x = a + (1 << (k_fractional_bits - 3)); \
+ VEC_DATA_TYPE(int, size) \
+ x2 = ASYMM_MULT(x, x, size); \
+ VEC_DATA_TYPE(int, size) \
+ x3 = ASYMM_MULT(x2, x, size); \
+ VEC_DATA_TYPE(int, size) \
+ x4 = ASYMM_MULT(x2, x2, size); \
+ VEC_DATA_TYPE(int, size) \
+ x4_over_4 = ASYMM_ROUNDING_DIVIDE_BY_POW2(x4, 2, size); \
+ VEC_DATA_TYPE(int, size) \
+ x4_over_24_plus_x3_over_6_plus_x2 = ASYMM_MULT((x4_over_4 + x3), constant_1_over_3, size) + x2; \
+ VEC_DATA_TYPE(int, size) \
+ x4_over_24_plus_x3_over_6_plus_x2_over_2 = \
+ ASYMM_ROUNDING_DIVIDE_BY_POW2(x4_over_24_plus_x3_over_6_plus_x2, 1, size); \
+ return constant_term + ASYMM_MULT(constant_term, x + x4_over_24_plus_x3_over_6_plus_x2_over_2, size); \
}
/** Each bit of the result is set to the corresponding bit of either then_val or
* else_val depending on whether the corresponding bit of if_mask is set.
- * Equivalent to the VBSL instruction in Arm Neon.
+ * Equivalent to the VBSL instruction in Arm® Neon™.
*
* @param[in] size Size of vector.
*
 * @returns Result containing bits from @p then_val or from @p else_val, depending on whether the corresponding bit in @p if_mask is set.
*/
-#define ASYMM_SELECT_USING_MASK_IMPL(size) \
- inline VEC_DATA_TYPE(int, size) asymm_select_using_mask##size(VEC_DATA_TYPE(int, size) if_mask, VEC_DATA_TYPE(int, size) then_val, VEC_DATA_TYPE(int, size) else_val) \
- { \
- return (if_mask & then_val) ^ (~if_mask & else_val); \
+#define ASYMM_SELECT_USING_MASK_IMPL(size) \
+ inline VEC_DATA_TYPE(int, size) asymm_select_using_mask##size( \
+ VEC_DATA_TYPE(int, size) if_mask, VEC_DATA_TYPE(int, size) then_val, VEC_DATA_TYPE(int, size) else_val) \
+ { \
+ return (if_mask & then_val) ^ (~if_mask & else_val); \
}
/** For each element of the input vector, the corresponding bits of the result item are set
@@ -234,18 +238,19 @@ inline float dequantize_qasymm8_signed(char input, float offset, float scale)
return select(all_zeros, all_ones, (SELECT_VEC_DATA_TYPE(int, size))(a != 0)); \
}
-#define EXP_BARREL_SHIFTER_IMPL(size) \
- inline VEC_DATA_TYPE(int, size) exp_barrel_shifter##size(VEC_DATA_TYPE(int, size) result, int exponent, int fp_multiplier, int k_integer_bits, int k_fractional_bits, VEC_DATA_TYPE(int, size) remainder) \
- { \
- if(k_integer_bits > exponent) \
- { \
- const int k_shift_amount = k_integer_bits > exponent ? k_fractional_bits + exponent : 0; \
- return ASYMM_SELECT_USING_MASK( \
- ASYMM_MASK_IF_NON_ZERO(remainder & (1 << k_shift_amount), size), \
- ASYMM_MULT(result, fp_multiplier, size), result, size); \
- } \
- \
- return result; \
+#define EXP_BARREL_SHIFTER_IMPL(size) \
+ inline VEC_DATA_TYPE(int, size) \
+ exp_barrel_shifter##size(VEC_DATA_TYPE(int, size) result, int exponent, int fp_multiplier, int k_integer_bits, \
+ int k_fractional_bits, VEC_DATA_TYPE(int, size) remainder) \
+ { \
+ if (k_integer_bits > exponent) \
+ { \
+ const int k_shift_amount = k_integer_bits > exponent ? k_fractional_bits + exponent : 0; \
+ return ASYMM_SELECT_USING_MASK(ASYMM_MASK_IF_NON_ZERO(remainder & (1 << k_shift_amount), size), \
+ ASYMM_MULT(result, fp_multiplier, size), result, size); \
+ } \
+ \
+ return result; \
}
/** Calculates \f$ exp(x) \f$ for x < 0.
@@ -254,39 +259,40 @@ inline float dequantize_qasymm8_signed(char input, float offset, float scale)
*
* @return Result in fixed-point format Q0.
*/
-#define ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(size) \
- inline VEC_DATA_TYPE(int, size) asymm_exp_on_negative_values##size(VEC_DATA_TYPE(int, size) a, int k_integer_bits) \
- { \
- const int k_fractional_bits = 31 - k_integer_bits; \
- VEC_DATA_TYPE(int, size) \
- k_one_quarter = 1 << (k_fractional_bits - 2); \
- VEC_DATA_TYPE(int, size) \
- mask = k_one_quarter - 1; \
- VEC_DATA_TYPE(int, size) \
- a_mod_quarter_minus_one_quarter = (a & mask) - k_one_quarter; \
- VEC_DATA_TYPE(int, size) \
- a_mod_quarter_minus_one_quarter_scaled = a_mod_quarter_minus_one_quarter << k_integer_bits; \
- VEC_DATA_TYPE(int, size) \
- result = ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL(a_mod_quarter_minus_one_quarter_scaled, size); \
- VEC_DATA_TYPE(int, size) \
- remainder = a_mod_quarter_minus_one_quarter - a; \
- \
- result = EXP_BARREL_SHIFTER(result, -2, 1672461947, k_integer_bits, k_fractional_bits, remainder, size); \
- result = EXP_BARREL_SHIFTER(result, -1, 1302514674, k_integer_bits, k_fractional_bits, remainder, size); \
- result = EXP_BARREL_SHIFTER(result, +0, 790015084, k_integer_bits, k_fractional_bits, remainder, size); \
- result = EXP_BARREL_SHIFTER(result, +1, 290630308, k_integer_bits, k_fractional_bits, remainder, size); \
- result = EXP_BARREL_SHIFTER(result, +2, 39332535, k_integer_bits, k_fractional_bits, remainder, size); \
- result = EXP_BARREL_SHIFTER(result, +3, 720401, k_integer_bits, k_fractional_bits, remainder, size); \
- result = EXP_BARREL_SHIFTER(result, +4, 242, k_integer_bits, k_fractional_bits, remainder, size); \
- \
- if(k_integer_bits > 5) \
- { \
- const VEC_DATA_TYPE(int, size) clamp = -(1 << (k_fractional_bits + 5)); \
- result = ASYMM_SELECT_USING_MASK(ASYMM_MASK_IF_NON_ZERO(a < clamp, size), 0, result, size); \
- } \
- \
- const VEC_DATA_TYPE(int, size) Q0_one = INT_MAX; \
- return ASYMM_SELECT_USING_MASK(ASYMM_MASK_IF_ZERO(a, size), Q0_one, result, size); \
+#define ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(size) \
+ inline VEC_DATA_TYPE(int, size) asymm_exp_on_negative_values##size(VEC_DATA_TYPE(int, size) a, int k_integer_bits) \
+ { \
+ const int k_fractional_bits = 31 - k_integer_bits; \
+ VEC_DATA_TYPE(int, size) \
+ k_one_quarter = 1 << (k_fractional_bits - 2); \
+ VEC_DATA_TYPE(int, size) \
+ mask = k_one_quarter - 1; \
+ VEC_DATA_TYPE(int, size) \
+ a_mod_quarter_minus_one_quarter = (a & mask) - k_one_quarter; \
+ VEC_DATA_TYPE(int, size) \
+ a_mod_quarter_minus_one_quarter_scaled = a_mod_quarter_minus_one_quarter << k_integer_bits; \
+ VEC_DATA_TYPE(int, size) \
+ result = ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL(a_mod_quarter_minus_one_quarter_scaled, \
+ size); \
+ VEC_DATA_TYPE(int, size) \
+ remainder = a_mod_quarter_minus_one_quarter - a; \
+ \
+ result = EXP_BARREL_SHIFTER(result, -2, 1672461947, k_integer_bits, k_fractional_bits, remainder, size); \
+ result = EXP_BARREL_SHIFTER(result, -1, 1302514674, k_integer_bits, k_fractional_bits, remainder, size); \
+ result = EXP_BARREL_SHIFTER(result, +0, 790015084, k_integer_bits, k_fractional_bits, remainder, size); \
+ result = EXP_BARREL_SHIFTER(result, +1, 290630308, k_integer_bits, k_fractional_bits, remainder, size); \
+ result = EXP_BARREL_SHIFTER(result, +2, 39332535, k_integer_bits, k_fractional_bits, remainder, size); \
+ result = EXP_BARREL_SHIFTER(result, +3, 720401, k_integer_bits, k_fractional_bits, remainder, size); \
+ result = EXP_BARREL_SHIFTER(result, +4, 242, k_integer_bits, k_fractional_bits, remainder, size); \
+ \
+ if (k_integer_bits > 5) \
+ { \
+ const VEC_DATA_TYPE(int, size) clamp = -(1 << (k_fractional_bits + 5)); \
+ result = ASYMM_SELECT_USING_MASK(ASYMM_MASK_IF_NON_ZERO(a < clamp, size), 0, result, size); \
+ } \
+ \
+ const VEC_DATA_TYPE(int, size) Q0_one = INT_MAX; \
+ return ASYMM_SELECT_USING_MASK(ASYMM_MASK_IF_ZERO(a, size), Q0_one, result, size); \
}
/** Calculates the product of an integer value by a power of two, with either a positive exponent
@@ -297,49 +303,51 @@ inline float dequantize_qasymm8_signed(char input, float offset, float scale)
*
* @return Arithmetic left or right shift.
*/
-#define ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(size) \
- inline VEC_DATA_TYPE(int, size) asymm_saturating_rounding_mult_by_pow2##size(VEC_DATA_TYPE(int, size) x, int exponent) \
- { \
- if(exponent < 0) \
- { \
- return ASYMM_ROUNDING_DIVIDE_BY_POW2(x, -exponent, size); \
- } \
- \
- const VEC_DATA_TYPE(int, size) min = INT_MIN; \
- const VEC_DATA_TYPE(int, size) max = INT_MAX; \
- int threshold = ((1 << (31 - exponent)) - 1); \
- VEC_DATA_TYPE(int, size) \
- positive_mask = ASYMM_MASK_IF_NON_ZERO(x > threshold, size); \
- VEC_DATA_TYPE(int, size) \
- negative_mask = ASYMM_MASK_IF_NON_ZERO(x < -threshold, size); \
- VEC_DATA_TYPE(int, size) \
- result = x << exponent; \
- result = ASYMM_SELECT_USING_MASK(positive_mask, max, result, size); \
- result = ASYMM_SELECT_USING_MASK(negative_mask, min, result, size); \
- return result; \
+#define ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(size) \
+ inline VEC_DATA_TYPE(int, size) \
+ asymm_saturating_rounding_mult_by_pow2##size(VEC_DATA_TYPE(int, size) x, int exponent) \
+ { \
+ if (exponent < 0) \
+ { \
+ return ASYMM_ROUNDING_DIVIDE_BY_POW2(x, -exponent, size); \
+ } \
+ \
+ const VEC_DATA_TYPE(int, size) min = INT_MIN; \
+ const VEC_DATA_TYPE(int, size) max = INT_MAX; \
+ int threshold = ((1 << (31 - exponent)) - 1); \
+ VEC_DATA_TYPE(int, size) \
+ positive_mask = ASYMM_MASK_IF_NON_ZERO(x > threshold, size); \
+ VEC_DATA_TYPE(int, size) \
+ negative_mask = ASYMM_MASK_IF_NON_ZERO(x < -threshold, size); \
+ VEC_DATA_TYPE(int, size) \
+ result = x << exponent; \
+ result = ASYMM_SELECT_USING_MASK(positive_mask, max, result, size); \
+ result = ASYMM_SELECT_USING_MASK(negative_mask, min, result, size); \
+ return result; \
}
/** Calculates (a+b)/2, rounded to the nearest integer.
- * Equivalent to VRHADD in the Arm Neon instruction set.
+ * Equivalent to VRHADD in the Arm® Neon™ instruction set.
*
* @param[in] size Size of vector.
*
* @return (a+b)/2, rounded to the nearest integer.
*/
-#define ASYMM_ROUNDING_HALF_SUM_IMPL(size) \
- inline VEC_DATA_TYPE(int, size) asymm_rounding_half_sum##size(VEC_DATA_TYPE(int, size) a, VEC_DATA_TYPE(int, size) b) \
- { \
- VEC_DATA_TYPE(long, size) \
- a64 = convert_long##size(a); \
- VEC_DATA_TYPE(long, size) \
- b64 = convert_long##size(b); \
- VEC_DATA_TYPE(long, size) \
- sum = a64 + b64; \
- const VEC_DATA_TYPE(long, size) one = 1; \
- const VEC_DATA_TYPE(long, size) minus_one = -1; \
- VEC_DATA_TYPE(long, size) \
- sign = select(minus_one, one, (SELECT_VEC_DATA_TYPE(long, size))(sum >= 0)); \
- return convert_int##size((sum + sign) / 2); \
+#define ASYMM_ROUNDING_HALF_SUM_IMPL(size) \
+ inline VEC_DATA_TYPE(int, size) \
+ asymm_rounding_half_sum##size(VEC_DATA_TYPE(int, size) a, VEC_DATA_TYPE(int, size) b) \
+ { \
+ VEC_DATA_TYPE(long, size) \
+ a64 = convert_long##size(a); \
+ VEC_DATA_TYPE(long, size) \
+ b64 = convert_long##size(b); \
+ VEC_DATA_TYPE(long, size) \
+ sum = a64 + b64; \
+ const VEC_DATA_TYPE(long, size) one = 1; \
+ const VEC_DATA_TYPE(long, size) minus_one = -1; \
+ VEC_DATA_TYPE(long, size) \
+ sign = select(minus_one, one, (SELECT_VEC_DATA_TYPE(long, size))(sum >= 0)); \
+ return convert_int##size((sum + sign) / 2); \
}
/** Calculates \f$ 1 / (1 + x) \f$ for x in (0, 1).
@@ -354,12 +362,12 @@ inline float dequantize_qasymm8_signed(char input, float offset, float scale)
const VEC_DATA_TYPE(int, size) Q0_one = INT_MAX; \
const VEC_DATA_TYPE(int, size) Q2_one = 1 << (31 - 2); \
VEC_DATA_TYPE(int, size) \
- half_denominator = ASYMM_ROUNDING_HALF_SUM(a, Q0_one, size); \
+ half_denominator = ASYMM_ROUNDING_HALF_SUM(a, Q0_one, size); \
const VEC_DATA_TYPE(int, size) Q2_48_over_17 = 1515870810; \
const VEC_DATA_TYPE(int, size) Q2_neg_32_over_17 = -1010580540; \
VEC_DATA_TYPE(int, size) \
x = Q2_48_over_17 + ASYMM_MULT(half_denominator, Q2_neg_32_over_17, size); \
- for(int i = 0; i < 3; i++) \
+ for (int i = 0; i < 3; i++) \
{ \
VEC_DATA_TYPE(int, size) \
half_denominator_times_x = ASYMM_MULT(half_denominator, x, size); \
@@ -378,56 +386,78 @@ inline float dequantize_qasymm8_signed(char input, float offset, float scale)
*
* @return Rescaled value.
*/
-#define ASYMM_RESCALE_IMPL(size) \
- inline VEC_DATA_TYPE(int, size) asymm_rescale##size(VEC_DATA_TYPE(int, size) value, int src_integer_bits, int dst_integer_bits) \
- { \
- int exponent = src_integer_bits - dst_integer_bits; \
- return ASYMM_SATURATING_ROUNDING_MULT_BY_POW2(value, exponent, size); \
+#define ASYMM_RESCALE_IMPL(size) \
+ inline VEC_DATA_TYPE(int, size) \
+ asymm_rescale##size(VEC_DATA_TYPE(int, size) value, int src_integer_bits, int dst_integer_bits) \
+ { \
+ int exponent = src_integer_bits - dst_integer_bits; \
+ return ASYMM_SATURATING_ROUNDING_MULT_BY_POW2(value, exponent, size); \
}
-#define QUANTIZE_STR(input, offset, scale, type, size) quantize_##type##size(input, offset, scale)
-#define QUANTIZE(input, offset, scale, type, size) QUANTIZE_STR(input, offset, scale, type, size)
+#define QUANTIZE_STR(input, offset, scale, type, size) quantize_##type##size(input, offset, scale)
+#define QUANTIZE(input, offset, scale, type, size) QUANTIZE_STR(input, offset, scale, type, size)
#define DEQUANTIZE_STR(input, offset, scale, type, size) dequantize_##type##size(input, offset, scale)
-#define DEQUANTIZE(input, offset, scale, type, size) DEQUANTIZE_STR(input, offset, scale, type, size)
+#define DEQUANTIZE(input, offset, scale, type, size) DEQUANTIZE_STR(input, offset, scale, type, size)
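+// Illustrative round trip (sketch; offset and scale are assumed to come from the
+// tensor's quantization info):
+//   float4 v_in  = (float4)(0.1f, 0.2f, 0.3f, 0.4f);
+//   uchar4 q     = QUANTIZE(v_in, offset, scale, uchar, 4);  // quantize_uchar4(...)
+//   float4 v_out = DEQUANTIZE(q, offset, scale, uchar, 4);   // dequantize_uchar4(...)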
#define ASYMM_ROUNDING_DIVIDE_BY_POW2_STR(x, exponent, size) asymm_rounding_divide_by_POW2_##size(x, exponent)
-#define ASYMM_ROUNDING_DIVIDE_BY_POW2(x, exponent, size) ASYMM_ROUNDING_DIVIDE_BY_POW2_STR(x, exponent, size)
-#define ASYMM_MULT_STR(a, b, size) asymm_mult##size(a, b)
-#define ASYMM_MULT(a, b, size) ASYMM_MULT_STR(a, b, size)
+#define ASYMM_ROUNDING_DIVIDE_BY_POW2(x, exponent, size) ASYMM_ROUNDING_DIVIDE_BY_POW2_STR(x, exponent, size)
+#define ASYMM_MULT_STR(a, b, size) asymm_mult##size(a, b)
+#define ASYMM_MULT(a, b, size) ASYMM_MULT_STR(a, b, size)
#define ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(x, quantized_multiplier, left_shift, size) \
ASYMM_MULT(x *((VEC_DATA_TYPE(int, size))(1) << (-left_shift)), quantized_multiplier, size)
#define ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(x, quantized_multiplier, right_shift, size) \
ASYMM_ROUNDING_DIVIDE_BY_POW2(ASYMM_MULT(x, quantized_multiplier, size), right_shift, size)
-#define ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL(a, size) asymm_exp_on_interval_between_negative_one_quarter_and_0_excl##size(a)
-#define ASYMM_SELECT_USING_MASK(if_mask, then_val, else_val, size) asymm_select_using_mask##size(if_mask, then_val, else_val)
-#define ASYMM_MASK_IF_ZERO(a, size) asymm_mask_if_zero##size(a)
+#define ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL(a, size) \
+ asymm_exp_on_interval_between_negative_one_quarter_and_0_excl##size(a)
+#define ASYMM_SELECT_USING_MASK(if_mask, then_val, else_val, size) \
+ asymm_select_using_mask##size(if_mask, then_val, else_val)
+#define ASYMM_MASK_IF_ZERO(a, size) asymm_mask_if_zero##size(a)
#define ASYMM_MASK_IF_NON_ZERO(a, size) asymm_mask_if_non_zero##size(a)
-#define EXP_BARREL_SHIFTER(result, exponent, fp_multiplier, k_integer_bits, k_fractional_bits, remainder, size) exp_barrel_shifter##size(result, exponent, fp_multiplier, k_integer_bits, k_fractional_bits, remainder)
+#define EXP_BARREL_SHIFTER(result, exponent, fp_multiplier, k_integer_bits, k_fractional_bits, remainder, size) \
+ exp_barrel_shifter##size(result, exponent, fp_multiplier, k_integer_bits, k_fractional_bits, remainder)
#define ASYMM_EXP_ON_NEGATIVE_VALUES_STR(a, k_integer_bits, size) asymm_exp_on_negative_values##size(a, k_integer_bits)
-#define ASYMM_EXP_ON_NEGATIVE_VALUES(a, k_integer_bits, size) ASYMM_EXP_ON_NEGATIVE_VALUES_STR(a, k_integer_bits, size)
-#define ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_STR(a, size) asymm_one_over_one_plus_x_for_x_in_0_1##size(a)
-#define ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1(a, size) ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_STR(a, size)
-#define ASYMM_SATURATING_ROUNDING_MULT_BY_POW2(x, exponent, size) asymm_saturating_rounding_mult_by_pow2##size(x, exponent)
+#define ASYMM_EXP_ON_NEGATIVE_VALUES(a, k_integer_bits, size) ASYMM_EXP_ON_NEGATIVE_VALUES_STR(a, k_integer_bits, size)
+#define ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_STR(a, size) asymm_one_over_one_plus_x_for_x_in_0_1##size(a)
+#define ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1(a, size) ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_STR(a, size)
+#define ASYMM_SATURATING_ROUNDING_MULT_BY_POW2(x, exponent, size) \
+ asymm_saturating_rounding_mult_by_pow2##size(x, exponent)
#define ASYMM_ROUNDING_HALF_SUM(a, b, size) asymm_rounding_half_sum##size(a, b)
-#define ASYMM_RESCALE_STR(value, src_integer_bits, dst_integer_bits, size) asymm_rescale##size(value, src_integer_bits, dst_integer_bits)
-#define ASYMM_RESCALE(value, src_integer_bits, dst_integer_bits, size) ASYMM_RESCALE_STR(value, src_integer_bits, dst_integer_bits, size)
-
-#define MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(size) \
- inline VEC_DATA_TYPE(int, size) multiply_by_quantized_multiplier##size(VEC_DATA_TYPE(int, size) input, int qmul, int shift) \
- { \
- const int left_shift = shift > 0 ? shift : 0; \
- const int right_shift = shift > 0 ? 0 : -shift; \
- return ASYMM_ROUNDING_DIVIDE_BY_POW2(ASYMM_MULT(input * (1 << left_shift), qmul, size), right_shift, size); \
+#define ASYMM_RESCALE_STR(value, src_integer_bits, dst_integer_bits, size) \
+ asymm_rescale##size(value, src_integer_bits, dst_integer_bits)
+#define ASYMM_RESCALE(value, src_integer_bits, dst_integer_bits, size) \
+ ASYMM_RESCALE_STR(value, src_integer_bits, dst_integer_bits, size)
+
+#define MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(size) \
+ inline VEC_DATA_TYPE(int, size) \
+ multiply_by_quantized_multiplier##size(VEC_DATA_TYPE(int, size) input, int qmul, int shift) \
+ { \
+ const int left_shift = shift > 0 ? shift : 0; \
+ const int right_shift = shift > 0 ? 0 : -shift; \
+ return ASYMM_ROUNDING_DIVIDE_BY_POW2(ASYMM_MULT(input * (1 << left_shift), qmul, size), right_shift, size); \
}
-#define MULTIPLY_BY_QUANTIZED_MULTIPLIER(input, qmul, shift, size) multiply_by_quantized_multiplier##size(input, qmul, shift)
+#define MULTIPLY_BY_QUANTIZED_MULTIPLIER(input, qmul, shift, size) \
+ multiply_by_quantized_multiplier##size(input, qmul, shift)
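+// Illustrative sketch (requires MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(4) to have been
+// instantiated by the including kernel; result_multiplier and result_shift are assumed
+// to be compile-time defines or kernel arguments):
+//   int4 acc      = ...;  // e.g. a GEMMLowp accumulator
+//   int4 rescaled = MULTIPLY_BY_QUANTIZED_MULTIPLIER(acc, result_multiplier, result_shift, 4);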
QUANTIZE_IMPL(uchar, 1)
QUANTIZE_IMPL(char, 1)
QUANTIZE_IMPL(uint, 1)
QUANTIZE_IMPL(int, 1)
+QUANTIZE_IMPL(uchar, 2)
+QUANTIZE_IMPL(char, 2)
+QUANTIZE_IMPL(uint, 2)
+QUANTIZE_IMPL(int, 2)
+QUANTIZE_IMPL(uchar, 3)
+QUANTIZE_IMPL(char, 3)
+QUANTIZE_IMPL(uint, 3)
+QUANTIZE_IMPL(int, 3)
QUANTIZE_IMPL(uchar, 4)
QUANTIZE_IMPL(ushort, 4)
QUANTIZE_IMPL(short, 4)
+QUANTIZE_IMPL(int, 4)
+QUANTIZE_IMPL(uchar, 8)
+QUANTIZE_IMPL(char, 8)
+QUANTIZE_IMPL(uint, 8)
+QUANTIZE_IMPL(int, 8)
QUANTIZE_IMPL(uchar, 16)
QUANTIZE_IMPL(char, 16)
QUANTIZE_IMPL(ushort, 16)
@@ -439,9 +469,22 @@ DEQUANTIZE_IMPL(uchar, 1)
DEQUANTIZE_IMPL(char, 1)
DEQUANTIZE_IMPL(uint, 1)
DEQUANTIZE_IMPL(int, 1)
+DEQUANTIZE_IMPL(uchar, 2)
+DEQUANTIZE_IMPL(char, 2)
+DEQUANTIZE_IMPL(uint, 2)
+DEQUANTIZE_IMPL(int, 2)
+DEQUANTIZE_IMPL(uchar, 3)
+DEQUANTIZE_IMPL(char, 3)
+DEQUANTIZE_IMPL(uint, 3)
+DEQUANTIZE_IMPL(int, 3)
DEQUANTIZE_IMPL(uchar, 4)
DEQUANTIZE_IMPL(ushort, 4)
DEQUANTIZE_IMPL(short, 4)
+DEQUANTIZE_IMPL(int, 4)
+DEQUANTIZE_IMPL(uchar, 8)
+DEQUANTIZE_IMPL(char, 8)
+DEQUANTIZE_IMPL(uint, 8)
+DEQUANTIZE_IMPL(int, 8)
DEQUANTIZE_IMPL(uchar, 16)
DEQUANTIZE_IMPL(char, 16)
DEQUANTIZE_IMPL(ushort, 16)
diff --git a/src/core/CL/cl_kernels/l2_normalize.cl b/src/core/CL/cl_kernels/l2_normalize.cl
deleted file mode 100644
index 14b37e3257..0000000000
--- a/src/core/CL/cl_kernels/l2_normalize.cl
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright (c) 2016-2019 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers.h"
-
-/** This kernel performs l2 normalization on x-axis
- *
- * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
- * @note The data size must be passed at compile time using -DDATA_SIZE e.g. -DDATA_SIZE=32
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along X processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] sum_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] sum_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] sum_step_x sum_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] sum_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] sum_step_y sum_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] sum_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] epsilon Epsilon value
- */
-__kernel void l2_normalize_x(
- IMAGE_DECLARATION(src),
- IMAGE_DECLARATION(sum),
- IMAGE_DECLARATION(dst),
- DATA_TYPE epsilon)
-{
- Image src = CONVERT_TO_IMAGE_STRUCT(src);
- Image sum = CONVERT_TO_IMAGE_STRUCT(sum);
- Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
-
- VEC_DATA_TYPE(DATA_TYPE, 16)
- in = vload16(0, (__global DATA_TYPE *)src.ptr);
- VEC_DATA_TYPE(DATA_TYPE, 16)
- normalize_value = (VEC_DATA_TYPE(DATA_TYPE, 16))rsqrt(fmax(((__global DATA_TYPE *)sum.ptr)[0], epsilon));
-
- vstore16(in * normalize_value, 0, (__global DATA_TYPE *)dst.ptr);
-}
-
-/** This kernel performs l2 normalization on y-axis.
- *
- * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
- * @note The data size must be passed at compile time using -DDATA_SIZE e.g. -DDATA_SIZE=32
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along X processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] sum_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] sum_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] sum_step_x sum_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] sum_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] sum_step_y sum_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] sum_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] epsilon Epsilon value
- */
-__kernel void l2_normalize_y(
- IMAGE_DECLARATION(src),
- IMAGE_DECLARATION(sum),
- IMAGE_DECLARATION(dst),
- DATA_TYPE epsilon)
-{
- Image src = CONVERT_TO_IMAGE_STRUCT(src);
- Image sum = CONVERT_TO_IMAGE_STRUCT(sum);
- Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
-
- VEC_DATA_TYPE(DATA_TYPE, 16)
- in = vload16(0, (__global DATA_TYPE *)src.ptr);
- VEC_DATA_TYPE(DATA_TYPE, 16)
- sums = vload16(0, (__global DATA_TYPE *)sum.ptr);
-
- VEC_DATA_TYPE(DATA_TYPE, 16)
- normalize_value = (VEC_DATA_TYPE(DATA_TYPE, 16))rsqrt(fmax(sums, epsilon));
-
- vstore16(in * normalize_value, 0, (__global DATA_TYPE *)dst.ptr);
-}
-/** This kernel performs l2 normalization on z-axis.
- *
- * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
- * @note The data size must be passed at compile time using -DDATA_SIZE e.g. -DDATA_SIZE=32
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] sum_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] sum_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] sum_step_x sum_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] sum_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] sum_step_y sum_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] sum_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] sum_step_z sum_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] sum_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] epsilon Epsilon value
- */
-__kernel void l2_normalize_z(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(sum),
- TENSOR3D_DECLARATION(dst),
- DATA_TYPE epsilon)
-{
- Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
- Tensor3D sum = CONVERT_TO_TENSOR3D_STRUCT(sum);
- Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
-
- VEC_DATA_TYPE(DATA_TYPE, 16)
- in = vload16(0, (__global DATA_TYPE *)src.ptr);
- VEC_DATA_TYPE(DATA_TYPE, 16)
- sums = vload16(0, (__global DATA_TYPE *)sum.ptr);
-
- VEC_DATA_TYPE(DATA_TYPE, 16)
- normalize_value = (VEC_DATA_TYPE(DATA_TYPE, 16))rsqrt(fmax(sums, epsilon));
-
- vstore16(in * normalize_value, 0, (__global DATA_TYPE *)dst.ptr);
-}
\ No newline at end of file
diff --git a/src/core/CL/cl_kernels/load_store_utility.h b/src/core/CL/cl_kernels/load_store_utility.h
index 56b1538c6f..4daf0adc89 100644
--- a/src/core/CL/cl_kernels/load_store_utility.h
+++ b/src/core/CL/cl_kernels/load_store_utility.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -223,8 +223,10 @@
* @param[in] Z The offset in z-axis direction
* @{
*/
-#define STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
-#define STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
+#define STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
+ STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
+#define STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
+ STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
/** @} */ // end of group STORE_BLOCK
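+// Illustrative sketch: STORE_BLOCK(2, 4, float, c, dst_addr, dst_stride_y, zout)
+// expands to STORE_ROW_2(4, float, c, dst_addr, dst_stride_y, zout), which vstore4's
+// the row variables c0 and c1 to consecutive rows of dst_addr (the names are assumed
+// to follow the BASENAME##row convention used by the block load/compute macros).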
/** Convert and store a block of the given size M0xN0
@@ -245,8 +247,10 @@
* @param[in] Z The offset in z-axis direction
* @{
*/
-#define CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
-#define CONVERT_STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
+#define CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
+ CONVERT_STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
+#define CONVERT_STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
+ CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
/** @} */ // end of group CONVERT_STORE_BLOCK
/** Partially store the 0 to (n-1)th rows of the given variables
@@ -365,8 +369,10 @@
* @param[in] Z The offset in z-axis direction
* @{
*/
-#define STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_PARTIAL_##STORE_M0(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
-#define STORE_BLOCK_PARTIAL(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
+#define STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
+ STORE_ROW_PARTIAL_##STORE_M0(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
+#define STORE_BLOCK_PARTIAL(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
+ STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
/** Store a block that can be partial in both x and y dimensions
*
 * @note In cases where @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, incurring a small performance penalty.
@@ -388,22 +394,23 @@
* @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0.
* @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0.
*/
-#define STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
- if(!(PARTIAL_COND_X) && !(PARTIAL_COND_Y)) \
- { \
- STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
- } \
- else if((PARTIAL_COND_Y) && !(PARTIAL_COND_X)) \
- { \
- STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
- } \
- else if(!(PARTIAL_COND_Y) && (PARTIAL_COND_X)) \
- { \
- STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
- } \
- else \
- { \
- STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
+#define STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, \
+ PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
+ if (!(PARTIAL_COND_X) && !(PARTIAL_COND_Y)) \
+ { \
+ STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
+ } \
+ else if ((PARTIAL_COND_Y) && !(PARTIAL_COND_X)) \
+ { \
+ STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
+ } \
+ else if (!(PARTIAL_COND_Y) && (PARTIAL_COND_X)) \
+ { \
+ STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
+ } \
+ else \
+ { \
+ STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
}
/** Store a block that can only be partial in x but not y.
*
@@ -425,7 +432,7 @@
* @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0.
*/
#define STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) \
- if(!(PARTIAL_COND_X)) \
+ if (!(PARTIAL_COND_X)) \
{ \
STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
@@ -453,7 +460,7 @@
* @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0.
*/
#define STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) \
- if(!(PARTIAL_COND_Y)) \
+ if (!(PARTIAL_COND_Y)) \
{ \
STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
@@ -463,8 +470,6 @@
}
/** @} */ // end of group STORE_BLOCK_PARTIAL
-#if defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
-
/** Boundary-aware GEMM block store
* @name STORE_BLOCK_BOUNDARY_AWARE
* This macro assumes the following schemes to achieve boundary-awareness:
@@ -516,32 +521,37 @@
* @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0.
* @{
*/
+#if defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
#if PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0
// Case1: No partial blocks in either x or y
-#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
+#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, \
+ PARTIAL_COND_Y, PARTIAL_COND_X) \
STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#elif PARTIAL_STORE_M0 > 0 && PARTIAL_STORE_N0 == 0
// Case2: Partial blocks in y
-#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
+#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, \
+ PARTIAL_COND_Y, PARTIAL_COND_X) \
STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y)
#elif PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 > 0
// Case3: Partial blocks in x
-#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
+#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, \
+ PARTIAL_COND_Y, PARTIAL_COND_X) \
STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X)
#else // PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0
// Case4: Partial blocks in both x and y
-#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
- STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X)
+#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, \
+ PARTIAL_COND_Y, PARTIAL_COND_X) \
+ STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, \
+ PARTIAL_COND_Y, PARTIAL_COND_X)
#endif // PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0
#endif // defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
/** @} */ // end of group STORE_BLOCK_BOUNDARY_AWARE
-#if defined(PARTIAL_STORE_M0)
/** Compute the start m0 row (LHS, BIAS and DST) in a boundary-aware way so as to avoid padding
* @name COMPUTE_M0_START_ROW
 * If there are any partial blocks in the y dimension, they are placed at the beginning of the rows.
@@ -558,16 +568,16 @@
* @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported: [0, @p M0)
* @{
*/
+#if defined(PARTIAL_STORE_M0)
#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \
((uint)(max(0, (int)(y * M0) - (int)((M0 - PARTIAL_STORE_M0) % M0))))
#else // defined(PARTIAL_STORE_M0)
-#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \
- ((uint)(y * M0))
+#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) ((uint)(y * M0))
#endif // defined(PARTIAL_STORE_M0)
/** @} */ // end of group COMPUTE_M0_START_ROW
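For reference, a minimal host-side C sketch of the COMPUTE_M0_START_ROW arithmetic above; the function name is illustrative and not part of the kernel sources.

    /* With M0 = 4 and PARTIAL_STORE_M0 = 1, block y = 0 starts at row 0 and handles the
       single leftover row, block y = 1 starts at row 1, block y = 2 at row 5, and so on. */
    static unsigned int compute_m0_start_row(int y, int m0, int partial_store_m0)
    {
        const int shift = (m0 - partial_store_m0) % m0; /* 0 when there is no leftover block */
        const int row   = y * m0 - shift;
        return (unsigned int)(row > 0 ? row : 0);
    }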
/** Store a vector that can only be partial in x.
- *
+ * @name STORE_VECTOR_SELECT
 * @note In case @p vec_size or @p leftover != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to end in a 0.
@@ -583,4 +593,4 @@
*/
#define STORE_VECTOR_SELECT(basename, data_type, ptr, vec_size, leftover, cond) \
STORE_BLOCK_PARTIAL_IN_X(1, vec_size, data_type, basename, ptr, 0, 0, leftover, cond)
-/** @} */ // end of group STORE_VECTOR_SELECT
\ No newline at end of file
+/** @} */ // end of group STORE_VECTOR_SELECT
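A functional C sketch of what STORE_VECTOR_SELECT resolves to, assuming float data; the helper name is illustrative, and the real macro emits vstore calls of the matching width rather than a memcpy.

    #include <string.h>

    /* The tail work-item (cond != 0) stores only `leftover` elements; every other
       work-item stores the full `vec_size` elements. */
    static void store_vector_select_ref(float *dst, const float *src, int vec_size, int leftover, int cond)
    {
        const int n = cond ? leftover : vec_size;
        memcpy(dst, src, (size_t)n * sizeof(float));
    }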
diff --git a/src/core/CL/cl_kernels/space_to_depth.cl b/src/core/CL/cl_kernels/nchw/batch_to_space.cl
index 1217a37345..d83e81347e 100644
--- a/src/core/CL/cl_kernels/space_to_depth.cl
+++ b/src/core/CL/cl_kernels/nchw/batch_to_space.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,12 +23,14 @@
*/
#include "helpers.h"
-#if defined(DATA_TYPE) && defined(BLOCK_SHAPE) && defined(CHANNEL_SIZE)
-/** Space to depth transformation. (NCHW)
+#if defined(DATA_TYPE) && defined(BATCH_SIZE)
+/** Batch to space transformation. (NCHW)
*
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
- * @note The input tensor batch size must be passed at compile time using -DCHANNEL_SIZE. e.g. -DCHANNEL_SIZE=2
- * @note The block shape must be passed at compile time using -DBLOCK_SHAPE. e.g. -DBLOCK_SHAPE=2
+ * @note The input tensor batch size must be passed at compile time using -DBATCH_SIZE. e.g. -DBATCH_SIZE=2
+ *
+ * @deprecated This method for dynamic block shape is not fully mature and will be removed in 23.08 release
*
* @param[in] input_ptr Pointer to the source tensor. Supported data types: All
* @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
@@ -39,6 +41,12 @@
* @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
* @param[in] batch_id The input tensor batch id
+ * @param[in] block_shape_ptr Pointer to the block shape vector. Supported data types: S32
+ * @param[in] block_shape_stride_x Stride of the block shape vector in X dimension (in bytes)
+ * @param[in] block_shape_step_x block_shape_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] block_shape_stride_y Stride of the block shape vector in Y dimension (in bytes)
+ * @param[in] block_shape_step_y block_shape_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] block_shape_offset_first_element_in_bytes The offset of the first element in the block shape vector
* @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
* @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
* @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
@@ -48,29 +56,38 @@
* @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
*/
-__kernel void space_to_depth_nchw(
+__kernel void batch_to_space_nchw(
TENSOR4D_DECLARATION(input),
const int batch_id,
+ VECTOR_DECLARATION(block_shape),
TENSOR3D_DECLARATION(output))
{
- Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, 0);
- Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
+ Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input);
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
+ Vector block = CONVERT_TO_VECTOR_STRUCT_NO_STEP(block_shape);
+
+ const int block_x = *((__global int *)vector_offset(&block, 0));
+ const int block_y = *((__global int *)vector_offset(&block, 1));
- const int r = (CHANNEL_SIZE / (BLOCK_SHAPE * BLOCK_SHAPE));
const int x = get_global_id(0);
const int y = get_global_id(1);
- const int z = get_global_id(2) % r;
+ const int z = get_global_id(2);
- const int in_x = x * BLOCK_SHAPE + (get_global_id(2) / r) % BLOCK_SHAPE;
- const int in_y = y * BLOCK_SHAPE + (get_global_id(2) / r) / BLOCK_SHAPE;
+ const int in_batch = batch_id + ((x % block_x) + (y % block_y) * block_x) * BATCH_SIZE;
+ const int in_x = x / block_x;
+ const int in_y = y / block_y;
- *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)tensor4D_offset(&in, in_x, in_y, z, batch_id));
+ *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)tensor4D_offset(&in, in_x, in_y, z, in_batch));
}
-/** Space to depth transformation. (NHWC)
+#endif // defined(DATA_TYPE) && defined(BATCH_SIZE)
+
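For reference, a C sketch of the index mapping the kernel above applies once block_x and block_y have been read from the block_shape vector; the function name is illustrative.

    static void batch_to_space_src_coords(int x, int y, int batch_id,
                                          int block_x, int block_y, int batch_size,
                                          int *in_x, int *in_y, int *in_batch)
    {
        /* Output element (x, y) is read from the spatially smaller input tensor */
        *in_x     = x / block_x;
        *in_y     = y / block_y;
        /* The block offset selects which interleaved batch the value comes from */
        *in_batch = batch_id + ((x % block_x) + (y % block_y) * block_x) * batch_size;
    }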
+#if defined(DATA_TYPE) && defined(BATCH_SIZE) && defined(BLOCK_SHAPE_X) && defined(BLOCK_SHAPE_Y)
+/** Batch to space transformation. (NCHW)
*
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
- * @note The input tensor batch size must be passed at compile time using -DCHANNEL_SIZE. e.g. -DCHANNEL_SIZE=2
- * @note The block shape must be passed at compile time using -DBLOCK_SHAPE. e.g. -DBLOCK_SHAPE=2
+ * @note The input tensor batch size must be passed at compile time using -DBATCH_SIZE. e.g. -DBATCH_SIZE=2
+ * @note The block shape x must be passed at compile time using -DBLOCK_SHAPE_X. e.g. -DBLOCK_SHAPE_X=2
+ * @note The block shape y must be passed at compile time using -DBLOCK_SHAPE_Y. e.g. -DBLOCK_SHAPE_Y=2
*
* @param[in] input_ptr Pointer to the source tensor. Supported data types: All
* @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
@@ -90,22 +107,25 @@ __kernel void space_to_depth_nchw(
* @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
*/
-__kernel void space_to_depth_nhwc(
+__kernel void batch_to_space_static_nchw(
TENSOR4D_DECLARATION(input),
const int batch_id,
TENSOR3D_DECLARATION(output))
{
- Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, 0);
+ Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input);
Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
- const int r = (CHANNEL_SIZE / (BLOCK_SHAPE * BLOCK_SHAPE));
- const int x = get_global_id(1);
- const int y = get_global_id(2);
- const int z = get_global_id(0) % r;
+ const int block_x = BLOCK_SHAPE_X;
+ const int block_y = BLOCK_SHAPE_Y;
+
+ const int x = get_global_id(0) + CROP_LEFT;
+ const int y = get_global_id(1) + CROP_TOP;
+ const int z = get_global_id(2);
- const int in_x = x * BLOCK_SHAPE + (get_global_id(0) / r) % BLOCK_SHAPE;
- const int in_y = y * BLOCK_SHAPE + (get_global_id(0) / r) / BLOCK_SHAPE;
+ const int in_batch = batch_id + ((x % block_x) + (y % block_y) * block_x) * BATCH_SIZE;
+ const int in_x = x / block_x;
+ const int in_y = y / block_y;
- *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)tensor4D_offset(&in, z, in_x, in_y, batch_id));
+ *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)tensor4D_offset(&in, in_x, in_y, z, in_batch));
}
-#endif // defined(DATA_TYPE) && defined(BLOCK_SHAPE) && defined(CHANNEL_SIZE)
\ No newline at end of file
+#endif // defined(DATA_TYPE) && defined(BATCH_SIZE) && defined(BLOCK_SHAPE_X) && defined(BLOCK_SHAPE_Y)
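A short trace of the same arithmetic for the static variant, assuming -DBATCH_SIZE=2, -DBLOCK_SHAPE_X=2, -DBLOCK_SHAPE_Y=2 and zero cropping; the values are illustrative only.

    #include <assert.h>

    static void batch_to_space_static_trace(void)
    {
        const int x = 3, y = 1, batch_id = 0;               /* output coordinates */
        const int block_x = 2, block_y = 2, batch_size = 2; /* compile-time defines */
        assert(x / block_x == 1);                           /* in_x */
        assert(y / block_y == 0);                           /* in_y */
        assert(batch_id + ((x % block_x) + (y % block_y) * block_x) * batch_size == 6); /* in_batch */
    }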
diff --git a/src/core/CL/cl_kernels/nchw/batchnormalization_layer.cl b/src/core/CL/cl_kernels/nchw/batchnormalization_layer.cl
new file mode 100644
index 0000000000..2d466661b3
--- /dev/null
+++ b/src/core/CL/cl_kernels/nchw/batchnormalization_layer.cl
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2017-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#define ADD_OP(a, b) ((a) + (b))
+#define SUB_OP(a, b) ((a) - (b))
+#define MUL_OP(a, b) ((a) * (b))
+#define INVSQRT_OP(a) rsqrt((a))
+#define SQCVT_SAT(a) (a)
+
+#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(ACTIVATION_TYPE)
+#include "activation_float_helpers.h"
+
+/** Apply batch normalization.
+ *
+ * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
+ * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
+ *
+ * @param[in] input_ptr Pointer to the first source tensor. Supported data types: F16/F32
+ * @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the first source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the first source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] mean_ptr Pointer to the mean source tensor. Supported data types: same as @p input_ptr
+ * @param[in] mean_stride_x Stride of the mean source tensor in X dimension (in bytes)
+ * @param[in] mean_step_x mean_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] mean_offset_first_element_in_bytes The offset of the first element in the mean source tensor
+ * @param[in] var_ptr Pointer to the var tensor. Supported data types: same as @p input_ptr
+ * @param[in] var_stride_x Stride of the var tensor in X dimension (in bytes)
+ * @param[in] var_step_x var_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] var_offset_first_element_in_bytes The offset of the first element in the var source tensor
+ * @param[in] beta_ptr Pointer to the beta source tensor. Supported data types: same as @p input_ptr
+ * @param[in] beta_stride_x Stride of the beta source tensor in X dimension (in bytes)
+ * @param[in] beta_step_x beta_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] beta_offset_first_element_in_bytes The offset of the first element in the beta source tensor
+ * @param[in] gamma_ptr Pointer to the gamma source tensor. Supported data types: same as @p input_ptr
+ * @param[in] gamma_stride_x Stride of the gamma source tensor in X dimension (in bytes)
+ * @param[in] gamma_step_x gamma_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] gamma_offset_first_element_in_bytes The offset of the first element in the gamma source tensor
+ * @param[in] epsilon Epsilon parameter in the batch normalization equation
+ */
+__kernel void batchnormalization_layer_nchw(TENSOR3D_DECLARATION(input),
+#ifndef IN_PLACE
+ TENSOR3D_DECLARATION(output),
+#endif /* not IN_PLACE */
+ VECTOR_DECLARATION(mean),
+ VECTOR_DECLARATION(var),
+#ifndef USE_DEFAULT_BETA
+ VECTOR_DECLARATION(beta),
+#endif /* USE_DEFAULT_BETA */
+#ifndef USE_DEFAULT_GAMMA
+ VECTOR_DECLARATION(gamma),
+#endif /* USE_DEFAULT_GAMMA */
+ float epsilon)
+{
+ Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
+#ifdef IN_PLACE
+ Tensor3D out = in;
+#else /* IN_PLACE */
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
+#endif /* IN_PLACE */
+ Vector mean = CONVERT_TO_VECTOR_STRUCT(mean);
+ Vector var = CONVERT_TO_VECTOR_STRUCT(var);
+#ifndef USE_DEFAULT_BETA
+ Vector beta = CONVERT_TO_VECTOR_STRUCT(beta);
+#endif /* USE_DEFAULT_BETA */
+#ifndef USE_DEFAULT_GAMMA
+ Vector gamma = CONVERT_TO_VECTOR_STRUCT(gamma);
+#endif /* USE_DEFAULT_GAMMA */
+
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ data = 0;
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ denominator = 0;
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ numerator = 0;
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ x_bar = 0;
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ res = 0;
+
+ const int current_slice = get_global_id(2);
+
+ data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)in.ptr);
+ denominator = *((__global DATA_TYPE *)(var.ptr + current_slice * var.stride_x));
+ denominator = INVSQRT_OP(ADD_OP(denominator, ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))SQCVT_SAT(epsilon))));
+
+ // Calculate x bar and store results
+ numerator = *((__global DATA_TYPE *)(mean.ptr + current_slice * mean.stride_x));
+ numerator = SUB_OP(data, numerator);
+ x_bar = MUL_OP(numerator, denominator);
+
+#ifndef USE_DEFAULT_GAMMA
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ gamma_vec = *((__global DATA_TYPE *)(gamma.ptr + current_slice * gamma.stride_x));
+
+ res = MUL_OP(gamma_vec, x_bar);
+#else /* USE_DEFAULT_GAMMA */
+ // gamma is equal to 1, no need to perform multiplications
+ res = x_bar;
+#endif /* USE_DEFAULT_GAMMA */
+
+#ifndef USE_DEFAULT_BETA
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ beta_vec = *((__global DATA_TYPE *)(beta.ptr + current_slice * beta.stride_x));
+ // beta is not zero, hence we need to perform the addition
+ res = ADD_OP(res, beta_vec);
+#endif /* USE_DEFAULT_BETA */
+
+ res = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, res, A_VAL, B_VAL);
+
+ VSTORE(VEC_SIZE)
+ (res, 0, (__global DATA_TYPE *)out.ptr);
+}
+#endif /* defined(VEC_SIZE) && defined(DATA_TYPE) && defined(ACTIVATION_TYPE) */
\ No newline at end of file
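The kernel evaluates the usual batch-normalization expression per channel slice. A scalar C sketch of that expression, with illustrative names; under USE_DEFAULT_GAMMA / USE_DEFAULT_BETA the gamma multiply and beta add are skipped, and the configured activation is applied to the result.

    #include <math.h>

    static float batchnorm_ref(float x, float mean, float var, float gamma, float beta, float epsilon)
    {
        const float x_bar = (x - mean) / sqrtf(var + epsilon); /* SUB_OP / INVSQRT_OP path */
        return gamma * x_bar + beta;                            /* MUL_OP / ADD_OP path */
    }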
diff --git a/src/core/CL/cl_kernels/nchw/channel_shuffle.cl b/src/core/CL/cl_kernels/nchw/channel_shuffle.cl
new file mode 100644
index 0000000000..84396e122f
--- /dev/null
+++ b/src/core/CL/cl_kernels/nchw/channel_shuffle.cl
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+#include "tile_helpers.h"
+
+#if defined(DATA_TYPE) && defined(VEC_SIZE) && defined(NUM_GROUPS) && defined(K) && defined(SRC_DIM_Z)
+
+// Check valid VEC_SIZES
+#if VEC_SIZE != 1 && VEC_SIZE != 2 && VEC_SIZE != 3 && VEC_SIZE != 4 && VEC_SIZE != 8 && VEC_SIZE != 16
+#error "Only vector sizes 1, 2, 3, 4, 8 and 16 are supported"
+#endif // VEC_SIZE != 1 && VEC_SIZE != 2 && VEC_SIZE != 3 && VEC_SIZE != 4 && VEC_SIZE != 8 && VEC_SIZE != 16
+
+#define DIV_MOD_UINT(x, y, div_res, mod_res) \
+ ({ \
+ div_res = (uint)((x)/(y)); \
+ uint r = div_res * (y); \
+ mod_res = (x)-r; \
+ })
+
+/** Performs channel shuffle when the data layout is NCHW. See https://arxiv.org/pdf/1707.01083.pdf for details.
+ *
+ * @note The vector size must be given as a preprocessor argument using -DVEC_SIZE=num. e.g. -DVEC_SIZE=4
+ * @note The depth of the tensor must be given as a preprocessor argument using -DSRC_DIM_Z=num. e.g. -DSRC_DIM_Z=64
+ * @note The number of groups must be given as a preprocessor argument using -DNUM_GROUPS=num_groups. e.g. -DNUM_GROUPS=2
+ * @note The number of channels in each group must be given as a preprocessor argument using -DK=num. e.g. -DK=1
+ * K is equal to num_channels / num_groups.
+ *
+ * @param[in] src_ptr Pointer to the source matrix. Supported data types: All
+ * @param[in] src_stride_x Stride of the first source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the first source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the first source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the first source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the first source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void channel_shuffle_nchw(TENSOR4D_DECLARATION(src),
+ TENSOR4D_DECLARATION(dst))
+{
+ uint curr_channel = 0; // channel id of input
+ uint batch_id = 0; // batch id
+ uint group_id = 0; // group id
+ uint channel_id = 0; // channel id within the group
+
+ // Compute curr_channel and batch_id
+ DIV_MOD_UINT(get_global_id(2), SRC_DIM_Z, batch_id, curr_channel);
+
+ // Compute group_id and channel_id
+ DIV_MOD_UINT(curr_channel, K, group_id, channel_id);
+
+ const uint x = get_global_id(0) * VEC_SIZE;
+ const uint y = get_global_id(1) * 2;
+ const uint z = channel_id * NUM_GROUPS + group_id;
+
+ // Load the Nx2 block
+ const __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + y * src_stride_y + curr_channel * src_stride_z + batch_id * src_stride_w;
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ u0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_ptr + 0 * src_stride_y));
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ u1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_ptr + 1 * src_stride_y));
+
+ // Store blocks
+ __global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + y * dst_stride_y + z * dst_stride_z + batch_id * dst_stride_w;
+ VSTORE(VEC_SIZE)
+ (u0, 0, (__global DATA_TYPE *)(output_ptr + 0 * dst_stride_y));
+ VSTORE(VEC_SIZE)
+ (u1, 0, (__global DATA_TYPE *)(output_ptr + 1 * dst_stride_y));
+}
+
+#endif // defined(DATA_TYPE) && defined(VEC_SIZE) && defined(NUM_GROUPS) && defined(K) && defined(SRC_DIM_Z)
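For reference, a C sketch of the channel permutation computed above: with NUM_GROUPS groups of K channels each, the shuffle is a (NUM_GROUPS x K) transpose of the channel index. The function name is illustrative.

    static int channel_shuffle_dst_channel(int src_channel, int num_groups, int k)
    {
        const int group_id   = src_channel / k; /* which group the source channel belongs to */
        const int channel_id = src_channel % k; /* position inside that group */
        return channel_id * num_groups + group_id;
    }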
diff --git a/src/core/CL/cl_kernels/nchw/depth_to_space.cl b/src/core/CL/cl_kernels/nchw/depth_to_space.cl
new file mode 100644
index 0000000000..57183393d2
--- /dev/null
+++ b/src/core/CL/cl_kernels/nchw/depth_to_space.cl
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2019-2021, 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(DATA_TYPE) && defined(BLOCK_SHAPE) && defined(CHANNEL_SIZE)
+/** Depth to space transformation. (NCHW)
+ *
+ * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
+ * @note The input tensor depth size must be passed at compile time using -DCHANNEL_SIZE. e.g. -DCHANNEL_SIZE=2
+ * @note The block shape must be passed at compile time using -DBLOCK_SHAPE. e.g. -DBLOCK_SHAPE=2
+ *
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: All.
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
+ * @param[in] batch_id The input tensor batch id
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void depth_to_space_nchw(
+ TENSOR3D_DECLARATION(input),
+ const int batch_id,
+ TENSOR4D_DECLARATION(output))
+{
+ Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
+ Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output);
+
+ const int r = (CHANNEL_SIZE / (BLOCK_SHAPE * BLOCK_SHAPE));
+ const int x = get_global_id(0);
+ const int y = get_global_id(1);
+ const int z = get_global_id(2) % r;
+
+ const int out_x = x * BLOCK_SHAPE + (get_global_id(2) / r) % BLOCK_SHAPE;
+ const int out_y = y * BLOCK_SHAPE + (get_global_id(2) / r) / BLOCK_SHAPE;
+
+ *((__global DATA_TYPE *)tensor4D_offset(&out, out_x, out_y, z, batch_id)) = *((__global DATA_TYPE *)in.ptr);
+}
+#endif // defined(DATA_TYPE) && defined(BLOCK_SHAPE) && defined(CHANNEL_SIZE)
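A C sketch of the index mapping above, with illustrative names: r is the number of output channels, and the remaining factor of the source channel index selects the position inside the BLOCK_SHAPE x BLOCK_SHAPE output tile.

    static void depth_to_space_dst_coords(int x, int y, int src_channel,
                                          int channel_size, int block_shape,
                                          int *out_x, int *out_y, int *out_z)
    {
        const int r = channel_size / (block_shape * block_shape); /* output channel count */
        *out_z = src_channel % r;
        *out_x = x * block_shape + (src_channel / r) % block_shape;
        *out_y = y * block_shape + (src_channel / r) / block_shape;
    }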
diff --git a/src/core/CL/cl_kernels/nchw/dequantization_layer.cl b/src/core/CL/cl_kernels/nchw/dequantization_layer.cl
new file mode 100644
index 0000000000..e0203f7408
--- /dev/null
+++ b/src/core/CL/cl_kernels/nchw/dequantization_layer.cl
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2017-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(VEC_SIZE) && defined(DATA_TYPE_SRC) && defined(DATA_TYPE_DST)
+/** This performs per channel dequantization of 8-bit signed integers to floating point. (NCHW)
+ *
+ * @note Source datatype should be given as a preprocessor argument using -DDATA_TYPE_SRC=type. e.g. -DDATA_TYPE_SRC=char
+ * @note Destination datatype should be given as a preprocessor argument using -DDATA_TYPE_DST=type. e.g. -DDATA_TYPE_DST=float
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ *
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: QSYMM8_PER_CHANNEL
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: F16/F32
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] scale Pointer to buffer with the per channel quantized scales
+ */
+__kernel void dequantization_layer_per_channel_nchw(
+ TENSOR3D_DECLARATION(input),
+ TENSOR3D_DECLARATION(output),
+ __global float *scale)
+{
+ // Get pixels pointer
+ Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
+ Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
+
+#if defined(LAST_ACCESSED_X)
+ // Check if access on width gets out of bounds
+ // If it does shift access vector to access elements within bounds
+ const int xi = (int)(get_global_id(0) * VEC_SIZE);
+ input.ptr -= max(xi - (int)LAST_ACCESSED_X, 0) * input_stride_x;
+ output.ptr -= max(xi - (int)LAST_ACCESSED_X, 0) * output_stride_x;
+
+ // Load data
+ VEC_DATA_TYPE(int, VEC_SIZE)
+ val = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_SRC *)input.ptr), VEC_DATA_TYPE(int, VEC_SIZE));
+
+ // Create scale vectors
+ const VEC_DATA_TYPE(float, VEC_SIZE)
+ vscale = scale[get_global_id(2)];
+
+ // Dequantize
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ res = vscale * CONVERT((val), VEC_DATA_TYPE(float, VEC_SIZE));
+
+ // Store result
+ VSTORE(VEC_SIZE)
+ (CONVERT(res, VEC_DATA_TYPE(DATA_TYPE_DST, VEC_SIZE)), 0, (__global DATA_TYPE_DST *)output.ptr);
+#else // !defined(LAST_ACCESSED_X)
+ *((__global DATA_TYPE_DST *)(output.ptr)) = (DATA_TYPE_DST)((float)((int)(*((__global DATA_TYPE_SRC *)(input.ptr)))) * scale[get_global_id(2)]);
+#endif // defined(LAST_ACCESSED_X)
+}
+#endif // defined(VEC_SIZE) && defined(DATA_TYPE_SRC) && defined(DATA_TYPE_DST)
\ No newline at end of file
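The per-channel path boils down to one multiply per element. A scalar C sketch with illustrative names:

    /* Each QSYMM8_PER_CHANNEL value is scaled by the quantization scale of its channel. */
    static float dequantize_qsymm8_per_channel_ref(signed char q, const float *scale, int channel)
    {
        return (float)q * scale[channel];
    }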
diff --git a/src/core/CL/cl_kernels/nchw/direct_convolution.cl b/src/core/CL/cl_kernels/nchw/direct_convolution.cl
new file mode 100644
index 0000000000..866f62da95
--- /dev/null
+++ b/src/core/CL/cl_kernels/nchw/direct_convolution.cl
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+#include "helpers_asymm.h"
+
+/** This kernel performs a direct convolution to convolve the low three dimensions.
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The data size must be passed at compile time using -DDATA_SIZE e.g. -DDATA_SIZE=32
+ * @note The convolution stride x must be passed at compile time using -DSTRIDE_X e.g. -DSTRIDE_X=1
+ * @note The third dimension of the weights tensor must be passed at compile time using -DWEIGHTS_DEPTH
+ * @note In case biases will be added to the convolution -DHAS_BIAS has to be passed to append the final matrix with 1 in each row.
+ * @note The output quantization multiplier must be passed at compile time using -DOUTPUT_MULTIPLIER e.g. -DOUTPUT_MULTIPLIER=1234
+ * @note The output quantization shift must be passed at compile time using -DOUTPUT_SHIFT e.g. -DOUTPUT_SHIFT=4
+ * @note The input offset quantization parameter must be passed at compile time using -DINPUT_OFFSET e.g. -DINPUT_OFFSET=3
+ * @note The weights offset quantization parameter must be passed at compile time using -DWEIGHTS_OFFSET e.g. -DWEIGHTS_OFFSET=3
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/F16/F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
+ * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
+ * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] weights_step_y weights_stride_y * number of elements along y processed per workitem(in bytes)
+ * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
+ * @param[in] biases_ptr Pointer to the biases tensor. Same as @p src_ptr
+ * @param[in] biases_stride_x Stride of the biases tensor in X dimension (in bytes)
+ * @param[in] biases_step_x biases_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] biases_offset_first_element_in_bytes The offset of the first element in the biases tensor
+ * @param[in] weights_stride_w Stride of the weights tensor in the 4th dimension
+ */
+__kernel void direct_convolution_nchw(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst),
+ TENSOR3D_DECLARATION(weights),
+#ifdef HAS_BIAS
+ VECTOR_DECLARATION(biases),
+#endif /* defined(HAS_BIAS) */
+ unsigned int weights_stride_w)
+{
+ const int id0 = get_global_id(0);
+ const int id1 = get_global_id(1);
+ const int id2 = get_global_id(2);
+
+ const int x_coords = (id0 * STRIDE_X) - PAD_LEFT;
+ const int y_coords = (id1 * STRIDE_Y) - PAD_TOP;
+
+ const int x_offs = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0) * sizeof(DATA_TYPE);
+
+ __global uchar *src_addr = (__global uchar *)(src_ptr + src_offset_first_element_in_bytes);
+ __global uchar *weights_addr = (__global uchar *)(weights_ptr + weights_offset_first_element_in_bytes + id2 * weights_stride_w);
+ __global uchar *dst_addr = (__global uchar *)dst_ptr + dst_offset_first_element_in_bytes + x_offs + id1 * dst_stride_y + id2 * dst_stride_z;
+
+#ifdef IS_QUANTIZED
+ int acc_value = 0;
+#else /* IS_QUANTIZED */
+ DATA_TYPE acc_value = 0;
+#endif /* IS_QUANTIZED */
+ for(volatile int d = 0; d < WEIGHTS_DEPTH; ++d)
+ {
+ for(int y = 0; y < WEI_HEIGHT; ++y)
+ {
+ for(int x = 0; x < WEI_WIDTH; ++x)
+ {
+ const int idx_x = (x_coords + x);
+ const int idx_y = (y_coords + y);
+ if((idx_x >= 0 && idx_x < SRC_WIDTH) && (idx_y >= 0 && idx_y < SRC_HEIGHT))
+ {
+ const int weight_offset = x + (WEI_HEIGHT * y);
+ const int input_offset = idx_x + SRC_WIDTH * idx_y;
+#ifdef IS_QUANTIZED
+ int weight = convert_int(*((__global DATA_TYPE *)weights_addr + weight_offset));
+ int input = convert_int(*((__global DATA_TYPE *)src_addr + input_offset));
+ acc_value += (input + INPUT_OFFSET) * (weight + WEIGHTS_OFFSET);
+#else /* IS_QUANTIZED */
+ DATA_TYPE weight = *((__global DATA_TYPE *)weights_addr + weight_offset);
+ DATA_TYPE input = *((__global DATA_TYPE *)src_addr + input_offset);
+ acc_value += input * weight;
+#endif /* IS_QUANTIZED */
+ }
+ }
+ }
+ src_addr += src_stride_z;
+ weights_addr += weights_stride_z;
+ }
+
+#ifdef HAS_BIAS
+
+ Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
+#ifdef IS_QUANTIZED
+ int bias = *((__global int *)(vector_offset(&biases, id2)));
+#else /* IS_QUANTIZED */
+ DATA_TYPE bias = *((__global DATA_TYPE *)(vector_offset(&biases, id2)));
+#endif /* IS_QUANTIZED */
+ acc_value += bias;
+
+#endif /* defined(HAS_BIAS) */
+
+#ifdef IS_QUANTIZED
+
+#if OUTPUT_SHIFT < 0
+ acc_value = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc_value, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 1);
+#else // OUTPUT_SHIFT < 0
+ acc_value = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(acc_value, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 1);
+#endif // OUTPUT_SHIFT < 0
+ acc_value = acc_value + OUTPUT_OFFSET;
+#endif /* IS_QUANTIZED */
+
+ *(__global DATA_TYPE *)dst_addr = CONVERT_SAT(acc_value, DATA_TYPE);
+}
\ No newline at end of file
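For the quantized path, a scalar C sketch of the accumulation and requantization steps above; effective_scale stands in for the OUTPUT_MULTIPLIER / OUTPUT_SHIFT fixed-point pair and is an assumption of this sketch, since the kernel itself uses the ASYMM_MULT_BY_QUANT_MULTIPLIER_* helpers.

    #include <math.h>

    /* int32 accumulation of one input/weight pair, as in the inner loop above */
    static int direct_conv_accumulate_ref(int acc, int input, int weight, int input_offset, int weights_offset)
    {
        return acc + (input + input_offset) * (weight + weights_offset);
    }

    /* Requantize the accumulator to the output domain (floating-point approximation of the
       fixed-point multiplier/shift rescale), then add the output zero point. */
    static int direct_conv_requantize_ref(int acc, float effective_scale, int output_offset)
    {
        return (int)lroundf((float)acc * effective_scale) + output_offset;
    }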
diff --git a/src/core/CL/cl_kernels/im2col.cl b/src/core/CL/cl_kernels/nchw/im2col.cl
index a1467a0b36..fddf918c63 100644
--- a/src/core/CL/cl_kernels/im2col.cl
+++ b/src/core/CL/cl_kernels/nchw/im2col.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,7 +22,6 @@
* SOFTWARE.
*/
#include "helpers.h"
-
#if defined(DATA_TYPE) && defined(ELEMENT_SIZE)
#if ELEMENT_SIZE == 1
@@ -861,500 +860,4 @@ __kernel void im2col_generic_padx0_pady0_nchw(
#endif // HAS_BIAS
}
#endif //defined(CONVOLVED_WIDTH) && defined(STRIDE_X) && defined(STRIDE_Y) && defined(PAD_LEFT) && defined(PAD_TOP) && defined(PAD_RIGHT) && defined(PAD_BOTTOM) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(SRC_DEPTH) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(VECTOR_SIZE) && defined(WIDTH_MOD_VECTOR_SIZE)
-
-#if defined(CONVOLVED_WIDTH) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(STRIDE_X) && defined(STRIDE_Y) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(SRC_DEPTH) && defined(PAD_LEFT) && defined(PAD_RIGHT) && defined(PAD_TOP) && defined(PAD_BOTTOM) && defined(PAD_VALUE) && defined(VECTOR_SIZE) && defined(BOUNDARY_VECTOR_SIZE)
-
-#define VECTOR_N VEC_DATA_TYPE(DATA_TYPE, VECTOR_SIZE)
-#define COND_N VEC_DATA_TYPE(COND_DATA_TYPE, VECTOR_SIZE)
-
-/** Store a 1x9 row or a 3x3 block in a boundary-aware manner to avoid paddings in the channel dimension
- * @name IM2COL1X9_NHWC_STORE
- *
- * @note To use this macro for a 3x3 block, @p ROW has to be 0
- *
- * @param[in] VECTOR_SIZE The non-boundary vector width of @p DATA. Supported: 1(scalar), 2, 3, 4, 8, 16
- * @param[in] BOUNDARY_VECTOR_SIZE The boundary vector width of @p DATA. Supported: 1-16, but has to be <= @p size
- * @param[in] DATA_TYPE Data type of @p DATA
- * @param[in] SRC_DEPTH Input channel size / depth
- * @param[in] DATA Value variable base name
- * @param[in] ROW The row number to store. Supported: 0-8
- * @param[in] OUTPUT_PTR Output pointer
- * @{
- */
-#if defined(VECTOR_SIZE) && defined(BOUNDARY_VECTOR_SIZE) && BOUNDARY_VECTOR_SIZE < VECTOR_SIZE
-#define IM2COL1X9_NHWC_STORE(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE, DATA_TYPE, SRC_DEPTH, DATA, ROW, OUTPUT_PTR) \
- const bool at_channel_boundary = get_global_id(0) == 0; \
- if(at_channel_boundary) \
- { \
- IM2COL1X9_NHWC_STORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE, DATA_TYPE, SRC_DEPTH, DATA, ROW, OUTPUT_PTR) \
- } \
- else \
- { \
- IM2COL1X9_NHWC_STORE_NONPARTIAL(VECTOR_SIZE, DATA_TYPE, SRC_DEPTH, DATA, ROW, OUTPUT_PTR) \
- }
-#else // defined(VECTOR_SIZE) && defined(BOUNDARY_VECTOR_SIZE) && BOUNDARY_VECTOR_SIZE < VECTOR_SIZE
-#define IM2COL1X9_NHWC_STORE(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE, DATA_TYPE, SRC_DEPTH, DATA, ROW, OUTPUT_PTR) \
- IM2COL1X9_NHWC_STORE_NONPARTIAL(VECTOR_SIZE, DATA_TYPE, SRC_DEPTH, DATA, ROW, OUTPUT_PTR)
-#endif // defined(VECTOR_SIZE) && defined(BOUNDARY_VECTOR_SIZE) && BOUNDARY_VECTOR_SIZE < VECTOR_SIZE
-
-#define IM2COL1X9_NHWC_STORE_NONPARTIAL(VECTOR_SIZE, DATA_TYPE, SRC_DEPTH, DATA, ROW, OUTPUT_PTR) \
- VSTORE(VECTOR_SIZE) \
- (DATA##0, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (0 + ROW * 9) * SRC_DEPTH); \
- VSTORE(VECTOR_SIZE) \
- (DATA##1, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (1 + ROW * 9) * SRC_DEPTH); \
- VSTORE(VECTOR_SIZE) \
- (DATA##2, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (2 + ROW * 9) * SRC_DEPTH); \
- VSTORE(VECTOR_SIZE) \
- (DATA##3, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (3 + ROW * 9) * SRC_DEPTH); \
- VSTORE(VECTOR_SIZE) \
- (DATA##4, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (4 + ROW * 9) * SRC_DEPTH); \
- VSTORE(VECTOR_SIZE) \
- (DATA##5, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (5 + ROW * 9) * SRC_DEPTH); \
- VSTORE(VECTOR_SIZE) \
- (DATA##6, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (6 + ROW * 9) * SRC_DEPTH); \
- VSTORE(VECTOR_SIZE) \
- (DATA##7, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (7 + ROW * 9) * SRC_DEPTH); \
- VSTORE(VECTOR_SIZE) \
- (DATA##8, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (8 + ROW * 9) * SRC_DEPTH);
-
-#define IM2COL1X9_NHWC_STORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE, DATA_TYPE, SRC_DEPTH, DATA, ROW, OUTPUT_PTR) \
- VSTORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE) \
- (DATA##0, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (0 + ROW * 9) * SRC_DEPTH); \
- VSTORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE) \
- (DATA##1, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (1 + ROW * 9) * SRC_DEPTH); \
- VSTORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE) \
- (DATA##2, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (2 + ROW * 9) * SRC_DEPTH); \
- VSTORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE) \
- (DATA##3, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (3 + ROW * 9) * SRC_DEPTH); \
- VSTORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE) \
- (DATA##4, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (4 + ROW * 9) * SRC_DEPTH); \
- VSTORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE) \
- (DATA##5, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (5 + ROW * 9) * SRC_DEPTH); \
- VSTORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE) \
- (DATA##6, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (6 + ROW * 9) * SRC_DEPTH); \
- VSTORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE) \
- (DATA##7, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (7 + ROW * 9) * SRC_DEPTH); \
- VSTORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE) \
- (DATA##8, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (8 + ROW * 9) * SRC_DEPTH);
-/** @}*/
-
-/** This kernel performs im2col when the kernel size is 3x3 and the data layout is NHWC
- *
- * @note This kernel computes VECTOR_SIZE elements
- * @note This kernel stores VECTOR_SIZE or BOUNDARY_VECTOR_SIZE (if at boundary) elements
- * @note The vector size must be passed at compile time using -DVECTOR_SIZE: e.g. -DVECTOR_SIZE=2
- * @note The boundary vector size must be passed at compile time using -DBOUNDARY_VECTOR_SIZE: e.g. -DBOUNDARY_VECTOR_SIZE=1
- * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
- * @note The width of output tensor after matrix multiplication must be passed at compile time using -DCONVOLVED_WIDTH: e.g. -DCONVOLVED_WIDTH=34
- * @note The kernel depth must be passed at compile time using -DSRC_DEPTH: e.g. -DSRC_DEPTH=3
- * @note The stride along the Y direction must be passed at compile time using -DSTRIDE_Y: e.g. -DSTRIDE_Y=1
- * @note In case biases will be added to the convolution -DHAS_BIAS has to be passed to append the final matrix with 1 in each row.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8_SIGNED/QASYMM8/F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes).
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes).
- */
-__kernel void im2col3x3_nhwc(
- TENSOR3D_DECLARATION(src),
- IMAGE_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- // input feature map, boundary-corrected (shift all non-boundary vectors by shift_amount) to avoid padding
- const int shift_amount = (int)VECTOR_SIZE - (int)BOUNDARY_VECTOR_SIZE;
- const int ch = max((int)(get_global_id(0) * VECTOR_SIZE) - shift_amount, 0);
- const int yo = get_global_id(1);
- const int batch = get_global_id(2); // batch size
-
- // Calculate input indices
- const int xi = (get_global_id(1) % CONVOLVED_WIDTH) * STRIDE_X;
- const int yi = (get_global_id(1) / (int)CONVOLVED_WIDTH) * STRIDE_Y;
-
- // Get input and output address
- __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + ch * sizeof(DATA_TYPE) + batch * (int)src_stride_w;
- __global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + ch * sizeof(DATA_TYPE) + yo * (int)dst_stride_y + batch * (int)dst_stride_w;
-
- int yi_coord = 0;
- int3 offset = 0;
-
- // Clamp xi
- int3 xi_offset = ((int3)xi + (int3)(0, 1, 2) * DILATION_X - (int3)PAD_LEFT);
-#if PAD_LEFT != 0 || PAD_RIGHT != 0
-#define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val)
- xi_offset = CLAMP(xi_offset, (int3)0, (int3)(SRC_WIDTH - 1));
-#endif // PAD_LEFT != 0 || PAD_RIGHT != 0
- // Multiply by src_stride_y as the width (X) dimension here is the second (y) dimension in src NHWC tensor
- xi_offset *= (int3)src_stride_y;
-
- // Out-of-bound condition for X
- int3 x_cond = (((int3)xi + (int3)(0, 1, 2) * DILATION_X - (int3)PAD_LEFT) < (int3)0) || (((int3)xi + (int3)(0, 1, 2) * DILATION_X - (int3)PAD_LEFT) >= (int3)SRC_WIDTH);
-
- // yi == 0
- // Clamp yi
- // yi_coord is casted to unsigned int in order to use just a min() operation
- // A "-1" 32 bit signed variable converted to unsigned gives 4294967295
- // This is a trick so that the values loaded in the padding areas are always from the last row (SRC_HEIGHT - 1),
- // because of the negative yi_coord wrap-around, but it gets overwritten by PAD_VALUE immediately as the wrap-around
- // also causes y_cond (y padding condition) to be satisfied
- yi_coord = yi - (int)PAD_TOP;
-
- // Clamp only if PAD_TOP or PAD_BOTTOM is not equal to 0
-#if PAD_TOP != 0 || PAD_BOTTOM != 0
- yi_coord = min((uint)yi_coord, (uint)(SRC_HEIGHT - 1));
-#endif // PAD_TOP != 0 || PAD_BOTTOM != 0
-
- // Compute offset
- offset = xi_offset + (yi_coord * (int)src_stride_z);
-
- // Load input values
- VECTOR_N values0 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset.s0));
- VECTOR_N values1 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset.s1));
- VECTOR_N values2 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset.s2));
-
-#if PAD_TOP != 0 || PAD_LEFT != 0 || PAD_BOTTOM != 0 || PAD_RIGHT != 0
- // Replace invalid values with PAD_VALUE
- int y_cond = (int)((uint)(yi - (int)PAD_TOP) >= (uint)(SRC_HEIGHT));
- values0 = select(values0, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond.s0)));
- values1 = select(values1, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond.s1)));
- values2 = select(values2, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond.s2)));
-#endif // PAD_TOP != 0 || PAD_LEFT != 0 || PAD_BOTTOM != 0 || PAD_RIGHT != 0
-
- // yi == 1
- // Clamp yi_coord (it can be negative if PAD_TOP > 1)
- yi_coord = yi - (int)PAD_TOP + 1 * DILATION_Y;
-
- // Clamp only if PAD_TOP or PAD_BOTTOM is not equal to 0
-#if PAD_TOP != 0 || PAD_BOTTOM != 0
- yi_coord = min((uint)yi_coord, (uint)(SRC_HEIGHT - 1));
-#endif // PAD_TOP != 0 || PAD_BOTTOM != 0
-
- // Compute offset
- offset = xi_offset + (yi_coord * (int)src_stride_z);
-
- // Load input values
- VECTOR_N values3 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset.s0));
- VECTOR_N values4 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset.s1));
- VECTOR_N values5 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset.s2));
-
-#if PAD_TOP != 0 || PAD_LEFT != 0 || PAD_BOTTOM != 0 || PAD_RIGHT != 0
- // Replace invalid values with zeros
- y_cond = (int)((uint)(yi - (int)PAD_TOP + 1 * DILATION_Y) >= (uint)(SRC_HEIGHT));
- values3 = select(values3, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond.s0)));
- values4 = select(values4, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond.s1)));
- values5 = select(values5, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond.s2)));
-#endif // PAD_TOP != 0 || PAD_LEFT != 0 || PAD_BOTTOM != 0 || PAD_RIGHT != 0
-
- // yi == 2
- // Clamp yi_coord
- yi_coord = yi - (int)PAD_TOP + 2 * DILATION_Y;
-
- // Clamp only if PAD_TOP or PAD_BOTTOM is not equal to 0
-#if PAD_TOP != 0 || PAD_BOTTOM != 0
- yi_coord = min((uint)yi_coord, (uint)(SRC_HEIGHT - 1));
-#endif // PAD_TOP != 0 || PAD_BOTTOM != 0
-
- // Compute offset
- offset = xi_offset + (yi_coord * (int)src_stride_z);
-
- // Load input values
- VECTOR_N values6 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset.s0));
- VECTOR_N values7 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset.s1));
- VECTOR_N values8 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset.s2));
-
-#if PAD_TOP != 0 || PAD_LEFT != 0 || PAD_BOTTOM != 0 || PAD_RIGHT != 0
- // Replace invalid values with PAD_VALUE
- y_cond = (int)((uint)(yi - (int)PAD_TOP + 2 * DILATION_Y) >= (uint)(SRC_HEIGHT));
- values6 = select(values6, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond.s0)));
- values7 = select(values7, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond.s1)));
- values8 = select(values8, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond.s2)));
-#endif // PAD_TOP != 0 || PAD_LEFT != 0 || PAD_BOTTOM != 0 || PAD_RIGHT != 0
-
- // Store in a boundary-aware way to avoid padding
- IM2COL1X9_NHWC_STORE(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE, DATA_TYPE, SRC_DEPTH, values, 0, output_ptr)
-
-#ifdef HAS_BIAS
- // We can use VECTOR_SIZE instead of BOUNDARY_VECTOR_SIZE even if it's at the boundary. This is because the bias is
- // added at the end of the channel, while the boundary vec is at the beginning of the channel.
- // The only case where the boundary vec is at the end of the channel is when there's only a single boundary vec in
- // the whole channel dimension, but in that case VECTOR_SIZE is also equal to BOUNDARY_VECTOR_SIZE
- // See the value of num_elems_processed_per_iteration in configure_opencl_kernel method in CLIm2ColKernel.cpp
- if((ch + VECTOR_SIZE) >= SRC_DEPTH)
- {
- *((__global DATA_TYPE *)(output_ptr) - ch + SRC_DEPTH * 9) = 1.0f;
- }
-#endif // HAS_BIAS
-}
-
-#if PAD_TOP != 0 || PAD_LEFT != 0 || PAD_BOTTOM != 0 || PAD_RIGHT != 0
-#define IM2COL1x9(i) \
- ({ \
- yi_coord = yi - (int)PAD_TOP + i * DILATION_Y; \
- yi_coord = min((uint)yi_coord, (uint)(SRC_HEIGHT - 1)); \
- \
- offset0 = xi_offset0 + (yi_coord * (int)src_stride_z); \
- offset1 = xi_offset1 + (yi_coord * (int)src_stride_z); \
- \
- VECTOR_N values0 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s0)); \
- VECTOR_N values1 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s1)); \
- VECTOR_N values2 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s2)); \
- VECTOR_N values3 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s3)); \
- VECTOR_N values4 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s4)); \
- VECTOR_N values5 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s5)); \
- VECTOR_N values6 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s6)); \
- VECTOR_N values7 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s7)); \
- VECTOR_N values8 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset1)); \
- \
- int y_cond = (int)((uint)(yi - (int)PAD_TOP + i * DILATION_Y) >= (uint)(SRC_HEIGHT)); \
- values0 = select(values0, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond0.s0))); \
- values1 = select(values1, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond0.s1))); \
- values2 = select(values2, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond0.s2))); \
- values3 = select(values3, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond0.s3))); \
- values4 = select(values4, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond0.s4))); \
- values5 = select(values5, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond0.s5))); \
- values6 = select(values6, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond0.s6))); \
- values7 = select(values7, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond0.s7))); \
- values8 = select(values8, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond1))); \
- \
- IM2COL1X9_NHWC_STORE(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE, DATA_TYPE, SRC_DEPTH, values, i, output_ptr) \
- })
-#else // PAD_TOP != 0 || PAD_LEFT != 0 || PAD_BOTTOM != 0 || PAD_RIGHT != 0
-#define IM2COL1x9(i) \
- ({ \
- yi_coord = yi - (int)PAD_TOP + i * DILATION_Y; \
- yi_coord = min((uint)yi_coord, (uint)(SRC_HEIGHT - 1)); \
- \
- offset0 = xi_offset0 + (yi_coord * (int)src_stride_z); \
- offset1 = xi_offset1 + (yi_coord * (int)src_stride_z); \
- \
- VECTOR_N values0 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s0)); \
- VECTOR_N values1 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s1)); \
- VECTOR_N values2 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s2)); \
- VECTOR_N values3 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s3)); \
- VECTOR_N values4 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s4)); \
- VECTOR_N values5 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s5)); \
- VECTOR_N values6 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s6)); \
- VECTOR_N values7 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s7)); \
- VECTOR_N values8 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset1)); \
- \
- IM2COL1X9_NHWC_STORE(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE, DATA_TYPE, SRC_DEPTH, values, i, output_ptr) \
- })
-#endif // PAD_TOP != 0 || PAD_LEFT != 0 || PAD_BOTTOM != 0 || PAD_RIGHT != 0
-
-/** This kernel performs im2col when the kernel size is 9x9 and the data layout is NHWC
- *
- * @note This kernel computes VECTOR_SIZE elements
- * @note This kernel stores VECTOR_SIZE or BOUNDARY_VECTOR_SIZE (if at boundary) elements
- * @note The vector size must be passed at compile time using -DVECTOR_SIZE: e.g. -DVECTOR_SIZE=2
- * @note The boundary vector size must be passed at compile time using -DBOUNDARY_VECTOR_SIZE: e.g. -DBOUNDARY_VECTOR_SIZE=1
- * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
- * @note The width of output tensor after matrix multiplication must be passed at compile time using -DCONVOLVED_WIDTH: e.g. -DCONVOLVED_WIDTH=34
- * @note The kernel depth must be passed at compile time using -DSRC_DEPTH: e.g. -DSRC_DEPTH=3
- * @note The stride along the Y direction must be passed at compile time using -DSTRIDE_Y: e.g. -DSTRIDE_Y=1
- * @note In case biases will be added to the convolution -DHAS_BIAS has to be passed to append the final matrix with 1 in each row.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8_SIGNED/QASYMM8/F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes).
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes).
- */
-__kernel void im2col9x9_nhwc(
- TENSOR3D_DECLARATION(src),
- IMAGE_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- // input feature map, boundary-corrected (shift all non-boundary vectors by shift_amount) to avoid padding
- const int shift_amount = (int)VECTOR_SIZE - (int)BOUNDARY_VECTOR_SIZE;
- const int ch = max((int)(get_global_id(0) * VECTOR_SIZE) - shift_amount, 0);
- const int yo = get_global_id(1);
- const int batch = get_global_id(2); // batch size
-
- // Calculate input indices
- const int xi = (get_global_id(1) % CONVOLVED_WIDTH) * STRIDE_X;
- const int yi = (get_global_id(1) / (int)CONVOLVED_WIDTH) * STRIDE_Y;
-
- // Get input and output address
- __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + ch * sizeof(DATA_TYPE) + batch * (int)src_stride_w;
- __global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + ch * sizeof(DATA_TYPE) + yo * (int)dst_stride_y + batch * (int)dst_stride_w;
-
- int yi_coord = 0;
- int8 offset0 = 0;
- int offset1 = 0;
-
- // Clamp xi
- int8 xi_offset0 = ((int8)xi + (int8)(0, 1, 2, 3, 4, 5, 6, 7) * DILATION_X - (int8)PAD_LEFT);
- int xi_offset1 = ((int)xi + (int)(8) * DILATION_X - (int)PAD_LEFT);
-
-#if PAD_LEFT != 0 || PAD_RIGHT != 0
-#define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val)
- xi_offset0 = CLAMP(xi_offset0, (int8)0, (int8)(SRC_WIDTH - 1));
- xi_offset1 = CLAMP(xi_offset1, (int)0, (int)(SRC_WIDTH - 1));
-#endif // PAD_LEFT != 0 || PAD_RIGHT != 0
- xi_offset0 *= (int8)src_stride_y;
- xi_offset1 *= (int)src_stride_y;
-
- // Out-of-bound condition for X
- int8 x_cond0 = (((int8)xi + (int8)(0, 1, 2, 3, 4, 5, 6, 7) * DILATION_X - (int8)PAD_LEFT) < (int8)0) || (((int8)xi + (int8)(0, 1, 2, 3, 4, 5, 6, 7) * DILATION_X - (int8)PAD_LEFT) >= (int8)SRC_WIDTH);
- int x_cond1 = (((int)xi + (int)(8) * DILATION_X - (int)PAD_LEFT) < (int)0) || (((int)xi + (int)(8) * DILATION_X - (int)PAD_LEFT) >= (int)SRC_WIDTH);
-
- IM2COL1x9(0);
- IM2COL1x9(1);
- IM2COL1x9(2);
- IM2COL1x9(3);
- IM2COL1x9(4);
- IM2COL1x9(5);
- IM2COL1x9(6);
- IM2COL1x9(7);
- IM2COL1x9(8);
-
-#ifdef HAS_BIAS
- // We can use VECTOR_SIZE instead of BOUNDARY_VECTOR_SIZE even if it's at the boundary. This is because the bias is
- // added at the end of the channel, while the boundary vec is at the beginning of the channel.
- // The only case where the boundary vec is at the end of the channel is when there's only a single boundary vec in
- // the whole channel dimension, but in that case VECTOR_SIZE is also equal to BOUNDARY_VECTOR_SIZE
- // See the value of num_elems_processed_per_iteration in configure_opencl_kernel method in CLIm2ColKernel.cpp
- if((ch + VECTOR_SIZE) >= SRC_DEPTH)
- {
- *((__global DATA_TYPE *)(output_ptr) - ch + SRC_DEPTH * 81) = 1.0f;
- }
-#endif // HAS_BIAS
-}
-
-/** This opencl kernel performs a generic im2col implementation when the data layout is NHWC
- *
- * @note This kernel computes VECTOR_SIZE elements
- * @note This kernel stores VECTOR_SIZE or BOUNDARY_VECTOR_SIZE (if at boundary) elements
- * @note The vector size must be passed at compile time using -DVECTOR_SIZE: e.g. -DVECTOR_SIZE=2
- * @note The boundary vector size must be passed at compile time using -DBOUNDARY_VECTOR_SIZE: e.g. -DBOUNDARY_VECTOR_SIZE=1
- * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
- * @note The width and height of the input tensor must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT: e.g. -DSRC_WIDTH=128 and -DSRC_HEIGHT=128
- * @note The width of output tensor after matrix multiplication must be passed at compile time using -DCONVOLVED_WIDTH: e.g. -DCONVOLVED_WIDTH=34
- * @note The kernel width, height and depth must be passed at compile time using -DKERNEL_WIDTH, -DKERNEL_HEIGHT and -DSRC_DEPTH: e.g. -DKERNEL_WIDTH=3, -DKERNEL_HEIGHT=3 and -DSRC_DEPTH=64
- * @note The pad_left, pad_right, pad_top and pad_bottom must be passed at compile time using -DPAD_LEFT, -DPAD_RIGHT, -DPAD_TOP and -DPAD_BOTTOM: e.g. -DPAD_LEFT=1, -DPAD_RIGHT=2, -DPAD_TOP=3 and -DPAD_BOTTOM=2
- * @note The zero value to store in case we load values out-of-bounds must be passed at compile time using -DPAD_VALUE: e.g. -DPAD_VALUE=0.0
- * @note The stride along the X and Y directions must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y: e.g. -DSTRIDE_X=1 and -DSTRIDE_Y=1
- * @note The dilation_x and dilation_y must be passed at compile time using -DDILATION_X and -DDILATION_Y: e.g. -DDILATION_X=1, -DDILATION_Y=1
- * @note In case biases will be added to the convolution -DHAS_BIAS has to be passed to append the final matrix with 1 in each row.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8_SIGNED/QASYMM8/F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes).
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes).
- */
-__kernel void im2col_generic_nhwc(
- TENSOR3D_DECLARATION(src),
- IMAGE_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- // input feature map, boundary-corrected (shift all non-boundary vectors by shift_amount) to avoid padding
- const int shift_amount = (int)VECTOR_SIZE - (int)BOUNDARY_VECTOR_SIZE;
- const int ch = max((int)(get_global_id(0) * VECTOR_SIZE) - shift_amount, 0);
- const int yo = get_global_id(1);
- const int batch = get_global_id(2); // batch size
-
- // Calculate input indices
- const int xi = (get_global_id(1) % CONVOLVED_WIDTH) * STRIDE_X;
- const int yi = (get_global_id(1) / (int)CONVOLVED_WIDTH) * STRIDE_Y;
-
- // Get input and output address
- __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + ch * sizeof(DATA_TYPE) + batch * (int)src_stride_w;
- __global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + ch * sizeof(DATA_TYPE) + yo * (int)dst_stride_y + batch * (int)dst_stride_w;
-
- int i = 0;
- for(int yk = 0; yk < KERNEL_HEIGHT; ++yk)
- {
- // Clamp yi_coord
- int yi_coord = yi + yk * DILATION_Y - (int)PAD_TOP;
- yi_coord = CLAMP(yi_coord, (int)0, (int)(SRC_HEIGHT - 1));
-
- // Out-of-bound condition for Y
- int y_border_condition = ((yi + yk * DILATION_Y - (int)PAD_TOP) < (int)0) || ((yi + yk * DILATION_Y - (int)PAD_TOP) >= (int)SRC_HEIGHT);
-
- for(int xk = 0; xk < KERNEL_WIDTH; ++xk)
- {
- // Clamp xi_coord
- int xi_coord = (xi + xk * DILATION_X - (int)PAD_LEFT);
- xi_coord = CLAMP(xi_coord, (int)0, (int)(SRC_WIDTH - 1));
-
- // Out-of-bound condition for X
- int x_border_condition = ((xi + xk * DILATION_X - (int)PAD_LEFT) < (int)0) || ((xi + xk * DILATION_X - (int)PAD_LEFT) >= (int)SRC_WIDTH);
-
- int offset = xi_coord * (int)src_stride_y + (yi_coord * (int)src_stride_z);
-
- VECTOR_N values0 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset));
-
-#if PAD_LEFT != 0 || PAD_TOP != 0 || PAD_RIGHT != 0 || PAD_BOTTOM != 0
- // Replace with PAD_VALUE if the value is out-of-bound
- values0 = select(values0, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)x_border_condition || (COND_N)(y_border_condition)));
-#endif // PAD_LEFT != 0 || PAD_TOP != 0 || PAD_RIGHT != 0 || PAD_BOTTOM != 0
-
- // Store in a boundary-aware way to avoid padding
-#if BOUNDARY_VECTOR_SIZE != VECTOR_SIZE
- const bool at_channel_boundary = get_global_id(0) == 0;
- if(at_channel_boundary)
- {
- VSTORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE)
- (values0, 0, (__global DATA_TYPE *)(output_ptr) + i * (int)SRC_DEPTH);
- }
- else // at_channel_boundary
-#endif // BOUNDARY_VECTOR_SIZE != VECTOR_SIZE
- {
- VSTORE(VECTOR_SIZE)
- (values0, 0, (__global DATA_TYPE *)(output_ptr) + i * (int)SRC_DEPTH);
- }
- i++;
- }
- }
-
-#ifdef HAS_BIAS
- // We can use VECTOR_SIZE instead of BOUNDARY_VECTOR_SIZE even if it's at the boundary. This is because the bias is
- // added at the end of the channel, while the boundary vec is at the beginning of the channel.
- // The only case where the boundary vec is at the end of the channel is when there's only a single boundary vec in
- // the whole channel dimension, but in that case VECTOR_SIZE is also equal to BOUNDARY_VECTOR_SIZE
- // See the value of num_elems_processed_per_iteration in configure_opencl_kernel method in CLIm2ColKernel.cpp
- if((ch + VECTOR_SIZE) >= SRC_DEPTH)
- {
- *((__global DATA_TYPE *)(output_ptr) - ch + SRC_DEPTH * KERNEL_WIDTH * KERNEL_HEIGHT) = 1.0f;
- }
-#endif // HAS_BIAS
-}
-#endif // defined(CONVOLVED_WIDTH) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(STRIDE_X) && defined(STRIDE_Y) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(SRC_DEPTH) && defined(PAD_LEFT) && defined(PAD_RIGHT) && defined(PAD_TOP) && defined(PAD_BOTTOM) && defined(PAD_VALUE) && defined(VECTOR_SIZE) && defined(BOUNDARY_VECTOR_SIZE)
-#endif // defined(DATA_TYPE) && defined(ELEMENT_SIZE)
+#endif // defined(DATA_TYPE) && defined(ELEMENT_SIZE)
\ No newline at end of file
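The im2col NHWC kernels above take no shape or layout parameters at runtime: data type, tensor geometry, padding, strides, dilation and vector widths are all baked in through -D build options, and -DHAS_BIAS additionally appends a 1 to each output row. A minimal host-side sketch in plain C of assembling such an option string follows; the concrete values (float data, a 224x224x64 input, a 9x9 unpadded unit-stride kernel, 4-wide vectors) are illustrative only and not taken from the library.

    #include <stdio.h>

    /* Illustrative only: compose the -D options the im2col*_nhwc kernels expect. */
    static void build_im2col_options(char *buf, size_t len)
    {
        snprintf(buf, len,
                 "-DDATA_TYPE=float -DELEMENT_SIZE=4 "
                 "-DSRC_WIDTH=224 -DSRC_HEIGHT=224 -DSRC_DEPTH=64 "
                 "-DKERNEL_WIDTH=9 -DKERNEL_HEIGHT=9 -DCONVOLVED_WIDTH=216 "
                 "-DSTRIDE_X=1 -DSTRIDE_Y=1 -DDILATION_X=1 -DDILATION_Y=1 "
                 "-DPAD_LEFT=0 -DPAD_RIGHT=0 -DPAD_TOP=0 -DPAD_BOTTOM=0 -DPAD_VALUE=0 "
                 "-DVECTOR_SIZE=4 -DBOUNDARY_VECTOR_SIZE=4"); /* add -DHAS_BIAS when biases are used */
    }

    int main(void)
    {
        char opts[512];
        build_im2col_options(opts, sizeof(opts));
        puts(opts); /* this is the string a host would hand to the OpenCL program build step */
        return 0;
    }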
diff --git a/src/core/CL/cl_kernels/normalization_layer.cl b/src/core/CL/cl_kernels/nchw/normalization_layer.cl
index ff4dc8ec38..deada49db5 100644
--- a/src/core/CL/cl_kernels/normalization_layer.cl
+++ b/src/core/CL/cl_kernels/nchw/normalization_layer.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,6 +22,7 @@
* SOFTWARE.
*/
#include "helpers.h"
+#include "tile_helpers.h"
#define MUL_OP(x, y) ((x) * (y))
#define ADD_OP(x, y) ((x) + (y))
@@ -29,9 +30,6 @@
#define POW_OP(x, y) pow((x), (y))
#define SQCVT_SAT(a) (a)
-#define LOAD_OP(offset, ptr) vload4(offset, ptr)
-#define STORE_OP(data, offset, ptr) vstore4(data, offset, ptr)
-
#if defined(NUM_SLICES)
/** Apply cross-map normalization.
*
@@ -58,8 +56,8 @@
* @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
*/
-__kernel void normalization_layer_cross_map(TENSOR3D_DECLARATION(input),
- TENSOR3D_DECLARATION(output))
+__kernel void normalization_layer_cross_map_nchw(TENSOR3D_DECLARATION(input),
+ TENSOR3D_DECLARATION(output))
{
Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
@@ -80,7 +78,7 @@ __kernel void normalization_layer_cross_map(TENSOR3D_DECLARATION(input),
for(int i = left_slice; i <= right_slice; i++)
{
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- values = LOAD_OP(0, (__global DATA_TYPE *)tensor3D_offset(&in, 0, 0, i));
+ values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)tensor3D_offset(&in, 0, 0, i));
acc = ADD_OP(acc, MUL_OP(values, values));
}
@@ -88,9 +86,10 @@ __kernel void normalization_layer_cross_map(TENSOR3D_DECLARATION(input),
const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
normalized = POW_OP(acc, beta_v);
const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- normalized_pixel = DIV_OP(LOAD_OP(0, (__global DATA_TYPE *)in.ptr), normalized);
+ normalized_pixel = DIV_OP(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)in.ptr), normalized);
- STORE_OP(normalized_pixel, 0, (__global DATA_TYPE *)out.ptr);
+ VSTORE(VEC_SIZE)
+ (normalized_pixel, 0, (__global DATA_TYPE *)out.ptr);
}
#endif /* defined(NUM_SLICES) */
@@ -101,6 +100,7 @@ __kernel void normalization_layer_cross_map(TENSOR3D_DECLARATION(input),
* @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size, e.g. -DVEC_SIZE=16
* @note The radius should be given as a preprocessor argument using -DRADIUS=size. e.g. -DRADIUS=5
* @note Scaling coefficient (= alpha/norm_size), beta and kappa need to be passed at compile time using -DCOEFF, -DALPHA and -DKAPPA
+ * @note The leftover size in the X dimension should be given as a preprocessor argument using -DVEC_SIZE_LEFTOVER, i.e. x_dimension % VEC_SIZE. e.g. -DVEC_SIZE_LEFTOVER=1
*
* @param[in] input_ptr Pointer to the first source tensor. Supported data types: F16/F32
* @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes)
@@ -126,17 +126,16 @@ __kernel void normalization_layer_in_map_nchw(TENSOR3D_DECLARATION(input),
Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- acc = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))0;
+ acc = 0;
const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- coeff_v = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))SQCVT_SAT(COEFF);
+ coeff_v = SQCVT_SAT(COEFF);
const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- beta_v = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))SQCVT_SAT(BETA);
+ beta_v = SQCVT_SAT(BETA);
const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- kappa_v = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))SQCVT_SAT(KAPPA);
+ kappa_v = SQCVT_SAT(KAPPA);
- const int current_col = get_global_id(0) << 2;
- const int left_pos = max(-(int)RADIUS, -3 - current_col);
- const int right_pos = min((int)RADIUS, (int)WIDTH_SIZE - 1 - current_col);
+ const int left_pos = -(int)RADIUS;
+ const int right_pos = (int)RADIUS;
#if defined(IN_MAP_2D)
const int current_row = get_global_id(1);
@@ -152,90 +151,10 @@ __kernel void normalization_layer_in_map_nchw(TENSOR3D_DECLARATION(input),
{
#if defined(IN_MAP_2D)
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- values = LOAD_OP(0, (__global DATA_TYPE *)tensor3D_offset(&in, i, j, 0));
-#else /* defined(IN_MAP_2D) */
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- values = LOAD_OP(0, (__global DATA_TYPE *)tensor3D_offset(&in, i, 0, 0));
-#endif /* defined(IN_MAP_2D) */
- acc = ADD_OP(acc, MUL_OP(values, values));
- }
-#if defined(IN_MAP_2D)
- }
-#endif /* defined(IN_MAP_2D) */
-
- acc = ADD_OP(MUL_OP(acc, coeff_v), kappa_v);
- const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- normalized = POW_OP(acc, beta_v);
- const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- normalized_pixel = DIV_OP(LOAD_OP(0, (__global DATA_TYPE *)in.ptr), normalized);
-
- STORE_OP(normalized_pixel, 0, (__global DATA_TYPE *)out.ptr);
-}
-#endif // defined(WIDTH_SIZE)
-
-#if defined(NUM_SLICES)
-/** Apply in-map normalization when tensors are in the NHWC data layout format.
- *
- * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size, e.g. -DVEC_SIZE=16
- * @note The radius should be given as a preprocessor argument using -DRADIUS=size. e.g. -DRADIUS=5
- * @note The number of slices should be given as a preprocessor argument using -DNUM_SLICES=size. e.g. -DNUM_SLICES=192
- * @note Scaling coefficient (= alpha/norm_size), beta and kappa need to be passed at compile time using -DCOEFF, -DALPHA and -DKAPPA
- *
- * @param[in] input_ptr Pointer to the first source tensor. Supported data types: F16/F32
- * @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the first source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the first source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the first destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the first source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void normalization_layer_in_map_nhwc(TENSOR3D_DECLARATION(input),
- TENSOR3D_DECLARATION(output))
-{
- Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
-
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- acc = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))0;
- const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- coeff_v = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))SQCVT_SAT(COEFF);
- const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- beta_v = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))SQCVT_SAT(BETA);
- const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- kappa_v = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))SQCVT_SAT(KAPPA);
-
- const int current_cols = get_global_id(1);
- const int first_col = max(-(int)RADIUS, -current_cols);
- const int last_col = min((int)RADIUS, (int)get_global_size(1) - 1 - current_cols);
-
-#if defined(IN_MAP_2D)
- const int current_rows = get_global_id(2);
- const int first_row = max(-(int)RADIUS, -current_rows);
- const int last_row = min((int)RADIUS, (int)NUM_SLICES - 1 - current_rows);
-#endif /* defined(IN_MAP_2D) */
-
-#if defined(IN_MAP_2D)
- for(int j = first_row; j <= last_row; ++j)
- {
-#endif /* defined(IN_MAP_2D) */
- for(int i = first_col; i <= last_col; ++i)
- {
-#if defined(IN_MAP_2D)
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- values = LOAD_OP(0, (__global DATA_TYPE *)tensor3D_offset(&in, 0, i, j));
+ values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)tensor3D_offset(&in, i, j, 0));
#else /* defined(IN_MAP_2D) */
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- values = LOAD_OP(0, (__global DATA_TYPE *)tensor3D_offset(&in, 0, i, 0));
+ values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)tensor3D_offset(&in, i, 0, 0));
#endif /* defined(IN_MAP_2D) */
acc = ADD_OP(acc, MUL_OP(values, values));
}
@@ -247,8 +166,9 @@ __kernel void normalization_layer_in_map_nhwc(TENSOR3D_DECLARATION(input),
const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
normalized = POW_OP(acc, beta_v);
const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- normalized_pixel = DIV_OP(LOAD_OP(0, (__global DATA_TYPE *)in.ptr), normalized);
+ normalized_pixel = DIV_OP(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)in.ptr), normalized);
- STORE_OP(normalized_pixel, 0, (__global DATA_TYPE *)out.ptr);
+ VSTORE(VEC_SIZE)
+ (normalized_pixel, 0, (__global DATA_TYPE *)out.ptr);
}
-#endif /* defined(NUM_SLICES) */
+#endif // defined(WIDTH_SIZE)
\ No newline at end of file
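Both normalization variants (cross-map over neighbouring slices, in-map over a window of neighbouring columns and, with -DIN_MAP_2D, rows) implement the same formula: the output is the input divided by (kappa + coeff * sum of squared neighbours) raised to the power beta. A scalar reference sketch in C, with the window gathering left to the caller since that is what distinguishes the variants, is:

    #include <math.h>

    /* Scalar reference for one normalized output element. 'window' holds the
     * 'count' neighbouring input values selected by the kernel variant. */
    static float normalize_one(float x, const float *window, int count,
                               float coeff, float beta, float kappa)
    {
        float acc = 0.0f;
        for (int i = 0; i < count; ++i)
        {
            acc += window[i] * window[i];  /* acc = ADD_OP(acc, MUL_OP(v, v)) */
        }
        acc = acc * coeff + kappa;         /* acc = ADD_OP(MUL_OP(acc, coeff_v), kappa_v) */
        return x / powf(acc, beta);        /* DIV_OP(x, POW_OP(acc, beta_v)) */
    }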
diff --git a/src/core/CL/cl_kernels/normalize_planar_yuv_layer.cl b/src/core/CL/cl_kernels/nchw/normalize_planar_yuv_layer.cl
index f803f5288e..23a0de76f7 100644
--- a/src/core/CL/cl_kernels/normalize_planar_yuv_layer.cl
+++ b/src/core/CL/cl_kernels/nchw/normalize_planar_yuv_layer.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -79,56 +79,4 @@ __kernel void normalize_planar_yuv_layer_nchw(TENSOR3D_DECLARATION(src),
VSTORE(VEC_SIZE)
(res, 0, (__global DATA_TYPE *)dst.ptr);
}
-
-/** Apply normalize_planar_yuv layer on tensors with NHWC data layout.
- *
- * @note Data type should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE e.g. -DVEC_SIZE=8
- *
- * @param[in] src_ptr Pointer to the first source tensor. Supported data types: F16/F32
- * @param[in] src_stride_x Stride of the first source tensor in X dimension (in bytes)
- * @param[in] src_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the first source tensor in Y dimension (in bytes)
- * @param[in] src_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the first source tensor in Z dimension (in bytes)
- * @param[in] src_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the first source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] mean_ptr Pointer to the mean source tensor. Supported data types: same as @p src_ptr
- * @param[in] mean_stride_x Stride of the mean source tensor in X dimension (in bytes)
- * @param[in] mean_step_x mean_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] mean_offset_first_element_in_bytes The offset of the first element in the mean source tensor
- * @param[in] std_ptr Pointer to the std tensor. Supported data types: same as @p src_ptr
- * @param[in] std_stride_x Stride of the std tensor in X dimension (in bytes)
- * @param[in] std_step_x std_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] std_offset_first_element_in_bytes The offset of the first element in the var source tensor
- */
-__kernel void normalize_planar_yuv_layer_nhwc(TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- VECTOR_DECLARATION(mean),
- VECTOR_DECLARATION(std))
-{
- Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
- Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
- Vector mean = CONVERT_TO_VECTOR_STRUCT(mean);
- Vector std = CONVERT_TO_VECTOR_STRUCT(std);
-
- const uint current_slice = get_global_id(0);
-
- const TYPE curr_mean = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(mean.ptr + current_slice * VEC_SIZE * sizeof(DATA_TYPE)));
- const TYPE curr_std = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(std.ptr + current_slice * VEC_SIZE * sizeof(DATA_TYPE)));
-
- TYPE data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src.ptr);
- TYPE res = (data - curr_mean) / curr_std;
-
- VSTORE(VEC_SIZE)
- (res, 0, (__global DATA_TYPE *)dst.ptr);
-}
-#endif // defined(DATA_TYPE) && defined(VEC_SIZE)
+#endif // defined(DATA_TYPE) && defined(VEC_SIZE)
\ No newline at end of file
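With the NHWC variant removed from this file, what remains is the NCHW kernel, which standardises every element of a channel plane with that channel's mean and std. A scalar sketch in C of the per-plane computation (the plane and statistics lookup is assumed to be done by the caller) is:

    #include <stddef.h>

    /* Standardise one channel plane: res = (data - mean) / std for every element. */
    static void normalize_planar_yuv_plane(float *dst, const float *src, size_t count,
                                           float channel_mean, float channel_std)
    {
        for (size_t i = 0; i < count; ++i)
        {
            dst[i] = (src[i] - channel_mean) / channel_std;
        }
    }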
diff --git a/src/core/CL/cl_kernels/normalize_planar_yuv_layer_quantized.cl b/src/core/CL/cl_kernels/nchw/normalize_planar_yuv_layer_quantized.cl
index 27017a08ca..0f02ef6184 100644
--- a/src/core/CL/cl_kernels/normalize_planar_yuv_layer_quantized.cl
+++ b/src/core/CL/cl_kernels/nchw/normalize_planar_yuv_layer_quantized.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -76,17 +76,21 @@ __kernel void normalize_planar_yuv_layer_q8_nchw(TENSOR3D_DECLARATION(src),
const uint current_slice = get_global_id(2) % NUM_CHANNELS;
- float16 curr_mean_flt = (float16)(*((__global DATA_TYPE *)(mean.ptr + current_slice * sizeof(DATA_TYPE))));
- curr_mean_flt = round(curr_mean_flt - OFFSET_FLT) * SCALE_FLT;
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ curr_mean_flt = (VEC_DATA_TYPE(float, VEC_SIZE))(*((__global DATA_TYPE *)(mean.ptr + current_slice * sizeof(DATA_TYPE))));
+ curr_mean_flt = round(curr_mean_flt - OFFSET_FLT) * SCALE_FLT;
- float16 curr_std_flt = (float16)(*((__global DATA_TYPE *)(std.ptr + current_slice * sizeof(DATA_TYPE))));
- curr_std_flt = round(curr_std_flt - OFFSET_FLT) * SCALE_FLT;
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ curr_std_flt = (VEC_DATA_TYPE(float, VEC_SIZE))(*((__global DATA_TYPE *)(std.ptr + current_slice * sizeof(DATA_TYPE))));
+ curr_std_flt = round(curr_std_flt - OFFSET_FLT) * SCALE_FLT;
- float16 data_flt = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src.ptr), float16);
- data_flt = round(data_flt - OFFSET_FLT) * SCALE_FLT;
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ data_flt = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src.ptr), VEC_DATA_TYPE(float, VEC_SIZE));
+ data_flt = round(data_flt - OFFSET_FLT) * SCALE_FLT;
// Perform normalization
- float16 res_flt = (data_flt - curr_mean_flt) / curr_std_flt;
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ res_flt = (data_flt - curr_mean_flt) / curr_std_flt;
const TYPE res_u8 = CONVERT_SAT(round(res_flt / SCALE_FLT) + OFFSET_FLT, TYPE);
VSTORE(VEC_SIZE)
@@ -94,65 +98,4 @@ __kernel void normalize_planar_yuv_layer_q8_nchw(TENSOR3D_DECLARATION(src),
}
#endif // defined(NUM_CHANNELS)
-
-/** Apply normalize_planar_yuv layer on tensors with NHWC data layout.
- *
- * @note Data type should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE e.g. -DVEC_SIZE=8
- * @note The quantization offset should be given as a preprocessor argument using -DOFFSET e.g. -DOFFSET=8
- * @note The quantization scale should be given as a preprocessor argument using -DSCALE e.g. -DSCALE=8
- *
- * @param[in] src_ptr Pointer to the first source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED
- * @param[in] src_stride_x Stride of the first source tensor in X dimension (in bytes)
- * @param[in] src_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the first source tensor in Y dimension (in bytes)
- * @param[in] src_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the first source tensor in Z dimension (in bytes)
- * @param[in] src_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the first source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] mean_ptr Pointer to the mean source tensor. Supported data types: same as @p src_ptr
- * @param[in] mean_stride_x Stride of the mean source tensor in X dimension (in bytes)
- * @param[in] mean_step_x mean_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] mean_offset_first_element_in_bytes The offset of the first element in the mean source tensor
- * @param[in] std_ptr Pointer to the std tensor. Supported data types: same as @p src_ptr
- * @param[in] std_stride_x Stride of the std tensor in X dimension (in bytes)
- * @param[in] std_step_x std_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] std_offset_first_element_in_bytes The offset of the first element in the var source tensor
- */
-__kernel void normalize_planar_yuv_layer_q8_nhwc(TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- VECTOR_DECLARATION(mean),
- VECTOR_DECLARATION(std))
-{
- Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
- Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
- Vector mean = CONVERT_TO_VECTOR_STRUCT(mean);
- Vector std = CONVERT_TO_VECTOR_STRUCT(std);
-
- const uint current_slice = get_global_id(0);
-
- float16 curr_mean_flt = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(mean.ptr + current_slice * VEC_SIZE * sizeof(DATA_TYPE))), float16);
- curr_mean_flt = round(curr_mean_flt - OFFSET_FLT) * SCALE_FLT;
-
- float16 curr_std_flt = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(std.ptr + current_slice * VEC_SIZE * sizeof(DATA_TYPE))), float16);
- curr_std_flt = round(curr_std_flt - OFFSET_FLT) * SCALE_FLT;
-
- float16 data_flt = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src.ptr), float16);
- data_flt = round(data_flt - OFFSET_FLT) * (SCALE_FLT);
-
- // Perform normalization
- float16 res_flt = (data_flt - curr_mean_flt) / curr_std_flt;
-
- const TYPE res_u8 = CONVERT_SAT(round(res_flt / SCALE_FLT) + OFFSET_FLT, TYPE);
- VSTORE(VEC_SIZE)
- (res_u8, 0, (__global DATA_TYPE *)dst.ptr);
-}
-#endif // defined(DATA_TYPE) && defined(VEC_SIZE) && defined(OFFSET) && defined(SCALE)
+#endif // defined(DATA_TYPE) && defined(VEC_SIZE) && defined(OFFSET) && defined(SCALE)
\ No newline at end of file
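The quantized kernel keeps the same standardisation but wraps it in dequantize/requantize steps driven by the OFFSET_FLT/SCALE_FLT constants. A scalar sketch in C for an 8-bit unsigned input (the explicit clamp stands in for the kernel's CONVERT_SAT) is:

    #include <math.h>
    #include <stdint.h>

    /* Dequantize inputs, standardise, requantize the result, mirroring
     * normalize_planar_yuv_layer_q8_nchw above. */
    static uint8_t normalize_planar_yuv_q8(uint8_t data, uint8_t mean, uint8_t std_dev,
                                           float offset, float scale)
    {
        const float mean_flt = roundf((float)mean - offset) * scale;
        const float std_flt  = roundf((float)std_dev - offset) * scale;
        const float data_flt = roundf((float)data - offset) * scale;

        const float res_flt = (data_flt - mean_flt) / std_flt;

        float q = roundf(res_flt / scale) + offset;   /* back to the quantized domain */
        if (q < 0.0f)   q = 0.0f;                     /* saturate like CONVERT_SAT    */
        if (q > 255.0f) q = 255.0f;
        return (uint8_t)q;
    }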
diff --git a/src/core/CL/cl_kernels/nchw/pooling_layer.cl b/src/core/CL/cl_kernels/nchw/pooling_layer.cl
new file mode 100644
index 0000000000..15ad116289
--- /dev/null
+++ b/src/core/CL/cl_kernels/nchw/pooling_layer.cl
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2017-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(POOL_AVG) || defined(POOL_L2)
+#define POOL_OP(x, y) ((x) + (y))
+#else /* defined(POOL_AVG) || defined(POOL_L2) */
+#if defined(QUANTIZED)
+#define POOL_OP(x, y) (max((x), (y)))
+#else // defined(QUANTIZED)
+#define POOL_OP(x, y) (fmax((x), (y)))
+#endif // defined(QUANTIZED)
+#endif /* defined(POOL_AVG) || defined(POOL_L2) */
+
+#if defined(POOL_L2)
+#define POW2_OP(x, vec_size) ((x) * (x))
+#else /* defined(POOL_L2) */
+#define POW2_OP(x, vec_size) (x)
+#endif /* defined(POOL_L2) */
+
+#define DIV_OP(x, y) (x * (1.f / y))
+#define SQRT_OP(x) sqrt((x))
+
+#if defined(FP_MIXED_PRECISION) || defined(QUANTIZED)
+#define CONVERT_TO_ACC_DATA_TYPE(x, n) CONVERT(x, VEC_DATA_TYPE(ACC_DATA_TYPE, n))
+#define VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(n, offset, ptr) CONVERT_TO_ACC_DATA_TYPE(vload##n(offset, ptr), n)
+#else /* defined(FP_MIXED_PRECISION) || defined(QUANTIZED)*/
+#define VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(n, offset, ptr) vload##n(offset, ptr)
+#endif /* defined(FP_MIXED_PRECISION) || defined(QUANTIZED)*/
+
+ACC_DATA_TYPE calculate_avg_scale(const int pool_size_x, const int pool_size_y, const int upper_bound_w, const int upper_bound_h,
+ const int pad_x, const int pad_y, const int stride_x, const int stride_y)
+{
+ int start_x = get_global_id(0) * stride_x - pad_x;
+ int start_y = get_global_id(1) * stride_y - pad_y;
+ const int end_x = min(start_x + pool_size_x, upper_bound_w);
+ const int end_y = min(start_y + pool_size_y, upper_bound_h);
+#if defined(EXCLUDE_PADDING)
+ start_x = max(0, start_x);
+ start_y = max(0, start_y);
+#endif /* defined(EXCLUDE_PADDING) */
+ return ((end_y - start_y) * (end_x - start_x));
+}
+
+#if defined(POOL_SIZE_X) && defined(POOL_SIZE_Y)
+
+/** Performs a pooling function of pool size equal to N (NCHW)
+ *
+ * @note Datatype must be passed using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types are F16/F32/QASYMM8;
+ * @note Pool sizes must be passed using -DPOOL_SIZE_X and -DPOOL_SIZE_Y e.g. -DPOOL_SIZE_X=13;
+ * @note In case of average pooling the following information must be passed at compile time:
+ * -DPOOL_AVG must be provided otherwise max pooling will be performed.
+ * -DMAX_WIDTH and -DMAX_HEIGHT which are the maximum accessible indices in x and y dimensions (width + pad)
+ * -DSTRIDE_X and -DSTRIDE_Y which are the steps of the window along the x and y directions
+ * -DPAD_X and -DPAD_Y which are the pooling paddings in x and y dimension
+ * @note The initial value for the pooling operation must be passed at compile time using -DINITIAL_VALUE e.g. -DINITIAL_VALUE=0
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32/QASYMM8
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void pooling_layer_MxN_nchw(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst))
+{
+ int id0 = get_global_id(0);
+ int id1 = get_global_id(1);
+ int id2 = get_global_id(2);
+
+ int x_coords = (id0 * STRIDE_X) - PAD_X;
+ int y_coords = (id1 * STRIDE_Y) - PAD_Y;
+
+ __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + y_coords * (int)src_stride_y + id2 * src_stride_z;
+
+ VEC_DATA_TYPE(ACC_DATA_TYPE, 8)
+ vdata = INITIAL_VALUE;
+ ACC_DATA_TYPE sdata = INITIAL_VALUE;
+
+ const int end_x = min((int)POOL_SIZE_X, (int)(SRC_WIDTH - x_coords));
+ const int end_y = min((int)POOL_SIZE_Y, (int)(SRC_HEIGHT - y_coords));
+
+ // Load data
+ for(int y = 0; y < end_y; ++y)
+ {
+ if((y_coords + y) >= 0)
+ {
+ int x = 0;
+ for(; x <= (end_x - 8); x += 8)
+ {
+ int8 src_x = (int8)(x_coords + x) + VEC_OFFS(int, 8);
+#if defined(POOL_AVG) || defined(POOL_L2)
+ SELECT_VEC_DATA_TYPE(ACC_DATA_TYPE, 8)
+ cond_x = CONVERT(src_x < 0, SELECT_VEC_DATA_TYPE(ACC_DATA_TYPE, 8));
+ src_x = clamp(src_x, (int8)0, (int8)(SRC_WIDTH - 1));
+ VEC_DATA_TYPE(ACC_DATA_TYPE, 8)
+ data0 = select(VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(8, 0, (__global DATA_TYPE *)(src_addr + src_x.s0 * sizeof(DATA_TYPE) + y * src_stride_y)), (VEC_DATA_TYPE(ACC_DATA_TYPE, 8))0, REVERSE(cond_x, 8));
+#else // defined(POOL_AVG) || defined(POOL_L2)
+ src_x = clamp(src_x, 0, SRC_WIDTH - 1);
+ VEC_DATA_TYPE(ACC_DATA_TYPE, 8)
+ data0 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(8, 0, (__global DATA_TYPE *)(src_addr + src_x.s0 * sizeof(DATA_TYPE) + y * src_stride_y));
+#endif // defined(POOL_AVG) || defined(POOL_L2)
+
+#if defined(POOL_L2)
+ // Raise to power of 2 for L2 Pooling
+ data0 *= data0;
+#endif /* defined(POOL_L2) */
+
+ vdata = POOL_OP(vdata, data0);
+ }
+
+ // Leftover
+ for(; x < end_x; ++x)
+ {
+ int src_x = x_coords + x;
+#if defined(POOL_AVG) || defined(POOL_L2)
+ SELECT_DATA_TYPE(ACC_DATA_TYPE)
+ cond_x = (src_x < 0);
+ src_x = clamp(src_x, 0, SRC_WIDTH - 1);
+ ACC_DATA_TYPE data0 = select((ACC_DATA_TYPE)(*((__global DATA_TYPE *)(src_addr + src_x * sizeof(DATA_TYPE) + y * src_stride_y))), (ACC_DATA_TYPE)0, cond_x);
+#else // defined(POOL_AVG) || defined(POOL_L2)
+ src_x = clamp(src_x, 0, SRC_WIDTH - 1);
+ ACC_DATA_TYPE data0 = (ACC_DATA_TYPE)(*((__global DATA_TYPE *)(src_addr + src_x * sizeof(DATA_TYPE) + y * src_stride_y)));
+#endif // defined(POOL_AVG) || defined(POOL_L2)
+
+#if defined(POOL_L2)
+ // Raise to power of 2 for L2 Pooling
+ data0 *= data0;
+#endif /* defined(POOL_L2) */
+
+ sdata = POOL_OP(sdata, data0);
+ }
+ }
+ }
+
+ // Reduce result
+ VEC_DATA_TYPE(ACC_DATA_TYPE, 4)
+ reduce4 = POOL_OP(vdata.s0123, vdata.s4567);
+ VEC_DATA_TYPE(ACC_DATA_TYPE, 2)
+ reduce2 = POOL_OP(reduce4.s01, reduce4.s23);
+ ACC_DATA_TYPE res = POOL_OP(reduce2.s0, reduce2.s1);
+ res = POOL_OP(res, sdata);
+
+#if defined(POOL_AVG) || defined(POOL_L2)
+ // Divide by pool region in case of average pooling
+ res = DIV_OP(res, calculate_avg_scale(POOL_SIZE_X, POOL_SIZE_Y, MAX_WIDTH, MAX_HEIGHT, PAD_X, PAD_Y, STRIDE_X, STRIDE_Y));
+#endif /* defined(POOL_AVG) || defined(POOL_L2) */
+
+#if defined(QUANTIZED)
+
+ DATA_TYPE result_q8 = CONVERT(res, DATA_TYPE);
+
+#if defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT)
+
+ const float result_f32 = convert_float(result_q8);
+ const float input_offset = (float)OFFSET_IN1;
+ const float input_scale = (float)SCALE_IN1;
+ const float scale_out = (float)SCALE_OUT;
+ const float offset_out = (float)OFFSET_OUT;
+ const float in_f32 = (result_f32 - input_offset) * input_scale;
+ const float out_f32 = in_f32 / scale_out + offset_out;
+ result_q8 = CONVERT_SAT(convert_int_rte(out_f32), DATA_TYPE);
+
+#endif /* defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT) */
+
+ *(__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + id0 * sizeof(DATA_TYPE) + id1 * dst_stride_y + id2 * dst_stride_z) = result_q8;
+
+#else // defined(QUANTIZED)
+
+#if defined(POOL_L2)
+ // Take square root of the result in L2 pooling
+ res = SQRT_OP(res);
+#endif /* defined(POOL_L2) */
+
+ // Store result
+ *(__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + id0 * sizeof(DATA_TYPE) + id1 * dst_stride_y + id2 * dst_stride_z) = (DATA_TYPE)res;
+#endif // defined(QUANTIZED)
+}
+#endif // defined(POOL_SIZE_X) && defined(POOL_SIZE_Y)
+
+/** Performs a MAX pooling of pool size equal to 2, and record max value indices for NCHW.
+ *
+ * @note Datatype must be passed using -DDATA_TYPE e.g. -DDATA_TYPE=half. Supported data types are F16/F32
+ * @note Pool sizes must be passed using -DPOOL_SIZE_X and -DPOOL_SIZE_Y e.g. -DPOOL_SIZE_X=13;
+ * @note Tensors width and height must be passed at compile time using -DMAX_WIDTH and -DMAX_HEIGHT
+ * @note Pool strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y which are the steps of the window along the x and y directions
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] indices_ptr Pointer to the indices tensor. Supported data types: U32
+ * @param[in] indices_stride_x Stride of the indices tensor in X dimension (in bytes)
+ * @param[in] indices_step_x indices_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] indices_stride_y Stride of the indices tensor in Y dimension (in bytes)
+ * @param[in] indices_step_y indices_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] indices_stride_z Stride of the indices tensor in Z dimension (in bytes)
+ * @param[in] indices_step_z indices_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] indices_offset_first_element_in_bytes The offset of the first element in the indices tensor
+ */
+__kernel void pooling_layer_2_nchw_indices(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst),
+ TENSOR3D_DECLARATION(indices))
+{
+ int id0 = get_global_id(0);
+ int id1 = get_global_id(1);
+ int id2 = get_global_id(2);
+
+ int2 x_coords = clamp((int2)((id0 * STRIDE_X) - PAD_X), (int2)0, (int2)(SRC_WIDTH - 1));
+ int2 y_coords = clamp((int2)((id1 * STRIDE_Y) - PAD_Y) + VEC_OFFS(int, 2), (int2)0, (int2)(SRC_HEIGHT - 1));
+
+ __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + id2 * src_stride_z;
+
+ // Load data
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ data0 = VLOAD(2)(0, (__global DATA_TYPE *)(src_addr + x_coords.s0 * sizeof(DATA_TYPE) + y_coords.s0 * (int)src_stride_y));
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ data1 = VLOAD(2)(0, (__global DATA_TYPE *)(src_addr + x_coords.s1 * sizeof(DATA_TYPE) + y_coords.s1 * (int)src_stride_y));
+
+ // Perform calculations
+ DATA_TYPE data0_max = POOL_OP(data0.s0, data0.s1);
+ DATA_TYPE data1_max = POOL_OP(data1.s0, data1.s1);
+ DATA_TYPE res = POOL_OP(data0_max, data1_max);
+ // Store result
+ *(__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + id0 * sizeof(DATA_TYPE) + id1 * dst_stride_y + id2 * dst_stride_z) = res;
+
+#if defined(SRC_BATCH)
+
+ uint offset_top = (x_coords.s0 + y_coords.s0 * SRC_WIDTH + id2 * (SRC_WIDTH * SRC_HEIGHT)) % SRC_BATCH;
+ uint offset_bottom = offset_top + SRC_WIDTH;
+
+ uint index0 = select(offset_top + 1, offset_top, isgreaterequal(data0.s0, data0.s1));
+ uint index1 = select(offset_bottom + 1, offset_bottom, isgreaterequal(data1.s0, data1.s1));
+ uint index = select(index1, index0, isgreaterequal(data0_max, data1_max));
+
+ *(__global uint *)(indices_ptr + indices_offset_first_element_in_bytes + id0 * sizeof(uint) + id1 * indices_stride_y + id2 * indices_stride_z) = index;
+
+#endif // defined(SRC_BATCH)
+}
\ No newline at end of file
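Two parts of the new NCHW pooling file are worth spelling out: the divisor used for average and L2 pooling only counts the window positions that fall inside the (optionally padding-excluded) input, and the quantized path rescales the pooled value from the input quantization to the output quantization. A scalar sketch in C of both, where exclude_padding is a runtime flag purely for illustration (the kernel resolves it at compile time through -DEXCLUDE_PADDING), is:

    /* Mirror of calculate_avg_scale(): how many elements the window covers at
     * output position (out_x, out_y). */
    static int avg_pool_divisor(int out_x, int out_y, int pool_w, int pool_h,
                                int upper_bound_w, int upper_bound_h,
                                int pad_x, int pad_y, int stride_x, int stride_y,
                                int exclude_padding)
    {
        int start_x = out_x * stride_x - pad_x;
        int start_y = out_y * stride_y - pad_y;
        const int end_x = (start_x + pool_w < upper_bound_w) ? start_x + pool_w : upper_bound_w;
        const int end_y = (start_y + pool_h < upper_bound_h) ? start_y + pool_h : upper_bound_h;
        if (exclude_padding)
        {
            if (start_x < 0) start_x = 0;
            if (start_y < 0) start_y = 0;
        }
        return (end_y - start_y) * (end_x - start_x);
    }

    /* Requantization used when the kernel is built with -DQUANTIZED and the
     * OFFSET_IN1/SCALE_IN1/OFFSET_OUT/SCALE_OUT defines: dequantize the pooled
     * value, then express it in the output scale/offset (the kernel then
     * rounds and saturates the result). */
    static float pool_requantize(float result, float offset_in, float scale_in,
                                 float offset_out, float scale_out)
    {
        const float in_f32 = (result - offset_in) * scale_in;
        return in_f32 / scale_out + offset_out;
    }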
diff --git a/src/core/CL/cl_kernels/prior_box_layer.cl b/src/core/CL/cl_kernels/nchw/prior_box_layer.cl
index de10decdec..7524ba7b4a 100644
--- a/src/core/CL/cl_kernels/prior_box_layer.cl
+++ b/src/core/CL/cl_kernels/nchw/prior_box_layer.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/src/core/CL/cl_kernels/reorg_layer.cl b/src/core/CL/cl_kernels/nchw/reorg_layer.cl
index 29344de37a..f66b17c1a6 100644
--- a/src/core/CL/cl_kernels/reorg_layer.cl
+++ b/src/core/CL/cl_kernels/nchw/reorg_layer.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -72,45 +72,4 @@ __kernel void reorg_layer_nchw(
int src_offset = xi * sizeof(DATA_TYPE) + yi * src_stride_y + zi * src_stride_z;
*((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)(src_ptr + src_offset_first_element_in_bytes + src_offset));
}
-
-/** Performs a reorganization layer of input tensor to the output tensor when the data layout is NHWC
- *
- * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
- * @note The depth of the input tensor must be passed at compile time using -DSRC_DEPTH: e.g. -DSRC_DEPTH=64
- * @note The distance between 2 consecutive pixels along the x and y direction must be passed at compile time using -DSTRIDE: e.g. -DSTRIDE=2
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: All
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void reorg_layer_nhwc(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst))
-{
- Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(dst);
-
- int xo = get_global_id(1);
- int yo = get_global_id(2);
- int zo = get_global_id(0);
- int xi, yi, zi;
-
- CALCULATE_SRC_COORDINATES(xo, yo, zo, xi, yi, zi);
-
- int src_offset = zi * sizeof(DATA_TYPE) + xi * src_stride_y + yi * src_stride_z;
-
- *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)(src_ptr + src_offset_first_element_in_bytes + src_offset));
-}
 #endif // // defined(DATA_TYPE) && defined(SRC_DEPTH) && defined(STRIDE)
\ No newline at end of file
diff --git a/src/core/CL/cl_kernels/nchw/scale.cl b/src/core/CL/cl_kernels/nchw/scale.cl
new file mode 100644
index 0000000000..2b4d6be9fb
--- /dev/null
+++ b/src/core/CL/cl_kernels/nchw/scale.cl
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2016-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+#include "tile_helpers.h"
+
+/** Transforms four 2D coordinates. This is used to map the output coordinates to the input coordinates.
+ *
+ * @param[in] coord 2D coordinates to transform.
+ * @param[in] scale input/output scale ratio
+ *
+ * @return a float8 containing 4 2D transformed values in the input image.
+ */
+inline const float8 transform_nearest(const float2 coord, const float2 scale)
+{
+#ifdef SAMPLING_POLICY_TOP_LEFT
+ const float4 in_x_coords = (float4)(coord.s0, 1 + coord.s0, 2 + coord.s0, 3 + coord.s0);
+ const float4 new_x = in_x_coords * (float4)(scale.s0);
+ const float4 new_y = (float4)(coord.s1 * scale.s1);
+ return (float8)(new_x.s0, new_y.s0, new_x.s1, new_y.s1, new_x.s2, new_y.s2, new_x.s3, new_y.s3);
+#elif SAMPLING_POLICY_CENTER
+ const float4 in_x_coords = (float4)(coord.s0, 1 + coord.s0, 2 + coord.s0, 3 + coord.s0);
+ const float4 new_x = (in_x_coords + ((float4)(0.5f))) * (float4)(scale.s0);
+ const float4 new_y = (float4)((coord.s1 + 0.5f) * scale.s1);
+ return (float8)(new_x.s0, new_y.s0, new_x.s1, new_y.s1, new_x.s2, new_y.s2, new_x.s3, new_y.s3);
+#else /* SAMPLING_POLICY */
+#error("Unsupported sampling policy");
+#endif /* SAMPLING_POLICY */
+}
+
+/** Transforms four 2D coordinates. This is used to map the output coordinates to the input coordinates.
+ *
+ * @param[in] coord 2D coordinates to transform.
+ * @param[in] scale input/output scale ratio
+ *
+ * @return a float8 containing 4 2D transformed values in the input image.
+ */
+inline const float8 transform_bilinear(const float2 coord, const float2 scale)
+{
+ const float4 in_x_coords = (float4)(coord.s0, 1 + coord.s0, 2 + coord.s0, 3 + coord.s0);
+#ifdef SAMPLING_POLICY_TOP_LEFT
+ const float4 new_x = in_x_coords * (float4)(scale.s0);
+ const float4 new_y = (float4)(coord.s1 * scale.s1);
+ return (float8)(new_x.s0, new_y.s0, new_x.s1, new_y.s1, new_x.s2, new_y.s2, new_x.s3, new_y.s3);
+#elif SAMPLING_POLICY_CENTER
+ const float4 new_x = (in_x_coords + ((float4)(0.5f))) * (float4)(scale.s0) - (float4)(0.5f);
+ const float4 new_y = (float4)((coord.s1 + 0.5f) * scale.s1 - 0.5f);
+ return (float8)(new_x.s0, new_y.s0, new_x.s1, new_y.s1, new_x.s2, new_y.s2, new_x.s3, new_y.s3);
+#else /* SAMPLING_POLICY */
+#error("Unsupported sampling policy");
+#endif /* SAMPLING_POLICY */
+}
+
+/** Performs an affine transformation on an image interpolating with the NEAREST NEIGHBOUR method. Input and output are single channel U8 or S16.
+ *
+ * @note Sampling policy to be used is passed as -DSAMPLING_POLICY_(TYPE) e.g. -DSAMPLING_POLICY_TOP_LEFT
+ *
+ * @param[in] in_ptr Pointer to the source image. Supported data types: U8, S16.
+ * @param[in] in_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] in_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] in_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] in_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] in_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[out] out_ptr Pointer to the destination image. Supported data types: U8, S16. (Must be the same as the input)
+ * @param[in] out_stride_x Stride of the destination image in X dimension (in bytes)
+ * @param[in] out_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] out_stride_y Stride of the destination image in Y dimension (in bytes)
+ * @param[in] out_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] out_offset_first_element_in_bytes The offset of the first element in the destination image
+ */
+__kernel void scale_nearest_neighbour_nchw(
+ IMAGE_DECLARATION(in),
+ IMAGE_DECLARATION(out))
+{
+ const int x = get_global_id(0);
+ const int y = get_global_id(1);
+
+ float8 transformed = transform_nearest((float2)(x * VEC_SIZE, y), (float2)(SCALE_X, SCALE_Y));
+#ifdef ALIGN_CORNERS
+ transformed = round(transformed);
+#endif // ALIGN_CORNERS
+
+ TILE(SELECT_DATA_TYPE(DATA_TYPE), 1, 4, cond);
+ cond[0].v = CONVERT(((transformed.even < 0) || (transformed.even >= (int)SRC_WIDTH)) || ((transformed.odd < 0) || (transformed.odd >= (int)SRC_HEIGHT)), SELECT_VEC_DATA_TYPE(DATA_TYPE, 4));
+
+ TILE(int, 1, 4, in_x);
+ TILE(int, 1, 4, in_y);
+ in_x[0].v = convert_int4(clamp(transformed.even, 0.f, SRC_WIDTH - 1.f));
+ in_y[0].v = convert_int4(clamp(transformed.odd, 0.f, SRC_HEIGHT - 1.f));
+
+ TILE(DATA_TYPE, 1, VEC_SIZE, out_vals);
+ LOOP_UNROLLING(int, i, 0, 1, VEC_SIZE,
+ {
+ out_vals[0].s[i] = select(*((__global DATA_TYPE *)(in_ptr + in_offset_first_element_in_bytes + in_x[0].s[i] * sizeof(DATA_TYPE) + in_y[0].s[i] * in_stride_y)), (DATA_TYPE)CONSTANT_VALUE, cond[0].s[i]);
+ })
+
+ __global uchar *out_addr = out_ptr + out_offset_first_element_in_bytes + x * out_step_x + y * out_stride_y;
+
+ if(x == get_global_size(0) - 1)
+ {
+#if VEC_SIZE == 1
+ VSTORE_PARTIAL(VEC_SIZE, VEC_SIZE_LEFTOVER)
+ (out_vals[0].s[0], 0, (__global DATA_TYPE *)out_addr);
+#else // VEC_SIZE == 1
+ VSTORE_PARTIAL(VEC_SIZE, VEC_SIZE_LEFTOVER)
+ (out_vals[0].v, 0, (__global DATA_TYPE *)out_addr);
+#endif // VEC_SIZE == 1
+ }
+ else
+ {
+#if VEC_SIZE == 1
+ VSTORE(VEC_SIZE)
+ (out_vals[0].s[0], 0, (__global DATA_TYPE *)out_addr);
+#else // VEC_SIZE == 1
+ VSTORE(VEC_SIZE)
+ (out_vals[0].v, 0, (__global DATA_TYPE *)out_addr);
+#endif // VEC_SIZE == 1
+ }
+}
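
The border handling in the kernel above reduces to: remember whether the transformed coordinate fell outside the source image, clamp it for the load, then substitute the border constant wherever the original coordinate was invalid. A minimal scalar sketch of that pattern in host C follows; the image size, stride and constant value are illustrative placeholders, not values the kernel uses.

    #include <stdio.h>

    /* Scalar sketch of the clamp + constant-border select applied per element. */
    static unsigned char sample_with_constant_border(const unsigned char *img,
                                                     int width, int height, int stride_elems,
                                                     int x, int y, unsigned char constant)
    {
        const int out_of_bounds = (x < 0) || (x >= width) || (y < 0) || (y >= height);
        const int cx = x < 0 ? 0 : (x >= width ? width - 1 : x);   /* clamp like the kernel */
        const int cy = y < 0 ? 0 : (y >= height ? height - 1 : y);
        const unsigned char v = img[cy * stride_elems + cx];
        return out_of_bounds ? constant : v;                       /* select(value, CONSTANT_VALUE, cond) */
    }

    int main(void)
    {
        const unsigned char img[2 * 2] = { 10, 20, 30, 40 };
        printf("%d %d\n", (int)sample_with_constant_border(img, 2, 2, 2, 1, 1, 0),
                          (int)sample_with_constant_border(img, 2, 2, 2, -1, 0, 0));
        return 0;
    }
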
+
+/** Performs an affine transformation on an image interpolating with the BILINEAR method.
+ *
+ * @note Sampling policy to be used is passed as -DSAMPLING_POLICY_(TYPE) e.g. -DSAMPLING_POLICY_TOP_LEFT
+ *
+ * @param[in] in_ptr Pointer to the source image. Supported data types: U8, S16.
+ * @param[in] in_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] in_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] in_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] in_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] in_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[out] out_ptr Pointer to the destination image. Supported data types: U8, S16. (Must be the same as the input)
+ * @param[in] out_stride_x Stride of the destination image in X dimension (in bytes)
+ * @param[in] out_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] out_stride_y Stride of the destination image in Y dimension (in bytes)
+ * @param[in] out_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] out_offset_first_element_in_bytes The offset of the first element in the destination image
+ */
+__kernel void scale_bilinear_nchw(
+ IMAGE_DECLARATION(in),
+ IMAGE_DECLARATION(out))
+{
+ const int x = get_global_id(0);
+ const int y = get_global_id(1);
+
+ TILE(float, 1, 8, trans_coords);
+ TILE(float, 1, 8, floor_coords);
+ TILE(int, 1, 16, in_x);
+ TILE(int, 1, 16, in_y);
+
+ trans_coords[0].v = transform_bilinear((float2)(x * VEC_SIZE, y), (float2)(SCALE_X, SCALE_Y));
+ floor_coords[0].v = floor(trans_coords[0].v);
+
+ LOOP_UNROLLING(int, i, 0, 1, 4,
+ {
+ LOOP_UNROLLING(int, j, 0, 1, 4,
+ {
+ in_x[0].s[i * 4 + j] = floor_coords[0].s[i * 2 + 0] + (j % 2);
+ in_y[0].s[i * 4 + j] = floor_coords[0].s[i * 2 + 1] + (j > 1);
+ })
+ })
+
+#if defined(BORDER_MODE_CONSTANT)
+ TILE(SELECT_DATA_TYPE(DATA_TYPE), 1, 16, cond);
+ cond[0].v = CONVERT(((in_x[0].v < 0) || (in_x[0].v >= (int)SRC_WIDTH)) || ((in_y[0].v < 0) || (in_y[0].v >= (int)SRC_HEIGHT)), SELECT_VEC_DATA_TYPE(DATA_TYPE, 16));
+#endif // defined(BORDER_MODE_CONSTANT)
+
+ in_x[0].v = clamp(in_x[0].v, 0, (int16)((int)SRC_WIDTH - 1));
+ in_y[0].v = clamp(in_y[0].v, 0, (int16)((int)SRC_HEIGHT - 1));
+
+ TILE(DATA_TYPE, 1, 16, in_vals);
+
+ // Loads the values from the input image
+#if defined(BORDER_MODE_CONSTANT)
+ LOOP_UNROLLING(int, i, 0, 1, 16,
+ {
+ in_vals[0].s[i] = select(*((__global DATA_TYPE *)(in_ptr + in_offset_first_element_in_bytes + in_x[0].s[i] * sizeof(DATA_TYPE) + in_y[0].s[i] * (int)in_stride_y)), (DATA_TYPE)CONSTANT_VALUE, cond[0].s[i]);
+ })
+#else // defined(BORDER_MODE_CONSTANT)
+ LOOP_UNROLLING(int, i, 0, 1, 16,
+ {
+ in_vals[0].s[i] = *((__global DATA_TYPE *)(in_ptr + in_offset_first_element_in_bytes + in_x[0].s[i] * sizeof(DATA_TYPE) + in_y[0].s[i] * (int)in_stride_y));
+ })
+#endif // defined(BORDER_MODE_CONSTANT)
+
+ TILE(float, 1, 8, a);
+ TILE(float, 1, 8, b);
+
+ a[0].v = trans_coords[0].v - floor_coords[0].v;
+ b[0].v = ((float8)(1.f)) - a[0].v;
+
+#if defined(OFFSET) && defined(SCALE)
+ TILE(float, 1, 16, in_vals_f32);
+ TILE(float, 1, 4, out_vals_f32);
+
+ in_vals_f32[0].v = convert_float16(convert_int16(in_vals[0].v) - (int16)OFFSET) * (float16)SCALE;
+
+ // Bilinear interpolation: (in0 * b0 * b1) + (in1 * a0 * b1) + (in2 * b0 * a1) + (in3 * a0 * a1)
+ // (in4 * b2 * b3) + (in5 * a2 * b3) + (in6 * b2 * a3) + (in7 * a2 * a3)
+ // (in8 * b4 * b5) + (in9 * a4 * b5) + (in10 * b4 * a5) + (in11 * a4 * a5)
+ // (in12 * b6 * b7) + (in13 * a6 * b7) + (in14 * b6 * a7) + (in15 * a6 * a7)
+ LOOP_UNROLLING(int, i, 0, 1, 4,
+ {
+ out_vals_f32[0].s[i] = (in_vals_f32[0].s[i * 4 + 0] * b[0].s[i * 2] * b[0].s[i * 2 + 1]) + (in_vals_f32[0].s[i * 4 + 1] * a[0].s[i * 2] * b[0].s[i * 2 + 1]) + (in_vals_f32[0].s[i * 4 + 2] * b[0].s[i * 2] * a[0].s[i * 2 + 1]) + (in_vals_f32[0].s[i * 4 + 3] * a[0].s[i * 2] * a[0].s[i * 2 + 1]);
+ })
+
+ TILE(DATA_TYPE, 1, 4, out_vals_4);
+ TILE(DATA_TYPE, 1, VEC_SIZE, out_vals);
+
+ out_vals_4[0].v = CONVERT_SAT(convert_int4_sat_rtp(out_vals_f32[0].v / (float)SCALE) + OFFSET, VEC_DATA_TYPE(DATA_TYPE, 4));
+
+ LOOP_UNROLLING(int, i, 0, 1, VEC_SIZE,
+ {
+ out_vals[0].s[i] = out_vals_4[0].s[i];
+ })
+#else // defined(OFFSET) && defined(SCALE)
+
+ TILE(DATA_TYPE, 1, VEC_SIZE, out_vals);
+
+ // Bilinear interpolation: (in0 * b0 * b1) + (in1 * a0 * b1) + (in2 * b0 * a1) + (in3 * a0 * a1)
+ // (in4 * b2 * b3) + (in5 * a2 * b3) + (in6 * b2 * a3) + (in7 * a2 * a3)
+ // (in8 * b4 * b5) + (in9 * a4 * b5) + (in10 * b4 * a5) + (in11 * a4 * a5)
+ // (in12 * b6 * b7) + (in13 * a6 * b7) + (in14 * b6 * a7) + (in15 * a6 * a7)
+ LOOP_UNROLLING(int, i, 0, 1, VEC_SIZE,
+ {
+ out_vals[0].s[i] = (in_vals[0].s[i * 4 + 0] * b[0].s[i * 2] * b[0].s[i * 2 + 1]) + (in_vals[0].s[i * 4 + 1] * a[0].s[i * 2] * b[0].s[i * 2 + 1]) + (in_vals[0].s[i * 4 + 2] * b[0].s[i * 2] * a[0].s[i * 2 + 1]) + (in_vals[0].s[i * 4 + 3] * a[0].s[i * 2] * a[0].s[i * 2 + 1]);
+ })
+#endif // defined(OFFSET) && defined(SCALE)
+
+ __global uchar *out_addr = out_ptr + out_offset_first_element_in_bytes + x * out_step_x + y * out_stride_y;
+
+ if(x == get_global_size(0) - 1)
+ {
+#if VEC_SIZE == 1
+ VSTORE_PARTIAL(VEC_SIZE, VEC_SIZE_LEFTOVER)
+ (out_vals[0].s[0], 0, (__global DATA_TYPE *)out_addr);
+#else // VEC_SIZE == 1
+ VSTORE_PARTIAL(VEC_SIZE, VEC_SIZE_LEFTOVER)
+ (out_vals[0].v, 0, (__global DATA_TYPE *)out_addr);
+#endif // VEC_SIZE == 1
+ }
+ else
+ {
+#if VEC_SIZE == 1
+ VSTORE(VEC_SIZE)
+ (out_vals[0].s[0], 0, (__global DATA_TYPE *)out_addr);
+#else // VEC_SIZE == 1
+ VSTORE(VEC_SIZE)
+ (out_vals[0].v, 0, (__global DATA_TYPE *)out_addr);
+#endif // VEC_SIZE == 1
+ }
+} \ No newline at end of file
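
The comment block inside scale_bilinear_nchw spells out the 2x2 blend: with a equal to the fractional part of the transformed coordinate and b = 1 - a, each output value is in0*b0*b1 + in1*a0*b1 + in2*b0*a1 + in3*a0*a1, where in0..in3 are the top-left, top-right, bottom-left and bottom-right neighbours. In the quantized path the four samples are first dequantized with (v - OFFSET) * SCALE, blended in float, and then requantized. A small host-side C sketch of the blend itself, with sample values and fractions made up for illustration:

    #include <stdio.h>

    /* Host-side sketch of the 2x2 bilinear blend computed per output pixel:
     * ax/ay are the fractional parts of the transformed coordinate, b = 1 - a. */
    static float bilinear(float tl, float tr, float bl, float br, float ax, float ay)
    {
        const float bx = 1.0f - ax;
        const float by = 1.0f - ay;
        return tl * bx * by + tr * ax * by + bl * bx * ay + br * ax * ay;
    }

    int main(void)
    {
        /* Transformed coordinate (1.25, 2.75) -> floor (1, 2), fractions (0.25, 0.75). */
        const float ax = 0.25f, ay = 0.75f;
        /* Values loaded at (1,2), (2,2), (1,3), (2,3) - made-up sample data. */
        printf("out = %.3f\n", bilinear(10.f, 20.f, 30.f, 40.f, ax, ay)); /* prints 27.500 */
        return 0;
    }
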
diff --git a/src/core/CL/cl_kernels/nchw/space_to_batch.cl b/src/core/CL/cl_kernels/nchw/space_to_batch.cl
new file mode 100644
index 0000000000..91520213e8
--- /dev/null
+++ b/src/core/CL/cl_kernels/nchw/space_to_batch.cl
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2018-2021, 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(BATCH_SIZE) && defined(DATA_TYPE) && defined(WIDTH_IN) && defined(HEIGHT_IN)
+/** Calculate the space to batch conversion.
+ *
+ * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
+ * @note The block shape tensor rank must be passed at compile time using -DBLOCK_SHAPE_DIM. e.g. -DBLOCK_SHAPE_DIM=2
+ *
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: All
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source image
+ * @param[in]  paddings_ptr                              Pointer to the paddings tensor. Supported data types: S32
+ * @param[in]  paddings_stride_x                         Stride of the paddings tensor in X dimension (in bytes)
+ * @param[in]  paddings_step_x                           paddings_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  paddings_stride_y                         Stride of the paddings tensor in Y dimension (in bytes)
+ * @param[in]  paddings_step_y                           paddings_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  paddings_offset_first_element_in_bytes    The offset of the first element in the paddings tensor
+ * @param[in] block_shape_ptr Pointer to the block shape tensor. Supported data types: S32
+ * @param[in] block_shape_stride_x Stride of the block shape tensor in X dimension (in bytes)
+ * @param[in] block_shape_step_x block_shape_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  block_shape_offset_first_element_in_bytes The offset of the first element in the block shape tensor
+ * @param[in] batch_id The output tensor batch id
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
+ */
+__kernel void space_to_batch_nchw(
+ TENSOR4D_DECLARATION(input),
+ IMAGE_DECLARATION(paddings),
+ VECTOR_DECLARATION(block_shape),
+ const int batch_id,
+ TENSOR3D_DECLARATION(output))
+{
+ Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input);
+ Image pad = CONVERT_TO_IMAGE_STRUCT_NO_STEP(paddings);
+ Vector block = CONVERT_TO_VECTOR_STRUCT_NO_STEP(block_shape);
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
+
+ const int pad_left_x = *((__global int *)offset(&pad, 0, 0));
+ const int pad_right_x = *((__global int *)offset(&pad, 1, 0));
+ const int pad_left_y = *((__global int *)offset(&pad, 0, 1));
+ const int pad_right_y = *((__global int *)offset(&pad, 1, 1));
+
+ int block_x = *((__global int *)vector_offset(&block, 0));
+ int block_y = *((__global int *)vector_offset(&block, 1));
+
+ const int out_x = get_global_id(0);
+ const int out_y = get_global_id(1);
+ const int z = get_global_id(2);
+
+ const int pos_x = out_x * block_x + ((batch_id / BATCH_IN) % block_x);
+ const int pos_y = out_y * block_y + ((batch_id / BATCH_IN) / block_x);
+
+ if(((pos_y >= pad_left_y) && (pos_y < pad_left_y + HEIGHT_IN) && (pos_x >= pad_left_x) && (pos_x < pad_left_x + WIDTH_IN)))
+ {
+ const int w = batch_id % BATCH_IN;
+ const int in_x = pos_x - pad_left_x;
+ const int in_y = pos_y - pad_left_y;
+
+ *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)tensor4D_offset(&in, in_x, in_y, z, w));
+ }
+}
+
+#endif // defined(BATCH_SIZE) && defined(DATA_TYPE) && defined(WIDTH_IN) && defined(HEIGHT_IN)
+
+#if defined(BATCH_SIZE) && defined(DATA_TYPE) && defined(BLOCK_SHAPE_X) && defined(BLOCK_SHAPE_Y) && defined(PAD_LEFT_X) && defined(PAD_RIGHT_X) && defined(PAD_LEFT_Y) && defined(PAD_RIGHT_Y) && defined(WIDTH_IN) && defined(HEIGHT_IN)
+/** Calculate the space to batch conversion.
+ *
+ * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
+ * @note The input tensor batch size must be passed at compile time using -DBATCH_SIZE. e.g. -DBATCH_SIZE=2
+ * @note The block shape x must be passed at compile time using -DBLOCK_SHAPE_X. e.g. -DBLOCK_SHAPE_X=2
+ * @note The block shape y must be passed at compile time using -DBLOCK_SHAPE_Y. e.g. -DBLOCK_SHAPE_Y=2
+ * @note The starting pad value of x must be passed at compile time using -DPAD_LEFT_X. e.g. -DPAD_LEFT_X=2
+ * @note The ending pad value of x must be passed at compile time using -DPAD_RIGHT_X. e.g. -DPAD_RIGHT_X=2
+ * @note The starting pad value of y must be passed at compile time using -DPAD_LEFT_Y. e.g. -DPAD_LEFT_Y=2
+ * @note The ending pad value of y must be passed at compile time using -DPAD_RIGHT_Y. e.g. -DPAD_RIGHT_Y=2
+ *
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: All
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source image
+ * @param[in] batch_id The output tensor batch id
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  output_stride_z                       Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
+ */
+__kernel void space_to_batch_static_nchw(
+ TENSOR4D_DECLARATION(input),
+ const int batch_id,
+ TENSOR3D_DECLARATION(output))
+{
+ Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input);
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
+
+ int block_x = BLOCK_SHAPE_X;
+ int block_y = BLOCK_SHAPE_Y;
+
+ const int out_x = get_global_id(0);
+ const int out_y = get_global_id(1);
+ const int z = get_global_id(2);
+
+ const int pos_x = out_x * block_x + ((batch_id / BATCH_IN) % block_x);
+ const int pos_y = out_y * block_y + ((batch_id / BATCH_IN) / block_x);
+
+ if(pos_y >= PAD_LEFT_Y && pos_y < PAD_LEFT_Y + HEIGHT_IN && pos_x >= PAD_LEFT_X && pos_x < PAD_LEFT_X + WIDTH_IN)
+ {
+ const int w = batch_id % BATCH_IN;
+ const int in_x = pos_x - PAD_LEFT_X;
+ const int in_y = pos_y - PAD_LEFT_Y;
+
+ *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)tensor4D_offset(&in, in_x, in_y, z, w));
+ }
+}
+#endif // defined(BATCH_SIZE) && defined(DATA_TYPE) && defined(BLOCK_SHAPE_X) && defined(BLOCK_SHAPE_Y) && defined(PAD_LEFT_X) && defined(PAD_RIGHT_X) && defined(PAD_LEFT_Y) && defined(PAD_RIGHT_Y) && defined(WIDTH_IN) && defined(HEIGHT_IN)
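
Both space-to-batch kernels above share the same index arithmetic: the output batch index selects which offset inside the block a given output pixel reads from, and the read is skipped when the selected position falls inside the padding region. The host C sketch below walks that mapping for one output position; all sizes are made up for illustration, and batch_in stands in for the input batch count (BATCH_IN).

    #include <stdio.h>

    /* Host-side sketch of the space-to-batch index mapping used by both kernels. */
    int main(void)
    {
        const int batch_in = 1, block_x = 2, block_y = 2;
        const int pad_left_x = 0, pad_left_y = 0, width_in = 4, height_in = 4;

        for (int batch_id = 0; batch_id < batch_in * block_x * block_y; ++batch_id)
        {
            const int out_x = 1, out_y = 1; /* one output position, for illustration */
            const int pos_x = out_x * block_x + ((batch_id / batch_in) % block_x);
            const int pos_y = out_y * block_y + ((batch_id / batch_in) / block_x);

            if (pos_x >= pad_left_x && pos_x < pad_left_x + width_in &&
                pos_y >= pad_left_y && pos_y < pad_left_y + height_in)
            {
                const int in_x = pos_x - pad_left_x;
                const int in_y = pos_y - pad_left_y;
                const int w    = batch_id % batch_in;
                printf("out(batch=%d, x=%d, y=%d) <- in(batch=%d, x=%d, y=%d)\n",
                       batch_id, out_x, out_y, w, in_x, in_y);
            }
        }
        return 0;
    }
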
diff --git a/src/core/CL/cl_kernels/nchw/space_to_depth.cl b/src/core/CL/cl_kernels/nchw/space_to_depth.cl
new file mode 100644
index 0000000000..8097f65942
--- /dev/null
+++ b/src/core/CL/cl_kernels/nchw/space_to_depth.cl
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2019-2021, 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(DATA_TYPE) && defined(BLOCK_SHAPE) && defined(CHANNEL_SIZE)
+/** Space to depth transformation. (NCHW)
+ *
+ * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
+ * @note The channel size must be passed at compile time using -DCHANNEL_SIZE. e.g. -DCHANNEL_SIZE=2
+ * @note The block shape must be passed at compile time using -DBLOCK_SHAPE. e.g. -DBLOCK_SHAPE=2
+ *
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: All
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
+ * @param[in] batch_id The input tensor batch id
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  output_stride_z                       Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void space_to_depth_nchw(
+ TENSOR4D_DECLARATION(input),
+ const int batch_id,
+ TENSOR3D_DECLARATION(output))
+{
+ Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input);
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
+
+ const int r = (CHANNEL_SIZE / (BLOCK_SHAPE * BLOCK_SHAPE));
+ const int x = get_global_id(0);
+ const int y = get_global_id(1);
+ const int z = get_global_id(2) % r;
+
+ const int in_x = x * BLOCK_SHAPE + (get_global_id(2) / r) % BLOCK_SHAPE;
+ const int in_y = y * BLOCK_SHAPE + (get_global_id(2) / r) / BLOCK_SHAPE;
+
+ *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)tensor4D_offset(&in, in_x, in_y, z, batch_id));
+}
+#endif // defined(DATA_TYPE) && defined(BLOCK_SHAPE) && defined(CHANNEL_SIZE)
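
The index arithmetic in space_to_depth_nchw can be checked on the host in the same way: each output channel picks one (dx, dy) offset inside a BLOCK_SHAPE x BLOCK_SHAPE window of the input, and r recovers the input channel count (reading CHANNEL_SIZE as the total output channel count, which is what the arithmetic above implies; this reading is an interpretation, not something the kernel documents). A small C sketch with made-up sizes:

    #include <stdio.h>

    /* Host-side sketch of the space-to-depth index mapping. */
    int main(void)
    {
        const int block        = 2;
        const int channels_out = 8;                              /* plays the role of CHANNEL_SIZE */
        const int r            = channels_out / (block * block); /* input channel count, as in the kernel */

        const int x = 1, y = 0; /* one output spatial position */
        for (int c = 0; c < channels_out; ++c)
        {
            const int z    = c % r;
            const int in_x = x * block + (c / r) % block;
            const int in_y = y * block + (c / r) / block;
            printf("out(c=%d, x=%d, y=%d) <- in(c=%d, x=%d, y=%d)\n", c, x, y, z, in_x, in_y);
        }
        return 0;
    }
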
diff --git a/src/core/CL/cl_kernels/nchw/upsample_layer.cl b/src/core/CL/cl_kernels/nchw/upsample_layer.cl
new file mode 100644
index 0000000000..723c491165
--- /dev/null
+++ b/src/core/CL/cl_kernels/nchw/upsample_layer.cl
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+/** This function applies upsampling to an input image. (NCHW)
+ *
+ * @attention The following variables must be passed at compile time:
+ * -# -DDATA_TYPE = Tensor data type. Supported data types: All
+ * -# -DVEC_SIZE_IN = Input vector size
+ * -# -DVEC_SIZE_OUT = Output vector size
+ * -# -DLAST_ACCESSED_X_IN = The input element that is on the X border (threads trying to set this, might need to step back a bit)
+ * -# -DLAST_ACCESSED_X_OUT = The output element that is on the X border (threads trying to set this, might need to step back a bit)
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: All
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[out] dst_ptr Pointer to the destination image. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination image in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination image in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination image
+ */
+__kernel void upsample_layer_nchw(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst))
+{
+ Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
+ Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
+
+#if defined(VEC_SIZE_IN) && defined(VEC_SIZE_OUT) && defined(LAST_ACCESSED_X_IN) && defined(LAST_ACCESSED_X_OUT)
+ // Check if access on width gets out of bounds
+ // If it does shift access vector to access elements within bounds
+ const int xi_in = (int)(get_global_id(0) * VEC_SIZE_IN);
+ const int xi_out = (int)(get_global_id(0) * VEC_SIZE_OUT);
+ src.ptr -= max(xi_in - (int)LAST_ACCESSED_X_IN, 0) * src_stride_x;
+ dst.ptr -= max(xi_out - (int)LAST_ACCESSED_X_OUT, 0) * dst_stride_x;
+
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ data = vload8(0, (__global DATA_TYPE *)src.ptr);
+
+ VEC_DATA_TYPE(DATA_TYPE, 16)
+ data_out = (VEC_DATA_TYPE(DATA_TYPE, 16))(data.s0, data.s0, data.s1, data.s1, data.s2, data.s2, data.s3, data.s3, data.s4, data.s4, data.s5, data.s5, data.s6, data.s6, data.s7, data.s7);
+
+ vstore16(data_out, 0, (__global DATA_TYPE *)dst.ptr);
+ vstore16(data_out, 0, (__global DATA_TYPE *)tensor3D_offset(&dst, 0, 1, 0));
+#else // !defined(VEC_SIZE_IN) && defined(VEC_SIZE_OUT) && defined(LAST_ACCESSED_X_IN) && defined(LAST_ACCESSED_X_OUT)
+ *((__global DATA_TYPE *)tensor3D_offset(&dst, 0, 0, 0)) = *((__global DATA_TYPE *)src.ptr);
+ *((__global DATA_TYPE *)tensor3D_offset(&dst, 0, 1, 0)) = *((__global DATA_TYPE *)src.ptr);
+#endif // defined(VEC_SIZE_IN) && defined(VEC_SIZE_OUT) && defined(LAST_ACCESSED_X_IN) && defined(LAST_ACCESSED_X_OUT)
+} \ No newline at end of file
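
In the vectorized path, upsample_layer_nchw duplicates each of the eight loaded elements along x (the interleaved vstore16) and writes the same 16-wide row twice, at y and y + 1; the scalar fallback does the same one element at a time. Functionally this is a 2x nearest-neighbour upsample, sketched below in host C with a tiny made-up input.

    #include <stdio.h>

    /* Host-side sketch of the 2x upsample performed by upsample_layer_nchw:
     * every input element is replicated into a 2x2 block of the output. */
    int main(void)
    {
        const int w_in = 4, h_in = 2;
        const int src[2][4] = { { 1, 2, 3, 4 }, { 5, 6, 7, 8 } };
        int dst[4][8];

        for (int y = 0; y < h_in; ++y)
            for (int x = 0; x < w_in; ++x)
                for (int dy = 0; dy < 2; ++dy)
                    for (int dx = 0; dx < 2; ++dx)
                        dst[2 * y + dy][2 * x + dx] = src[y][x];

        for (int y = 0; y < 2 * h_in; ++y)
        {
            for (int x = 0; x < 2 * w_in; ++x)
                printf("%d ", dst[y][x]);
            printf("\n");
        }
        return 0;
    }
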
diff --git a/src/core/CL/cl_kernels/nchw/winograd_filter_transform.cl b/src/core/CL/cl_kernels/nchw/winograd_filter_transform.cl
new file mode 100644
index 0000000000..85eff9e6d9
--- /dev/null
+++ b/src/core/CL/cl_kernels/nchw/winograd_filter_transform.cl
@@ -0,0 +1,911 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(SRC_DIM_Z)
+/** This OpenCL kernel performs Winograd filter transform 3x3/3x1/1x3 when the data layout is NCHW and the output tile is 2x2/2x1/1x2
+ *
+ * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
+ * @note If this kernel is used to perform Winograd filter transform 3x1, -DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note If this kernel is used to perform Winograd filter transform 1x3, -DWINOGRAD_FILTER_TRANSFORM_VERTICAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_stride_z                       Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in]  dst_step_z                         dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void winograd_filter_transform_2x2_3x3_nchw(
+ TENSOR4D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst))
+{
+ Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DIM_Z);
+
+ const __global uchar *src_addr = tensor4D_offset(&src, 0, 0, 0, 0);
+
+ // Load the values from the input tensor
+#if defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL)
+ VEC_DATA_TYPE(DATA_TYPE, 3)
+ w0 = vload3(0, (__global DATA_TYPE *)(src_addr));
+#elif defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
+ VEC_DATA_TYPE(DATA_TYPE, 3)
+ w0 = (VEC_DATA_TYPE(DATA_TYPE, 3))(*((__global DATA_TYPE *)(src_addr + 0 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 1 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 2 * src_stride_y)));
+#else // defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
+ VEC_DATA_TYPE(DATA_TYPE, 3)
+ w0 = vload3(0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y));
+ VEC_DATA_TYPE(DATA_TYPE, 3)
+ w1 = vload3(0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y));
+ VEC_DATA_TYPE(DATA_TYPE, 3)
+ w2 = vload3(0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y));
+#endif // defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL)
+
+ // Row 0
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ out0 = 0.0f;
+ out0.s0 = (w0.s0);
+ out0.s1 = (w0.s0 + w0.s1 + w0.s2) * 0.5f;
+ out0.s2 = (w0.s0 + w0.s2 - w0.s1) * 0.5f;
+ out0.s3 = (w0.s2);
+
+#if !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
+ // Row 1
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ out1 = 0.0f;
+ out1.s0 = (w0.s0 + w1.s0 + w2.s0) * 0.5f;
+ out1.s1 = (w0.s0 + w1.s0 + w2.s0 + w0.s1 + w1.s1 + w2.s1 + w0.s2 + w1.s2 + w2.s2) * 0.25f;
+ out1.s2 = (w0.s0 + w1.s0 + w2.s0 + w0.s2 + w1.s2 + w2.s2 - w0.s1 - w1.s1 - w2.s1) * 0.25f;
+ out1.s3 = (w0.s2 + w1.s2 + w2.s2) * 0.5f;
+
+ // Row 2
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ out2 = 0.0f;
+ out2.s0 = (w0.s0 + w2.s0 - w1.s0) * 0.5f;
+ out2.s1 = (w0.s0 + w2.s0 + w0.s1 + w2.s1 + w0.s2 + w2.s2 - w1.s0 - w1.s1 - w1.s2) * 0.25f;
+ out2.s2 = (w0.s0 + w2.s0 + w1.s1 + w0.s2 + w2.s2 - w1.s0 - w0.s1 - w2.s1 - w1.s2) * 0.25f;
+ out2.s3 = (w0.s2 + w2.s2 - w1.s2) * 0.5f;
+
+ // Row 3
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ out3 = 0.0f;
+ out3.s0 = (w2.s0);
+ out3.s1 = (w2.s0 + w2.s1 + w2.s2) * 0.5f;
+ out3.s2 = (w2.s0 + w2.s2 - w2.s1) * 0.5f;
+ out3.s3 = (w2.s2);
+#endif // !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
+
+ int z = get_global_id(2);
+ int x0 = z / SRC_DIM_Z; // idx filter
+ int y0 = z % SRC_DIM_Z; // idx channel
+
+ // Get output address
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x0 * dst_stride_x + y0 * dst_stride_y;
+
+ // Store the values across the channels
+ // 16 channels for 3x3 kernels
+ // 4 channels for 3x1 or 1x3 kernels
+ *(__global DATA_TYPE *)(dst_addr + 0 * dst_stride_z) = out0.s0;
+ *(__global DATA_TYPE *)(dst_addr + 1 * dst_stride_z) = out0.s1;
+ *(__global DATA_TYPE *)(dst_addr + 2 * dst_stride_z) = out0.s2;
+ *(__global DATA_TYPE *)(dst_addr + 3 * dst_stride_z) = out0.s3;
+
+#if !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
+ *(__global DATA_TYPE *)(dst_addr + 4 * dst_stride_z) = out1.s0;
+ *(__global DATA_TYPE *)(dst_addr + 5 * dst_stride_z) = out1.s1;
+ *(__global DATA_TYPE *)(dst_addr + 6 * dst_stride_z) = out1.s2;
+ *(__global DATA_TYPE *)(dst_addr + 7 * dst_stride_z) = out1.s3;
+ *(__global DATA_TYPE *)(dst_addr + 8 * dst_stride_z) = out2.s0;
+ *(__global DATA_TYPE *)(dst_addr + 9 * dst_stride_z) = out2.s1;
+ *(__global DATA_TYPE *)(dst_addr + 10 * dst_stride_z) = out2.s2;
+ *(__global DATA_TYPE *)(dst_addr + 11 * dst_stride_z) = out2.s3;
+ *(__global DATA_TYPE *)(dst_addr + 12 * dst_stride_z) = out3.s0;
+ *(__global DATA_TYPE *)(dst_addr + 13 * dst_stride_z) = out3.s1;
+ *(__global DATA_TYPE *)(dst_addr + 14 * dst_stride_z) = out3.s2;
+ *(__global DATA_TYPE *)(dst_addr + 15 * dst_stride_z) = out3.s3;
+#endif // !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
+}
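
The coefficients in winograd_filter_transform_2x2_3x3_nchw (1, 0.5, 0.25) are consistent with the matrix form out = G * w * G^T using G = [[1, 0, 0], [1/2, 1/2, 1/2], [1/2, -1/2, 1/2], [0, 0, 1]]; that reading is an interpretation offered for illustration, since the kernel only hard-codes the expanded products. The host C sketch below computes the same 4x4 transformed tile from a sample 3x3 filter via the matrix form.

    #include <stdio.h>

    /* Host-side sketch: 4x4 Winograd filter transform of a 3x3 filter, out = G * w * G^T. */
    int main(void)
    {
        const float G[4][3] = { { 1.f, 0.f, 0.f },
                                { 0.5f, 0.5f, 0.5f },
                                { 0.5f, -0.5f, 0.5f },
                                { 0.f, 0.f, 1.f } };
        const float w[3][3] = { { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } }; /* sample filter */

        float Gw[4][3] = { { 0 } };
        for (int i = 0; i < 4; ++i)
            for (int j = 0; j < 3; ++j)
                for (int k = 0; k < 3; ++k)
                    Gw[i][j] += G[i][k] * w[k][j];

        float out[4][4] = { { 0 } }; /* rows correspond to out0..out3 in the kernel */
        for (int i = 0; i < 4; ++i)
            for (int j = 0; j < 4; ++j)
                for (int k = 0; k < 3; ++k)
                    out[i][j] += Gw[i][k] * G[j][k]; /* multiply by G^T */

        for (int i = 0; i < 4; ++i)
            printf("%7.3f %7.3f %7.3f %7.3f\n", out[i][0], out[i][1], out[i][2], out[i][3]);
        return 0;
    }
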
+
+/** This OpenCL kernel performs Winograd filter transform 3x3/3x1/1x3 when the data layout is NCHW and the output tile is 4x4/4x1/1x4
+ *
+ * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
+ * @note If this kernel is used to perform Winograd filter transform 3x1, -DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note If this kernel is used to perform Winograd filter transform 1x3, -DWINOGRAD_FILTER_TRANSFORM_VERTICAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_stride_z                       Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in]  dst_step_z                         dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void winograd_filter_transform_4x4_3x3_nchw(
+ TENSOR4D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst))
+{
+ Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DIM_Z);
+
+ const __global uchar *src_addr = tensor4D_offset(&src, 0, 0, 0, 0);
+
+ // Load the values from the input tensor
+#if defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL)
+ VEC_DATA_TYPE(DATA_TYPE, 3)
+ w0 = vload3(0, (__global DATA_TYPE *)(src_addr));
+#elif defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
+ VEC_DATA_TYPE(DATA_TYPE, 3)
+ w0 = (VEC_DATA_TYPE(DATA_TYPE, 3))(*((__global DATA_TYPE *)(src_addr + 0 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 1 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 2 * src_stride_y)));
+#else // defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
+ VEC_DATA_TYPE(DATA_TYPE, 3)
+ w0 = vload3(0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y));
+ VEC_DATA_TYPE(DATA_TYPE, 3)
+ w1 = vload3(0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y));
+ VEC_DATA_TYPE(DATA_TYPE, 3)
+ w2 = vload3(0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y));
+#endif // defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL)
+
+ // Row 0
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ out0 = 0.0f;
+ out0.s0 = (w0.s0) / 16.f;
+ out0.s1 = (-w0.s0 - w0.s1 - w0.s2) / 24.f;
+ out0.s2 = (-w0.s0 + w0.s1 - w0.s2) / 24.f;
+ out0.s3 = (w0.s0 + 2.f * w0.s1 + 4.f * w0.s2) / 96.f;
+ out0.s4 = (w0.s0 - 2.f * w0.s1 + 4.f * w0.s2) / 96.f;
+ out0.s5 = (w0.s2) / 4.f;
+
+#if !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
+ // Row 1
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ out1 = 0.0f;
+ out1.s0 = (-w0.s0 - w1.s0 - w2.s0) / 24.f;
+ out1.s1 = (w0.s0 + w1.s0 + w2.s0 + w0.s1 + w1.s1 + w2.s1 + w0.s2 + w1.s2 + w2.s2) / 36.f;
+ out1.s2 = (w0.s0 + w1.s0 + w2.s0 - w0.s1 - w1.s1 - w2.s1 + w0.s2 + w1.s2 + w2.s2) / 36.f;
+ out1.s3 = (-w0.s0 - w1.s0 - w2.s0 + 2.f * (-w0.s1 - w1.s1 - w2.s1) + 4.f * (-w0.s2 - w1.s2 - w2.s2)) / 144.f;
+ out1.s4 = (-w0.s0 - w1.s0 - w2.s0 + 2.f * (w0.s1 + w1.s1 + w2.s1) + 4.f * (-w0.s2 - w1.s2 - w2.s2)) / 144.f;
+ out1.s5 = (-w0.s2 - w1.s2 - w2.s2) / 6.f;
+
+ // Row 2
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ out2 = 0.0f;
+ out2.s0 = (-w0.s0 + w1.s0 - w2.s0) / 24.f;
+ out2.s1 = (w0.s0 - w1.s0 + w2.s0 + w0.s1 - w1.s1 + w2.s1 + w0.s2 - w1.s2 + w2.s2) / 36.f;
+ out2.s2 = (w0.s0 - w1.s0 + w2.s0 - w0.s1 + w1.s1 - w2.s1 + w0.s2 - w1.s2 + w2.s2) / 36.f;
+ out2.s3 = (-w0.s0 + w1.s0 - w2.s0 + 2.f * (-w0.s1 + w1.s1 - w2.s1) + 4.f * (-w0.s2 + w1.s2 - w2.s2)) / 144.f;
+ out2.s4 = (-w0.s0 + w1.s0 - w2.s0 + 2.f * (w0.s1 - w1.s1 + w2.s1) + 4.f * (-w0.s2 + w1.s2 - w2.s2)) / 144.f;
+ out2.s5 = (-w0.s2 + w1.s2 - w2.s2) / 6.f;
+
+ // Row 3
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ out3 = 0.0f;
+ out3.s0 = (w0.s0 + 2.f * w1.s0 + 4.f * w2.s0) / 96.f;
+ out3.s1 = (-w0.s0 - 2.f * w1.s0 - 4.f * w2.s0 - w0.s1 - 2.f * w1.s1 - 4.f * w2.s1 - w0.s2 - 2.f * w1.s2 - 4.f * w2.s2) / 144.f;
+ out3.s2 = (-w0.s0 - 2.f * w1.s0 - 4.f * w2.s0 + w0.s1 + 2.f * w1.s1 + 4.f * w2.s1 - w0.s2 - 2.f * w1.s2 - 4.f * w2.s2) / 144.f;
+ out3.s3 = ((w0.s0 + 2.f * w1.s0 + 4.f * w2.s0) + 2.f * (w0.s1 + 2.f * w1.s1 + 4.f * w2.s1) + 4.f * (w0.s2 + 2.f * w1.s2 + 4.f * w2.s2)) / 576.f;
+ out3.s4 = ((w0.s0 + 2.f * w1.s0 + 4.f * w2.s0) + 2.f * (-w0.s1 - 2.f * w1.s1 - 4.f * w2.s1) + 4.f * (w0.s2 + 2.f * w1.s2 + 4.f * w2.s2)) / 576.f;
+ out3.s5 = (w0.s2 + 2.f * w1.s2 + 4.f * w2.s2) / 24.f;
+
+ // Row 4
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ out4 = 0.0f;
+ out4.s0 = (w0.s0 - 2.f * w1.s0 + 4.f * w2.s0) / 96.f;
+ out4.s1 = (-w0.s0 + 2.f * w1.s0 - 4.f * w2.s0 - w0.s1 + 2.f * w1.s1 - 4.f * w2.s1 - w0.s2 + 2.f * w1.s2 - 4.f * w2.s2) / 144.f;
+ out4.s2 = (-w0.s0 + 2.f * w1.s0 - 4.f * w2.s0 + w0.s1 - 2.f * w1.s1 + 4.f * w2.s1 - w0.s2 + 2.f * w1.s2 - 4.f * w2.s2) / 144.f;
+ out4.s3 = ((w0.s0 - 2.f * w1.s0 + 4.f * w2.s0) + 2.f * (w0.s1 - 2.f * w1.s1 + 4.f * w2.s1) + 4.f * (w0.s2 - 2.f * w1.s2 + 4.f * w2.s2)) / 576.f;
+ out4.s4 = ((w0.s0 - 2.f * w1.s0 + 4.f * w2.s0) + 2.f * (-w0.s1 + 2.f * w1.s1 - 4.f * w2.s1) + 4.f * (w0.s2 - 2.f * w1.s2 + 4.f * w2.s2)) / 576.f;
+ out4.s5 = (w0.s2 - 2.f * w1.s2 + 4.f * w2.s2) / 24.f;
+
+ // Row 5
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ out5 = 0.0f;
+ out5.s0 = (w2.s0) / 4.f;
+ out5.s1 = (-w2.s0 - w2.s1 - w2.s2) / 6.f;
+ out5.s2 = (-w2.s0 + w2.s1 - w2.s2) / 6.f;
+ out5.s3 = (w2.s0 + 2.f * w2.s1 + 4.f * w2.s2) / 24.f;
+ out5.s4 = (w2.s0 - 2.f * w2.s1 + 4.f * w2.s2) / 24.f;
+ out5.s5 = (w2.s2);
+#endif // !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
+
+ int z = get_global_id(2);
+ int x0 = z / SRC_DIM_Z; // idx filter
+ int y0 = z % SRC_DIM_Z; // idx channel
+
+ // Get output address
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x0 * dst_stride_x + y0 * dst_stride_y;
+
+ // Store the values across the channels
+ // 36 channels for 3x3 kernels
+ // 6 channels for 3x1 or 1x3 kernels
+ *(__global DATA_TYPE *)(dst_addr + 0 * dst_stride_z) = out0.s0;
+ *(__global DATA_TYPE *)(dst_addr + 1 * dst_stride_z) = out0.s1;
+ *(__global DATA_TYPE *)(dst_addr + 2 * dst_stride_z) = out0.s2;
+ *(__global DATA_TYPE *)(dst_addr + 3 * dst_stride_z) = out0.s3;
+ *(__global DATA_TYPE *)(dst_addr + 4 * dst_stride_z) = out0.s4;
+ *(__global DATA_TYPE *)(dst_addr + 5 * dst_stride_z) = out0.s5;
+
+#if !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
+ *(__global DATA_TYPE *)(dst_addr + 6 * dst_stride_z) = out1.s0;
+ *(__global DATA_TYPE *)(dst_addr + 7 * dst_stride_z) = out1.s1;
+ *(__global DATA_TYPE *)(dst_addr + 8 * dst_stride_z) = out1.s2;
+ *(__global DATA_TYPE *)(dst_addr + 9 * dst_stride_z) = out1.s3;
+ *(__global DATA_TYPE *)(dst_addr + 10 * dst_stride_z) = out1.s4;
+ *(__global DATA_TYPE *)(dst_addr + 11 * dst_stride_z) = out1.s5;
+ *(__global DATA_TYPE *)(dst_addr + 12 * dst_stride_z) = out2.s0;
+ *(__global DATA_TYPE *)(dst_addr + 13 * dst_stride_z) = out2.s1;
+ *(__global DATA_TYPE *)(dst_addr + 14 * dst_stride_z) = out2.s2;
+ *(__global DATA_TYPE *)(dst_addr + 15 * dst_stride_z) = out2.s3;
+ *(__global DATA_TYPE *)(dst_addr + 16 * dst_stride_z) = out2.s4;
+ *(__global DATA_TYPE *)(dst_addr + 17 * dst_stride_z) = out2.s5;
+ *(__global DATA_TYPE *)(dst_addr + 18 * dst_stride_z) = out3.s0;
+ *(__global DATA_TYPE *)(dst_addr + 19 * dst_stride_z) = out3.s1;
+ *(__global DATA_TYPE *)(dst_addr + 20 * dst_stride_z) = out3.s2;
+ *(__global DATA_TYPE *)(dst_addr + 21 * dst_stride_z) = out3.s3;
+ *(__global DATA_TYPE *)(dst_addr + 22 * dst_stride_z) = out3.s4;
+ *(__global DATA_TYPE *)(dst_addr + 23 * dst_stride_z) = out3.s5;
+ *(__global DATA_TYPE *)(dst_addr + 24 * dst_stride_z) = out4.s0;
+ *(__global DATA_TYPE *)(dst_addr + 25 * dst_stride_z) = out4.s1;
+ *(__global DATA_TYPE *)(dst_addr + 26 * dst_stride_z) = out4.s2;
+ *(__global DATA_TYPE *)(dst_addr + 27 * dst_stride_z) = out4.s3;
+ *(__global DATA_TYPE *)(dst_addr + 28 * dst_stride_z) = out4.s4;
+ *(__global DATA_TYPE *)(dst_addr + 29 * dst_stride_z) = out4.s5;
+ *(__global DATA_TYPE *)(dst_addr + 30 * dst_stride_z) = out5.s0;
+ *(__global DATA_TYPE *)(dst_addr + 31 * dst_stride_z) = out5.s1;
+ *(__global DATA_TYPE *)(dst_addr + 32 * dst_stride_z) = out5.s2;
+ *(__global DATA_TYPE *)(dst_addr + 33 * dst_stride_z) = out5.s3;
+ *(__global DATA_TYPE *)(dst_addr + 34 * dst_stride_z) = out5.s4;
+ *(__global DATA_TYPE *)(dst_addr + 35 * dst_stride_z) = out5.s5;
+#endif // !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
+}
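
Likewise, the fractions in the 4x4/3x3 variant (1/16, 1/24, 1/96, ...) are consistent with out = G * w * G^T using the 6x3 matrix sketched below; again this is an interpretation for illustration, and the kernel only stores the expanded products. The small check program verifies one entry against the kernel's formula.

    #include <stdio.h>

    /* Sketch only: candidate F(4x4, 3x3) filter-transform matrix G, inferred from the
     * per-element formulas above rather than stated anywhere in the kernel. */
    static const float G[6][3] = {
        {  1.f / 4,   0.f,        0.f     },
        { -1.f / 6,  -1.f / 6,   -1.f / 6 },
        { -1.f / 6,   1.f / 6,   -1.f / 6 },
        {  1.f / 24,  1.f / 12,   1.f / 6 },
        {  1.f / 24, -1.f / 12,   1.f / 6 },
        {  0.f,       0.f,        1.f     }
    };

    int main(void)
    {
        /* Compare out0.s3 computed via the matrix with the kernel's expanded formula. */
        const float w0[3]      = { 1.f, 2.f, 3.f }; /* sample first filter row */
        const float via_matrix = G[0][0] * (G[3][0] * w0[0] + G[3][1] * w0[1] + G[3][2] * w0[2]);
        const float via_kernel = (w0[0] + 2.f * w0[1] + 4.f * w0[2]) / 96.f;
        printf("out0.s3: matrix=%f kernel=%f\n", via_matrix, via_kernel);
        return 0;
    }
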
+
+/** This OpenCL kernel performs Winograd filter transform 5x5/5x1 or 1x5 when the data layout is NCHW and the output tile is 4x4/4x1 or 1x4
+ *
+ * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
+ *
+ * @note If this kernel is used to perform Winograd filter transform 5x1, -DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note If this kernel is used to perform Winograd filter transform 1x5, -DWINOGRAD_FILTER_TRANSFORM_VERTICAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_stride_z                       Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in]  dst_step_z                         dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void winograd_filter_transform_4x4_5x5_nchw(
+ TENSOR4D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst))
+{
+ Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DIM_Z);
+
+ const __global uchar *src_addr = tensor4D_offset(&src, 0, 0, 0, 0);
+
+ // Load the values from the input tensor
+#if defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL)
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ w00 = vload4(0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y));
+ DATA_TYPE w01 = *((__global DATA_TYPE *)(src_addr + 0 * src_stride_y) + 4);
+#elif defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ w00 = (VEC_DATA_TYPE(DATA_TYPE, 4))(*((__global DATA_TYPE *)(src_addr + 0 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 1 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 2 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 3 * src_stride_y)));
+ DATA_TYPE w01 = *((__global DATA_TYPE *)(src_addr + 4 * src_stride_y));
+#else // defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ w00 = vload4(0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y));
+ DATA_TYPE w01 = *((__global DATA_TYPE *)(src_addr + 0 * src_stride_y) + 4);
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ w10 = vload4(0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y));
+ DATA_TYPE w11 = *((__global DATA_TYPE *)(src_addr + 1 * src_stride_y) + 4);
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ w20 = vload4(0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y));
+ DATA_TYPE w21 = *((__global DATA_TYPE *)(src_addr + 2 * src_stride_y) + 4);
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ w30 = vload4(0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_y));
+ DATA_TYPE w31 = *((__global DATA_TYPE *)(src_addr + 3 * src_stride_y) + 4);
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ w40 = vload4(0, (__global DATA_TYPE *)(src_addr + 4 * src_stride_y));
+ DATA_TYPE w41 = *((__global DATA_TYPE *)(src_addr + 4 * src_stride_y) + 4);
+#endif // defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL)
+
+ // Transform the input tile
+
+ // Row 0
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ out0 = 0.0f;
+ out0.s0 = w00.s0;
+ out0.s1 = -2.f * (w00.s0 + w00.s1 + w00.s2 + w00.s3 + w01) / 9.f;
+ out0.s2 = -2.f * (w00.s0 - w00.s1 + w00.s2 - w00.s3 + w01) / 9.f;
+ out0.s3 = (w00.s0 + 2.f * w00.s1 + 4.f * w00.s2 + 8.f * w00.s3 + 16.f * w01) / 90.f;
+ out0.s4 = (w00.s0 - 2.f * w00.s1 + 4.f * w00.s2 - 8.f * w00.s3 + 16.f * w01) / 90.f;
+ out0.s5 = (16.f * w00.s0 + 8.f * w00.s1 + 4.f * w00.s2 + 2.f * w00.s3 + w01) / 180.f;
+ out0.s6 = (16.f * w00.s0 - 8.f * w00.s1 + 4.f * w00.s2 - 2.f * w00.s3 + w01) / 180.f;
+ out0.s7 = w01;
+
+#if !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
+ // Row 1
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ out1 = 0.0f;
+ out1.s0 = -2.f * (w00.s0 + w10.s0 + w20.s0 + w30.s0 + w40.s0) / 9.f;
+ out1.s1 = 4.f * ((w00.s0 + w10.s0 + w20.s0 + w30.s0 + w40.s0) + (w00.s1 + w10.s1 + w20.s1 + w30.s1 + w40.s1) + (w00.s2 + w10.s2 + w20.s2 + w30.s2 + w40.s2) +
+ (w00.s3 + w10.s3 + w20.s3 + w30.s3 + w40.s3) + (w01 + w11 + w21 + w31 + w41)) / 81.f;
+ out1.s2 = 4.f * ((w00.s0 + w10.s0 + w20.s0 + w30.s0 + w40.s0) - (w00.s1 + w10.s1 + w20.s1 + w30.s1 + w40.s1) + (w00.s2 + w10.s2 + w20.s2 + w30.s2 + w40.s2) -
+ (w00.s3 + w10.s3 + w20.s3 + w30.s3 + w40.s3) + (w01 + w11 + w21 + w31 + w41)) / 81.f;
+ out1.s3 = -((w00.s0 + w10.s0 + w20.s0 + w30.s0 + w40.s0) + 2.f * (w00.s1 + w10.s1 + w20.s1 + w30.s1 + w40.s1) + 4.f * (w00.s2 + w10.s2 + w20.s2 + w30.s2 + w40.s2) + 8.f *
+ (w00.s3 + w10.s3 + w20.s3 + w30.s3 + w40.s3) + 16.f * (w01 + w11 + w21 + w31 + w41)) / 405.f;
+ out1.s4 = -((w00.s0 + w10.s0 + w20.s0 + w30.s0 + w40.s0) - 2.f * (w00.s1 + w10.s1 + w20.s1 + w30.s1 + w40.s1) + 4.f * (w00.s2 + w10.s2 + w20.s2 + w30.s2 + w40.s2) - 8.f *
+ (w00.s3 + w10.s3 + w20.s3 + w30.s3 + w40.s3) + 16.f * (w01 + w11 + w21 + w31 + w41)) / 405.f;
+ out1.s5 = -(16.f * (w00.s0 + w10.s0 + w20.s0 + w30.s0 + w40.s0) + 8.f * (w00.s1 + w10.s1 + w20.s1 + w30.s1 + w40.s1) + 4.f * (w00.s2 + w10.s2 + w20.s2 + w30.s2 + w40.s2) + 2.f *
+ (w00.s3 + w10.s3 + w20.s3 + w30.s3 + w40.s3) + (w01 + w11 + w21 + w31 + w41)) / 810.f;
+ out1.s6 = -(16.f * (w00.s0 + w10.s0 + w20.s0 + w30.s0 + w40.s0) - 8.f * (w00.s1 + w10.s1 + w20.s1 + w30.s1 + w40.s1) + 4.f * (w00.s2 + w10.s2 + w20.s2 + w30.s2 + w40.s2) - 2.f *
+ (w00.s3 + w10.s3 + w20.s3 + w30.s3 + w40.s3) + (w01 + w11 + w21 + w31 + w41)) / 810.f;
+ out1.s7 = -2.f * (w01 + w11 + w21 + w31 + w41) / 9.f;
+
+ // Row 2
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ out2 = 0.0f;
+ out2.s0 = -2.f * (w00.s0 - w10.s0 + w20.s0 - w30.s0 + w40.s0) / 9.f;
+ out2.s1 = 4.f * ((w00.s0 - w10.s0 + w20.s0 - w30.s0 + w40.s0) + (w00.s1 - w10.s1 + w20.s1 - w30.s1 + w40.s1) + (w00.s2 - w10.s2 + w20.s2 - w30.s2 + w40.s2) +
+ (w00.s3 - w10.s3 + w20.s3 - w30.s3 + w40.s3) + (w01 - w11 + w21 - w31 + w41)) / 81.f;
+ out2.s2 = 4.f * ((w00.s0 - w10.s0 + w20.s0 - w30.s0 + w40.s0) - (w00.s1 - w10.s1 + w20.s1 - w30.s1 + w40.s1) + (w00.s2 - w10.s2 + w20.s2 - w30.s2 + w40.s2) -
+ (w00.s3 - w10.s3 + w20.s3 - w30.s3 + w40.s3) + (w01 - w11 + w21 - w31 + w41)) / 81.f;
+ out2.s3 = -((w00.s0 - w10.s0 + w20.s0 - w30.s0 + w40.s0) + 2.f * (w00.s1 - w10.s1 + w20.s1 - w30.s1 + w40.s1) + 4.f * (w00.s2 - w10.s2 + w20.s2 - w30.s2 + w40.s2) + 8.f *
+ (w00.s3 - w10.s3 + w20.s3 - w30.s3 + w40.s3) + 16.f * (w01 - w11 + w21 - w31 + w41)) / 405.f;
+ out2.s4 = -((w00.s0 - w10.s0 + w20.s0 - w30.s0 + w40.s0) - 2.f * (w00.s1 - w10.s1 + w20.s1 - w30.s1 + w40.s1) + 4.f * (w00.s2 - w10.s2 + w20.s2 - w30.s2 + w40.s2) - 8.f *
+ (w00.s3 - w10.s3 + w20.s3 - w30.s3 + w40.s3) + 16.f * (w01 - w11 + w21 - w31 + w41)) / 405.f;
+ out2.s5 = -(16.f * (w00.s0 - w10.s0 + w20.s0 - w30.s0 + w40.s0) + 8.f * (w00.s1 - w10.s1 + w20.s1 - w30.s1 + w40.s1) + 4.f * (w00.s2 - w10.s2 + w20.s2 - w30.s2 + w40.s2) + 2.f *
+ (w00.s3 - w10.s3 + w20.s3 - w30.s3 + w40.s3) + (w01 - w11 + w21 - w31 + w41)) / 810.f;
+ out2.s6 = -(16.f * (w00.s0 - w10.s0 + w20.s0 - w30.s0 + w40.s0) - 8.f * (w00.s1 - w10.s1 + w20.s1 - w30.s1 + w40.s1) + 4.f * (w00.s2 - w10.s2 + w20.s2 - w30.s2 + w40.s2) - 2.f *
+ (w00.s3 - w10.s3 + w20.s3 - w30.s3 + w40.s3) + (w01 - w11 + w21 - w31 + w41)) / 810.f;
+ out2.s7 = -2.f * (w01 - w11 + w21 - w31 + w41) / 9.f;
+
+ // Row 3
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ out3 = 0.0f;
+ out3.s0 = (w00.s0 + 2.f * w10.s0 + 4.f * w20.s0 + 8.f * w30.s0 + 16.f * w40.s0) / 90.f;
+ out3.s1 = -((w00.s0 + 2.f * w10.s0 + 4.f * w20.s0 + 8.f * w30.s0 + 16.f * w40.s0) + (w00.s1 + 2.f * w10.s1 + 4.f * w20.s1 + 8.f * w30.s1 + 16.f * w40.s1) +
+ (w00.s2 + 2.f * w10.s2 + 4.f * w20.s2 + 8.f * w30.s2 + 16.f * w40.s2) + (w00.s3 + 2.f * w10.s3 + 4.f * w20.s3 + 8.f * w30.s3 + 16.f * w40.s3) +
+ (w01 + 2.f * w11 + 4.f * w21 + 8.f * w31 + 16.f * w41)) / 405.f;
+ out3.s2 = -((w00.s0 + 2.f * w10.s0 + 4.f * w20.s0 + 8.f * w30.s0 + 16.f * w40.s0) - (w00.s1 + 2.f * w10.s1 + 4.f * w20.s1 + 8.f * w30.s1 + 16.f * w40.s1) +
+ (w00.s2 + 2.f * w10.s2 + 4.f * w20.s2 + 8.f * w30.s2 + 16.f * w40.s2) - (w00.s3 + 2.f * w10.s3 + 4.f * w20.s3 + 8.f * w30.s3 + 16.f * w40.s3) +
+ (w01 + 2.f * w11 + 4.f * w21 + 8.f * w31 + 16.f * w41)) / 405.f;
+ out3.s3 = ((w00.s0 + 2.f * w10.s0 + 4.f * w20.s0 + 8.f * w30.s0 + 16.f * w40.s0) + 2.f * (w00.s1 + 2.f * w10.s1 + 4.f * w20.s1 + 8.f * w30.s1 + 16.f * w40.s1) + 4.f *
+ (w00.s2 + 2.f * w10.s2 + 4.f * w20.s2 + 8.f * w30.s2 + 16.f * w40.s2) + 8.f * (w00.s3 + 2.f * w10.s3 + 4.f * w20.s3 + 8.f * w30.s3 + 16.f * w40.s3) + 16.f *
+ (w01 + 2.f * w11 + 4.f * w21 + 8.f * w31 + 16.f * w41)) / 8100.f;
+ out3.s4 = ((w00.s0 + 2.f * w10.s0 + 4.f * w20.s0 + 8.f * w30.s0 + 16.f * w40.s0) - 2.f * (w00.s1 + 2.f * w10.s1 + 4.f * w20.s1 + 8.f * w30.s1 + 16.f * w40.s1) + 4.f *
+ (w00.s2 + 2.f * w10.s2 + 4.f * w20.s2 + 8.f * w30.s2 + 16.f * w40.s2) - 8.f * (w00.s3 + 2.f * w10.s3 + 4.f * w20.s3 + 8.f * w30.s3 + 16.f * w40.s3) + 16.f *
+ (w01 + 2.f * w11 + 4.f * w21 + 8.f * w31 + 16.f * w41)) / 8100.f;
+ out3.s5 = (16.f * (w00.s0 + 2.f * w10.s0 + 4.f * w20.s0 + 8.f * w30.s0 + 16.f * w40.s0) + 8.f * (w00.s1 + 2.f * w10.s1 + 4.f * w20.s1 + 8.f * w30.s1 + 16.f * w40.s1) + 4.f *
+ (w00.s2 + 2.f * w10.s2 + 4.f * w20.s2 + 8.f * w30.s2 + 16.f * w40.s2) + 2.f * (w00.s3 + 2.f * w10.s3 + 4.f * w20.s3 + 8.f * w30.s3 + 16.f * w40.s3) +
+ (w01 + 2.f * w11 + 4.f * w21 + 8.f * w31 + 16.f * w41)) / 16200.f;
+ out3.s6 = (16.f * (w00.s0 + 2.f * w10.s0 + 4.f * w20.s0 + 8.f * w30.s0 + 16.f * w40.s0) - 8.f * (w00.s1 + 2.f * w10.s1 + 4.f * w20.s1 + 8.f * w30.s1 + 16.f * w40.s1) + 4.f *
+ (w00.s2 + 2.f * w10.s2 + 4.f * w20.s2 + 8.f * w30.s2 + 16.f * w40.s2) - 2.f * (w00.s3 + 2.f * w10.s3 + 4.f * w20.s3 + 8.f * w30.s3 + 16.f * w40.s3) +
+ (w01 + 2.f * w11 + 4.f * w21 + 8.f * w31 + 16.f * w41)) / 16200.f;
+ out3.s7 = (w01 + 2.f * w11 + 4.f * w21 + 8.f * w31 + 16.f * w41) / 90.f;
+
+ // Row 4
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ out4 = 0.0f;
+ out4.s0 = (w00.s0 - 2.f * w10.s0 + 4.f * w20.s0 - 8.f * w30.s0 + 16.f * w40.s0) / 90.f;
+ out4.s1 = -((w00.s0 - 2.f * w10.s0 + 4.f * w20.s0 - 8.f * w30.s0 + 16.f * w40.s0) + (w00.s1 - 2.f * w10.s1 + 4.f * w20.s1 - 8.f * w30.s1 + 16.f * w40.s1) +
+ (w00.s2 - 2.f * w10.s2 + 4.f * w20.s2 - 8.f * w30.s2 + 16.f * w40.s2) + (w00.s3 - 2.f * w10.s3 + 4.f * w20.s3 - 8.f * w30.s3 + 16.f * w40.s3) +
+ (w01 - 2.f * w11 + 4.f * w21 - 8.f * w31 + 16.f * w41)) / 405.f;
+ out4.s2 = -((w00.s0 - 2.f * w10.s0 + 4.f * w20.s0 - 8.f * w30.s0 + 16.f * w40.s0) - (w00.s1 - 2.f * w10.s1 + 4.f * w20.s1 - 8.f * w30.s1 + 16.f * w40.s1) +
+ (w00.s2 - 2.f * w10.s2 + 4.f * w20.s2 - 8.f * w30.s2 + 16.f * w40.s2) - (w00.s3 - 2.f * w10.s3 + 4.f * w20.s3 - 8.f * w30.s3 + 16.f * w40.s3) +
+ (w01 - 2.f * w11 + 4.f * w21 - 8.f * w31 + 16.f * w41)) / 405.f;
+ out4.s3 = ((w00.s0 - 2.f * w10.s0 + 4.f * w20.s0 - 8.f * w30.s0 + 16.f * w40.s0) + 2.f * (w00.s1 - 2.f * w10.s1 + 4.f * w20.s1 - 8.f * w30.s1 + 16.f * w40.s1) + 4.f *
+ (w00.s2 - 2.f * w10.s2 + 4.f * w20.s2 - 8.f * w30.s2 + 16.f * w40.s2) + 8.f * (w00.s3 - 2.f * w10.s3 + 4.f * w20.s3 - 8.f * w30.s3 + 16.f * w40.s3) + 16.f *
+ (w01 - 2.f * w11 + 4.f * w21 - 8.f * w31 + 16.f * w41)) / 8100.f;
+ out4.s4 = ((w00.s0 - 2.f * w10.s0 + 4.f * w20.s0 - 8.f * w30.s0 + 16.f * w40.s0) - 2.f * (w00.s1 - 2.f * w10.s1 + 4.f * w20.s1 - 8.f * w30.s1 + 16.f * w40.s1) + 4.f *
+ (w00.s2 - 2.f * w10.s2 + 4.f * w20.s2 - 8.f * w30.s2 + 16.f * w40.s2) - 8.f * (w00.s3 - 2.f * w10.s3 + 4.f * w20.s3 - 8.f * w30.s3 + 16.f * w40.s3) + 16.f *
+ (w01 - 2.f * w11 + 4.f * w21 - 8.f * w31 + 16.f * w41)) / 8100.f;
+ out4.s5 = (16.f * (w00.s0 - 2.f * w10.s0 + 4.f * w20.s0 - 8.f * w30.s0 + 16.f * w40.s0) + 8.f * (w00.s1 - 2.f * w10.s1 + 4.f * w20.s1 - 8.f * w30.s1 + 16.f * w40.s1) + 4.f *
+ (w00.s2 - 2.f * w10.s2 + 4.f * w20.s2 - 8.f * w30.s2 + 16.f * w40.s2) + 2.f * (w00.s3 - 2.f * w10.s3 + 4.f * w20.s3 - 8.f * w30.s3 + 16.f * w40.s3) +
+ (w01 - 2.f * w11 + 4.f * w21 - 8.f * w31 + 16.f * w41)) / 16200.f;
+ out4.s6 = (16.f * (w00.s0 - 2.f * w10.s0 + 4.f * w20.s0 - 8.f * w30.s0 + 16.f * w40.s0) - 8.f * (w00.s1 - 2.f * w10.s1 + 4.f * w20.s1 - 8.f * w30.s1 + 16.f * w40.s1) + 4.f *
+ (w00.s2 - 2.f * w10.s2 + 4.f * w20.s2 - 8.f * w30.s2 + 16.f * w40.s2) - 2.f * (w00.s3 - 2.f * w10.s3 + 4.f * w20.s3 - 8.f * w30.s3 + 16.f * w40.s3) +
+ (w01 - 2.f * w11 + 4.f * w21 - 8.f * w31 + 16.f * w41)) / 16200.f;
+ out4.s7 = (w01 - 2.f * w11 + 4.f * w21 - 8.f * w31 + 16.f * w41) / 90.f;
+
+ // Row 5
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ out5 = 0.0f;
+ out5.s0 = (16.f * w00.s0 + 8.f * w10.s0 + 4.f * w20.s0 + 2.f * w30.s0 + w40.s0) / 180.f;
+ out5.s1 = -((16.f * w00.s0 + 8.f * w10.s0 + 4.f * w20.s0 + 2.f * w30.s0 + w40.s0) + (16.f * w00.s1 + 8.f * w10.s1 + 4.f * w20.s1 + 2.f * w30.s1 + w40.s1) +
+ (16.f * w00.s2 + 8.f * w10.s2 + 4.f * w20.s2 + 2.f * w30.s2 + w40.s2) + (16.f * w00.s3 + 8.f * w10.s3 + 4.f * w20.s3 + 2.f * w30.s3 + w40.s3) +
+ (16.f * w01 + 8.f * w11 + 4.f * w21 + 2.f * w31 + w41)) / 810.f;
+ out5.s2 = -((16.f * w00.s0 + 8.f * w10.s0 + 4.f * w20.s0 + 2.f * w30.s0 + w40.s0) - (16.f * w00.s1 + 8.f * w10.s1 + 4.f * w20.s1 + 2.f * w30.s1 + w40.s1) +
+ (16.f * w00.s2 + 8.f * w10.s2 + 4.f * w20.s2 + 2.f * w30.s2 + w40.s2) - (16.f * w00.s3 + 8.f * w10.s3 + 4.f * w20.s3 + 2.f * w30.s3 + w40.s3) +
+ (16.f * w01 + 8.f * w11 + 4.f * w21 + 2.f * w31 + w41)) / 810.f;
+ out5.s3 = ((16.f * w00.s0 + 8.f * w10.s0 + 4.f * w20.s0 + 2.f * w30.s0 + w40.s0) + 2.f * (16.f * w00.s1 + 8.f * w10.s1 + 4.f * w20.s1 + 2.f * w30.s1 + w40.s1) + 4.f *
+ (16.f * w00.s2 + 8.f * w10.s2 + 4.f * w20.s2 + 2.f * w30.s2 + w40.s2) + 8.f * (16.f * w00.s3 + 8.f * w10.s3 + 4.f * w20.s3 + 2.f * w30.s3 + w40.s3) + 16.f *
+ (16.f * w01 + 8.f * w11 + 4.f * w21 + 2.f * w31 + w41)) / 16200.f;
+ out5.s4 = ((16.f * w00.s0 + 8.f * w10.s0 + 4.f * w20.s0 + 2.f * w30.s0 + w40.s0) - 2.f * (16.f * w00.s1 + 8.f * w10.s1 + 4.f * w20.s1 + 2.f * w30.s1 + w40.s1) + 4.f *
+ (16.f * w00.s2 + 8.f * w10.s2 + 4.f * w20.s2 + 2.f * w30.s2 + w40.s2) - 8.f * (16.f * w00.s3 + 8.f * w10.s3 + 4.f * w20.s3 + 2.f * w30.s3 + w40.s3) + 16.f *
+ (16.f * w01 + 8.f * w11 + 4.f * w21 + 2.f * w31 + w41)) / 16200.f;
+ out5.s5 = (16.f * (16.f * w00.s0 + 8.f * w10.s0 + 4.f * w20.s0 + 2.f * w30.s0 + w40.s0) + 8.f * (16.f * w00.s1 + 8.f * w10.s1 + 4.f * w20.s1 + 2.f * w30.s1 + w40.s1) + 4.f *
+ (16.f * w00.s2 + 8.f * w10.s2 + 4.f * w20.s2 + 2.f * w30.s2 + w40.s2) + 2.f * (16.f * w00.s3 + 8.f * w10.s3 + 4.f * w20.s3 + 2.f * w30.s3 + w40.s3) +
+ (16.f * w01 + 8.f * w11 + 4.f * w21 + 2.f * w31 + w41)) / 32400.f;
+ out5.s6 = (16.f * (16.f * w00.s0 + 8.f * w10.s0 + 4.f * w20.s0 + 2.f * w30.s0 + w40.s0) - 8.f * (16.f * w00.s1 + 8.f * w10.s1 + 4.f * w20.s1 + 2.f * w30.s1 + w40.s1) + 4.f *
+ (16.f * w00.s2 + 8.f * w10.s2 + 4.f * w20.s2 + 2.f * w30.s2 + w40.s2) - 2.f * (16.f * w00.s3 + 8.f * w10.s3 + 4.f * w20.s3 + 2.f * w30.s3 + w40.s3) +
+ (16.f * w01 + 8.f * w11 + 4.f * w21 + 2.f * w31 + w41)) / 32400.f;
+ out5.s7 = (16.f * w01 + 8.f * w11 + 4.f * w21 + 2.f * w31 + w41) / 180.f;
+
+ // Row 6
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ out6 = 0.0f;
+ out6.s0 = (16.f * w00.s0 - 8.f * w10.s0 + 4.f * w20.s0 - 2.f * w30.s0 + w40.s0) / 180.f;
+ out6.s1 = -((16.f * w00.s0 - 8.f * w10.s0 + 4.f * w20.s0 - 2.f * w30.s0 + w40.s0) + (16.f * w00.s1 - 8.f * w10.s1 + 4.f * w20.s1 - 2.f * w30.s1 + w40.s1) +
+ (16.f * w00.s2 - 8.f * w10.s2 + 4.f * w20.s2 - 2.f * w30.s2 + w40.s2) + (16.f * w00.s3 - 8.f * w10.s3 + 4.f * w20.s3 - 2.f * w30.s3 + w40.s3) +
+ (16.f * w01 - 8.f * w11 + 4.f * w21 - 2.f * w31 + w41)) / 810.f;
+ out6.s2 = -((16.f * w00.s0 - 8.f * w10.s0 + 4.f * w20.s0 - 2.f * w30.s0 + w40.s0) - (16.f * w00.s1 - 8.f * w10.s1 + 4.f * w20.s1 - 2.f * w30.s1 + w40.s1) +
+ (16.f * w00.s2 - 8.f * w10.s2 + 4.f * w20.s2 - 2.f * w30.s2 + w40.s2) - (16.f * w00.s3 - 8.f * w10.s3 + 4.f * w20.s3 - 2.f * w30.s3 + w40.s3) +
+ (16.f * w01 - 8.f * w11 + 4.f * w21 - 2.f * w31 + w41)) / 810.f;
+ out6.s3 = ((16.f * w00.s0 - 8.f * w10.s0 + 4.f * w20.s0 - 2.f * w30.s0 + w40.s0) + 2.f * (16.f * w00.s1 - 8.f * w10.s1 + 4.f * w20.s1 - 2.f * w30.s1 + w40.s1) + 4.f *
+ (16.f * w00.s2 - 8.f * w10.s2 + 4.f * w20.s2 - 2.f * w30.s2 + w40.s2) + 8.f * (16.f * w00.s3 - 8.f * w10.s3 + 4.f * w20.s3 - 2.f * w30.s3 + w40.s3) + 16.f *
+ (16.f * w01 - 8.f * w11 + 4.f * w21 - 2.f * w31 + w41)) / 16200.f;
+ out6.s4 = ((16.f * w00.s0 - 8.f * w10.s0 + 4.f * w20.s0 - 2.f * w30.s0 + w40.s0) - 2.f * (16.f * w00.s1 - 8.f * w10.s1 + 4.f * w20.s1 - 2.f * w30.s1 + w40.s1) + 4.f *
+ (16.f * w00.s2 - 8.f * w10.s2 + 4.f * w20.s2 - 2.f * w30.s2 + w40.s2) - 8.f * (16.f * w00.s3 - 8.f * w10.s3 + 4.f * w20.s3 - 2.f * w30.s3 + w40.s3) + 16.f *
+ (16.f * w01 - 8.f * w11 + 4.f * w21 - 2.f * w31 + w41)) / 16200.f;
+ out6.s5 = (16.f * (16.f * w00.s0 - 8.f * w10.s0 + 4.f * w20.s0 - 2.f * w30.s0 + w40.s0) + 8.f * (16.f * w00.s1 - 8.f * w10.s1 + 4.f * w20.s1 - 2.f * w30.s1 + w40.s1) + 4.f *
+ (16.f * w00.s2 - 8.f * w10.s2 + 4.f * w20.s2 - 2.f * w30.s2 + w40.s2) + 2.f * (16.f * w00.s3 - 8.f * w10.s3 + 4.f * w20.s3 - 2.f * w30.s3 + w40.s3) +
+ (16.f * w01 - 8.f * w11 + 4.f * w21 - 2.f * w31 + w41)) / 32400.f;
+ out6.s6 = (16.f * (16.f * w00.s0 - 8.f * w10.s0 + 4.f * w20.s0 - 2.f * w30.s0 + w40.s0) - 8.f * (16.f * w00.s1 - 8.f * w10.s1 + 4.f * w20.s1 - 2.f * w30.s1 + w40.s1) + 4.f *
+ (16.f * w00.s2 - 8.f * w10.s2 + 4.f * w20.s2 - 2.f * w30.s2 + w40.s2) - 2.f * (16.f * w00.s3 - 8.f * w10.s3 + 4.f * w20.s3 - 2.f * w30.s3 + w40.s3) +
+ (16.f * w01 - 8.f * w11 + 4.f * w21 - 2.f * w31 + w41)) / 32400.f;
+ out6.s7 = (16.f * w01 - 8.f * w11 + 4.f * w21 - 2.f * w31 + w41) / 180.f;
+
+ // Row 7
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ out7 = 0.0f;
+ out7.s0 = w40.s0;
+ out7.s1 = -2.f * (w40.s0 + w40.s1 + w40.s2 + w40.s3 + w41) / 9.f;
+ out7.s2 = -2.f * (w40.s0 - w40.s1 + w40.s2 - w40.s3 + w41) / 9.f;
+ out7.s3 = (w40.s0 + 2.f * w40.s1 + 4.f * w40.s2 + 8.f * w40.s3 + 16.f * w41) / 90.f;
+ out7.s4 = (w40.s0 - 2.f * w40.s1 + 4.f * w40.s2 - 8.f * w40.s3 + 16.f * w41) / 90.f;
+ out7.s5 = (16.f * w40.s0 + 8.f * w40.s1 + 4.f * w40.s2 + 2.f * w40.s3 + w41) / 180.f;
+ out7.s6 = (16.f * w40.s0 - 8.f * w40.s1 + 4.f * w40.s2 - 2.f * w40.s3 + w41) / 180.f;
+ out7.s7 = w41;
+#endif // !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
+
+ int z = get_global_id(2);
+ int x0 = z / SRC_DIM_Z; // idx filter
+ int y0 = z % SRC_DIM_Z; // idx channel
+
+ // Get output address
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x0 * sizeof(DATA_TYPE) + y0 * dst_stride_y;
+
+ // Store the values across the channels
+ *(__global DATA_TYPE *)(dst_addr + 0 * dst_stride_z) = out0.s0;
+ *(__global DATA_TYPE *)(dst_addr + 1 * dst_stride_z) = out0.s1;
+ *(__global DATA_TYPE *)(dst_addr + 2 * dst_stride_z) = out0.s2;
+ *(__global DATA_TYPE *)(dst_addr + 3 * dst_stride_z) = out0.s3;
+ *(__global DATA_TYPE *)(dst_addr + 4 * dst_stride_z) = out0.s4;
+ *(__global DATA_TYPE *)(dst_addr + 5 * dst_stride_z) = out0.s5;
+ *(__global DATA_TYPE *)(dst_addr + 6 * dst_stride_z) = out0.s6;
+ *(__global DATA_TYPE *)(dst_addr + 7 * dst_stride_z) = out0.s7;
+
+#if !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
+ *(__global DATA_TYPE *)(dst_addr + 8 * dst_stride_z) = out1.s0;
+ *(__global DATA_TYPE *)(dst_addr + 9 * dst_stride_z) = out1.s1;
+ *(__global DATA_TYPE *)(dst_addr + 10 * dst_stride_z) = out1.s2;
+ *(__global DATA_TYPE *)(dst_addr + 11 * dst_stride_z) = out1.s3;
+ *(__global DATA_TYPE *)(dst_addr + 12 * dst_stride_z) = out1.s4;
+ *(__global DATA_TYPE *)(dst_addr + 13 * dst_stride_z) = out1.s5;
+ *(__global DATA_TYPE *)(dst_addr + 14 * dst_stride_z) = out1.s6;
+ *(__global DATA_TYPE *)(dst_addr + 15 * dst_stride_z) = out1.s7;
+ *(__global DATA_TYPE *)(dst_addr + 16 * dst_stride_z) = out2.s0;
+ *(__global DATA_TYPE *)(dst_addr + 17 * dst_stride_z) = out2.s1;
+ *(__global DATA_TYPE *)(dst_addr + 18 * dst_stride_z) = out2.s2;
+ *(__global DATA_TYPE *)(dst_addr + 19 * dst_stride_z) = out2.s3;
+ *(__global DATA_TYPE *)(dst_addr + 20 * dst_stride_z) = out2.s4;
+ *(__global DATA_TYPE *)(dst_addr + 21 * dst_stride_z) = out2.s5;
+ *(__global DATA_TYPE *)(dst_addr + 22 * dst_stride_z) = out2.s6;
+ *(__global DATA_TYPE *)(dst_addr + 23 * dst_stride_z) = out2.s7;
+ *(__global DATA_TYPE *)(dst_addr + 24 * dst_stride_z) = out3.s0;
+ *(__global DATA_TYPE *)(dst_addr + 25 * dst_stride_z) = out3.s1;
+ *(__global DATA_TYPE *)(dst_addr + 26 * dst_stride_z) = out3.s2;
+ *(__global DATA_TYPE *)(dst_addr + 27 * dst_stride_z) = out3.s3;
+ *(__global DATA_TYPE *)(dst_addr + 28 * dst_stride_z) = out3.s4;
+ *(__global DATA_TYPE *)(dst_addr + 29 * dst_stride_z) = out3.s5;
+ *(__global DATA_TYPE *)(dst_addr + 30 * dst_stride_z) = out3.s6;
+ *(__global DATA_TYPE *)(dst_addr + 31 * dst_stride_z) = out3.s7;
+ *(__global DATA_TYPE *)(dst_addr + 32 * dst_stride_z) = out4.s0;
+ *(__global DATA_TYPE *)(dst_addr + 33 * dst_stride_z) = out4.s1;
+ *(__global DATA_TYPE *)(dst_addr + 34 * dst_stride_z) = out4.s2;
+ *(__global DATA_TYPE *)(dst_addr + 35 * dst_stride_z) = out4.s3;
+ *(__global DATA_TYPE *)(dst_addr + 36 * dst_stride_z) = out4.s4;
+ *(__global DATA_TYPE *)(dst_addr + 37 * dst_stride_z) = out4.s5;
+ *(__global DATA_TYPE *)(dst_addr + 38 * dst_stride_z) = out4.s6;
+ *(__global DATA_TYPE *)(dst_addr + 39 * dst_stride_z) = out4.s7;
+ *(__global DATA_TYPE *)(dst_addr + 40 * dst_stride_z) = out5.s0;
+ *(__global DATA_TYPE *)(dst_addr + 41 * dst_stride_z) = out5.s1;
+ *(__global DATA_TYPE *)(dst_addr + 42 * dst_stride_z) = out5.s2;
+ *(__global DATA_TYPE *)(dst_addr + 43 * dst_stride_z) = out5.s3;
+ *(__global DATA_TYPE *)(dst_addr + 44 * dst_stride_z) = out5.s4;
+ *(__global DATA_TYPE *)(dst_addr + 45 * dst_stride_z) = out5.s5;
+ *(__global DATA_TYPE *)(dst_addr + 46 * dst_stride_z) = out5.s6;
+ *(__global DATA_TYPE *)(dst_addr + 47 * dst_stride_z) = out5.s7;
+ *(__global DATA_TYPE *)(dst_addr + 48 * dst_stride_z) = out6.s0;
+ *(__global DATA_TYPE *)(dst_addr + 49 * dst_stride_z) = out6.s1;
+ *(__global DATA_TYPE *)(dst_addr + 50 * dst_stride_z) = out6.s2;
+ *(__global DATA_TYPE *)(dst_addr + 51 * dst_stride_z) = out6.s3;
+ *(__global DATA_TYPE *)(dst_addr + 52 * dst_stride_z) = out6.s4;
+ *(__global DATA_TYPE *)(dst_addr + 53 * dst_stride_z) = out6.s5;
+ *(__global DATA_TYPE *)(dst_addr + 54 * dst_stride_z) = out6.s6;
+ *(__global DATA_TYPE *)(dst_addr + 55 * dst_stride_z) = out6.s7;
+ *(__global DATA_TYPE *)(dst_addr + 56 * dst_stride_z) = out7.s0;
+ *(__global DATA_TYPE *)(dst_addr + 57 * dst_stride_z) = out7.s1;
+ *(__global DATA_TYPE *)(dst_addr + 58 * dst_stride_z) = out7.s2;
+ *(__global DATA_TYPE *)(dst_addr + 59 * dst_stride_z) = out7.s3;
+ *(__global DATA_TYPE *)(dst_addr + 60 * dst_stride_z) = out7.s4;
+ *(__global DATA_TYPE *)(dst_addr + 61 * dst_stride_z) = out7.s5;
+ *(__global DATA_TYPE *)(dst_addr + 62 * dst_stride_z) = out7.s6;
+ *(__global DATA_TYPE *)(dst_addr + 63 * dst_stride_z) = out7.s7;
+#endif // !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
+}
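+
+/* Layout note (descriptive comment, derived from the stores above): in the full 4x4/5x5 case the
+ * transformed filter is an 8x8 tile (out0..out7, eight values each) written to 64 consecutive
+ * planes along dst_stride_z, with the filter index mapped to x and the channel index mapped to y
+ * of every plane. The horizontal/vertical 1D variants only compute and store out0 (8 planes).
+ */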
+
+#endif // defined(SRC_DIM_Z)
+
+#if defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL)
+/** This OpenCL kernel performs Winograd filter transform 3x1 when the data layout is NCHW and the output tile is 2x1
+ *
+ * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
+ * @note -DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL has to be passed at compile time to perform Winograd Filter Transform
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void winograd_filter_transform_2x1_3x1_nchw(
+ TENSOR4D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst))
+{
+ winograd_filter_transform_2x2_3x3_nchw(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_offset_first_element_in_bytes);
+}
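+
+/* Illustrative build options (not part of this kernel): the @note requirements above translate
+ * into a program build string such as the following, assuming FP32 weights and 64 input channels:
+ *
+ *   "-DDATA_TYPE=float -DSRC_DIM_Z=64 -DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL"
+ */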
+
+/** This OpenCL kernel performs Winograd filter transform 3x1 when the data layout is NCHW and the output tile is 4x1
+ *
+ * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
+ * @note -DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL has to be passed at compile time to perform Winograd Filter Transform
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void winograd_filter_transform_4x1_3x1_nchw(
+ TENSOR4D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst))
+{
+ winograd_filter_transform_4x4_3x3_nchw(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_offset_first_element_in_bytes);
+}
+
+/** This OpenCL kernel performs Winograd filter transform 5x1 when the data layout is NCHW and the output tile is 4x1
+ *
+ * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
+ * @note -DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL has to be passed at compile time to perform Winograd Filter Transform
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void winograd_filter_transform_4x1_5x1_nchw(
+ TENSOR4D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst))
+{
+ winograd_filter_transform_4x4_5x5_nchw(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_offset_first_element_in_bytes);
+}
+
+#endif // defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL)
+
+#if defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
+/** This OpenCL kernel performs Winograd filter transform 1x3 when the data layout is NCHW and the output tile is 1x2
+ *
+ * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
+ * @note -DWINOGRAD_FILTER_TRANSFORM_VERTICAL has to be passed at compile time to perform Winograd Filter Transform
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void winograd_filter_transform_1x2_1x3_nchw(
+ TENSOR4D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst))
+{
+ winograd_filter_transform_2x2_3x3_nchw(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_offset_first_element_in_bytes);
+}
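+
+/* Illustrative build options (not part of this kernel): the vertical 1xN variants are selected
+ * analogously, e.g. for FP16 weights and 64 input channels:
+ *
+ *   "-DDATA_TYPE=half -DSRC_DIM_Z=64 -DWINOGRAD_FILTER_TRANSFORM_VERTICAL"
+ */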
+
+/** This OpenCL kernel performs Winograd filter transform 1x3 when the data layout is NCHW and the output tile is 1x4
+ *
+ * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
+ * @note -DWINOGRAD_FILTER_TRANSFORM_VERTICAL has to be passed at compile time to perform Winograd Filter Transform
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void winograd_filter_transform_1x4_1x3_nchw(
+ TENSOR4D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst))
+{
+ winograd_filter_transform_4x4_3x3_nchw(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_offset_first_element_in_bytes);
+}
+
+/** This OpenCL kernel performs Winograd filter transform 1x5 when the data layout is NCHW and the output tile is 1x4
+ *
+ * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
+ * @note -DWINOGRAD_FILTER_TRANSFORM_VERTICAL has to be passed at compile time to perform Winograd Filter Transform
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void winograd_filter_transform_1x4_1x5_nchw(
+ TENSOR4D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst))
+{
+ winograd_filter_transform_4x4_5x5_nchw(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_offset_first_element_in_bytes);
+}
+
+#endif // defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
diff --git a/src/core/CL/cl_kernels/nchw/winograd_input_transform.cl b/src/core/CL/cl_kernels/nchw/winograd_input_transform.cl
new file mode 100644
index 0000000000..8c382183c3
--- /dev/null
+++ b/src/core/CL/cl_kernels/nchw/winograd_input_transform.cl
@@ -0,0 +1,1346 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+#include "tile_helpers.h"
+
+#define OUTPUT_ROW_4x4_5x5(out, tmp, comm_fact) \
+ ({ \
+ comm_fact.s0 = tmp.s2 - 4.25f * tmp.s4 + tmp.s6; \
+ comm_fact.s1 = tmp.s1 - 4.25f * tmp.s3 + tmp.s5; \
+ comm_fact.s2 = 2.5f * tmp.s3; \
+ comm_fact.s3 = 0.5f * tmp.s1 + 2.f * tmp.s5 - comm_fact.s2; \
+ comm_fact.s4 = 0.25f * tmp.s2 - 1.25f * tmp.s4 + tmp.s6; \
+ comm_fact.s5 = 4.f * tmp.s2 + tmp.s6 - 5.f * tmp.s4; \
+ comm_fact.s6 = 2.f * tmp.s1 + 0.5f * tmp.s5 - comm_fact.s2; \
+ \
+ out.s0 = tmp.s0 - tmp.s6 + 5.25f * tmp.s4 - 5.25f * tmp.s2; \
+ out.s1 = comm_fact.s0 + comm_fact.s1; \
+ out.s2 = comm_fact.s0 - comm_fact.s1; \
+ out.s3 = comm_fact.s3 + comm_fact.s4; \
+ out.s4 = comm_fact.s4 - comm_fact.s3; \
+ out.s5 = comm_fact.s5 + comm_fact.s6; \
+ out.s6 = comm_fact.s5 - comm_fact.s6; \
+ out.s7 = tmp.s7 - tmp.s1 + 5.25f * tmp.s3 - 5.25f * tmp.s5; \
+ })
+
+#define OUTPUT_ROW_2x2_7x7(out, tmp, comm_fact) \
+ ({ \
+ comm_fact.s0 = 36.0f * tmp.s2 - 13.0f * tmp.s4 + tmp.s6; \
+ comm_fact.s1 = 36.0f * tmp.s1 - 13.0f * tmp.s3 + 1.0f * tmp.s5; \
+ comm_fact.s2 = 9.0f * tmp.s2 - 10.0f * tmp.s4 + tmp.s6; \
+ comm_fact.s3 = 18.0f * tmp.s1 - 20.0f * tmp.s3 + 2.0f * tmp.s5; \
+ comm_fact.s4 = 4.0f * tmp.s2 - 5.0f * tmp.s4 + tmp.s6; \
+ comm_fact.s5 = 12.0f * tmp.s1 - 15.0f * tmp.s3 + 3.0f * tmp.s5; \
+ out.s0 = -36.0f * tmp.s0 + 49.0f * tmp.s2 + -14.0f * tmp.s4 + tmp.s6; \
+ out.s1 = comm_fact.s0 - comm_fact.s1; \
+ out.s2 = comm_fact.s0 + comm_fact.s1; \
+ out.s3 = comm_fact.s2 - comm_fact.s3; \
+ out.s4 = comm_fact.s2 + comm_fact.s3; \
+ out.s5 = comm_fact.s4 - comm_fact.s5; \
+ out.s6 = comm_fact.s4 + comm_fact.s5; \
+ out.s7 = -36.0f * tmp.s1 + 0.0f * tmp.s2 + 49.0f * tmp.s3 - 14.0f * tmp.s5 + tmp.s7; \
+ })
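+
+/* Usage sketch for the two helper macros above (illustrative, variable names are assumptions):
+ * 'tmp' holds one pre-combined input row, 'comm_fact' is scratch for the shared sub-expressions
+ * and 'out' receives the transformed row; all three should be 8-component vectors since 'out' and
+ * 'tmp' are addressed up to component .s7, e.g.:
+ *
+ *   VEC_DATA_TYPE(DATA_TYPE, 8) tmp, comm_fact, out;
+ *   OUTPUT_ROW_4x4_5x5(out, tmp, comm_fact);
+ */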
+
+#if defined(NUM_TILES_X) && defined(PAD_LEFT) && defined(PAD_TOP) && defined(OUTPUT_TILE_W) && defined(OUTPUT_TILE_H)
+/** This OpenCL kernel computes the input transform when the kernel size is 3x3/3x1 or 1x3 and the output tile is 2x2/2x1 or 1x2
+ *
+ * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (e.g. -DNUM_TILES_X=5).
+ * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=1 and -DPAD_TOP=0).
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=2
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=2
+ * @note If this kernel is used to perform Winograd input transform 3x1, -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note If this kernel is used to perform Winograd input transform 1x3, -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ */
+__kernel void winograd_input_transform_2x2_3x3_stepz1_nchw(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst),
+ uint src_stride_w,
+ uint dst_stride_w)
+{
+ const int x = get_global_id(0);
+ const int y = get_global_id(1);
+#if defined(SRC_DEPTH)
+ const int z = get_global_id(2) % SRC_DEPTH;
+ const int b = get_global_id(2) / SRC_DEPTH;
+#else /* defined(SRC_DEPTH) */
+ const int z = get_global_id(2);
+#endif /* defined(SRC_DEPTH) */
+
+ // Compute input address
+#if defined(SRC_DEPTH)
+ __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * OUTPUT_TILE_W * sizeof(DATA_TYPE) + y * OUTPUT_TILE_H * src_stride_y + z * src_stride_z + b * src_stride_w;
+#else /* defined(SRC_DEPTH) */
+ __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * OUTPUT_TILE_W * sizeof(DATA_TYPE) + y * OUTPUT_TILE_H * src_stride_y + z * src_stride_z;
+#endif /* defined(SRC_DEPTH) */
+
+ src_addr = src_addr - ((int)PAD_LEFT * sizeof(DATA_TYPE)) - ((int)PAD_TOP * src_stride_y);
+
+#if defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ in_row0 = vload4(0, (__global DATA_TYPE *)(src_addr));
+#elif defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL) // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ in_row0 = (VEC_DATA_TYPE(DATA_TYPE, 4))(*((__global DATA_TYPE *)(src_addr + 0 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 1 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 2 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 3 * src_stride_y)));
+#else // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ in_row0 = vload4(0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y));
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ in_row1 = vload4(0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y));
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ in_row2 = vload4(0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y));
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ in_row3 = vload4(0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_y));
+#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ tmp0 = in_row0;
+
+#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+ tmp0 -= in_row2;
+#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+
+ DATA_TYPE out00 = tmp0.s0 - tmp0.s2;
+ DATA_TYPE out01 = tmp0.s1 + tmp0.s2;
+ DATA_TYPE out02 = tmp0.s2 - tmp0.s1;
+ DATA_TYPE out03 = tmp0.s1 - tmp0.s3;
+
+#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ tmp1 = in_row1 + in_row2;
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ tmp2 = in_row2 - in_row1;
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ tmp3 = in_row1 - in_row3;
+
+ DATA_TYPE out10 = tmp1.s0 - tmp1.s2;
+ DATA_TYPE out11 = tmp1.s1 + tmp1.s2;
+ DATA_TYPE out12 = tmp1.s2 - tmp1.s1;
+ DATA_TYPE out13 = tmp1.s1 - tmp1.s3;
+
+ DATA_TYPE out20 = tmp2.s0 - tmp2.s2;
+ DATA_TYPE out21 = tmp2.s1 + tmp2.s2;
+ DATA_TYPE out22 = tmp2.s2 - tmp2.s1;
+ DATA_TYPE out23 = tmp2.s1 - tmp2.s3;
+
+ DATA_TYPE out30 = tmp3.s0 - tmp3.s2;
+ DATA_TYPE out31 = tmp3.s1 + tmp3.s2;
+ DATA_TYPE out32 = tmp3.s2 - tmp3.s1;
+ DATA_TYPE out33 = tmp3.s1 - tmp3.s3;
+#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+
+#if defined(SRC_DEPTH)
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + z * sizeof(DATA_TYPE) + (x + y * (int)NUM_TILES_X) * dst_stride_y + b * dst_stride_w;
+#else /* defined(SRC_DEPTH) */
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + z * sizeof(DATA_TYPE) + (x + y * (int)NUM_TILES_X) * dst_stride_y;
+#endif /* defined(SRC_DEPTH) */
+
+ *((__global DATA_TYPE *)(dst_addr + 0 * dst_stride_z)) = out00;
+ *((__global DATA_TYPE *)(dst_addr + 1 * dst_stride_z)) = out01;
+ *((__global DATA_TYPE *)(dst_addr + 2 * dst_stride_z)) = out02;
+ *((__global DATA_TYPE *)(dst_addr + 3 * dst_stride_z)) = out03;
+
+#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+ *((__global DATA_TYPE *)(dst_addr + 4 * dst_stride_z)) = out10;
+ *((__global DATA_TYPE *)(dst_addr + 5 * dst_stride_z)) = out11;
+ *((__global DATA_TYPE *)(dst_addr + 6 * dst_stride_z)) = out12;
+ *((__global DATA_TYPE *)(dst_addr + 7 * dst_stride_z)) = out13;
+ *((__global DATA_TYPE *)(dst_addr + 8 * dst_stride_z)) = out20;
+ *((__global DATA_TYPE *)(dst_addr + 9 * dst_stride_z)) = out21;
+ *((__global DATA_TYPE *)(dst_addr + 10 * dst_stride_z)) = out22;
+ *((__global DATA_TYPE *)(dst_addr + 11 * dst_stride_z)) = out23;
+ *((__global DATA_TYPE *)(dst_addr + 12 * dst_stride_z)) = out30;
+ *((__global DATA_TYPE *)(dst_addr + 13 * dst_stride_z)) = out31;
+ *((__global DATA_TYPE *)(dst_addr + 14 * dst_stride_z)) = out32;
+ *((__global DATA_TYPE *)(dst_addr + 15 * dst_stride_z)) = out33;
+#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+}
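+
+/* Illustrative build options (not part of this kernel): a 3x3 / 2x2 NCHW configuration combines
+ * the @note requirements above, e.g.:
+ *
+ *   "-DDATA_TYPE=float -DNUM_TILES_X=5 -DPAD_LEFT=1 -DPAD_TOP=0 -DOUTPUT_TILE_W=2 -DOUTPUT_TILE_H=2"
+ *
+ * with -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL or -DWINOGRAD_INPUT_TRANSFORM_VERTICAL added for the
+ * 3x1 and 1x3 variants respectively, and optionally -DSRC_DEPTH to split the work across batches.
+ */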
+
+/** This OpenCL kernel computes the input transform when the kernel size is 3x3/3x1 or 1x3, the output tile is 2x2/2x1 or 1x2 and the number of channels is a multiple of 2
+ *
+ * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (e.g. -DNUM_TILES_X=5).
+ * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=1 and -DPAD_TOP=0).
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=2
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=2
+ * @note If this kernel is used to perform Winograd input transform 3x1, -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note If this kernel is used to perform Winograd input transform 1x3, -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ */
+__kernel void winograd_input_transform_2x2_3x3_stepz2_nchw(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst),
+ uint src_stride_w,
+ uint dst_stride_w)
+{
+ const int x = get_global_id(0);
+ const int y = get_global_id(1);
+#if defined(SRC_DEPTH)
+ const int z = (get_global_id(2) * 2) % SRC_DEPTH;
+ const int b = (get_global_id(2) * 2) / SRC_DEPTH;
+#else /* defined(SRC_DEPTH) */
+ const int z = get_global_id(2) * 2;
+#endif /* defined(SRC_DEPTH) */
+
+ // Compute input address
+#if defined(SRC_DEPTH)
+ __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * OUTPUT_TILE_W * sizeof(DATA_TYPE) + y * OUTPUT_TILE_H * src_stride_y + z * src_stride_z + b * src_stride_w;
+#else /* defined(SRC_DEPTH) */
+ __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * OUTPUT_TILE_W * sizeof(DATA_TYPE) + y * OUTPUT_TILE_H * src_stride_y + z * src_stride_z;
+#endif /* defined(SRC_DEPTH) */
+ src_addr = src_addr - ((int)PAD_LEFT * sizeof(DATA_TYPE)) - ((int)PAD_TOP * src_stride_y);
+
+#if defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ in_row0 = vload4(0, (__global DATA_TYPE *)(src_addr));
+#elif defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL) // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ in_row0 = (VEC_DATA_TYPE(DATA_TYPE, 4))(*((__global DATA_TYPE *)(src_addr + 0 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 1 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 2 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 3 * src_stride_y)));
+#else // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ in_row0 = vload4(0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y));
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ in_row1 = vload4(0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y));
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ in_row2 = vload4(0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y));
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ in_row3 = vload4(0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_y));
+#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+
+ src_addr += src_stride_z;
+#if defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ in_row4 = vload4(0, (__global DATA_TYPE *)(src_addr));
+#elif defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL) // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ in_row4 = (VEC_DATA_TYPE(DATA_TYPE, 4))(*((__global DATA_TYPE *)(src_addr + 0 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 1 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 2 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 3 * src_stride_y)));
+#else // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ in_row4 = vload4(0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y));
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ in_row5 = vload4(0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y));
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ in_row6 = vload4(0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y));
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ in_row7 = vload4(0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_y));
+#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ tmp0 = in_row0;
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ tmp4 = in_row4;
+
+#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+ tmp0 -= in_row2;
+ tmp4 -= in_row6;
+#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ out00 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp0.s0 - tmp0.s2, tmp4.s0 - tmp4.s2);
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ out01 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp0.s1 + tmp0.s2, tmp4.s1 + tmp4.s2);
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ out02 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp0.s2 - tmp0.s1, tmp4.s2 - tmp4.s1);
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ out03 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp0.s1 - tmp0.s3, tmp4.s1 - tmp4.s3);
+
+#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ tmp1 = in_row1 + in_row2;
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ tmp2 = in_row2 - in_row1;
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ tmp3 = in_row1 - in_row3;
+
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ tmp5 = in_row5 + in_row6;
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ tmp6 = in_row6 - in_row5;
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ tmp7 = in_row5 - in_row7;
+
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ out10 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp1.s0 - tmp1.s2, tmp5.s0 - tmp5.s2);
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ out11 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp1.s1 + tmp1.s2, tmp5.s1 + tmp5.s2);
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ out12 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp1.s2 - tmp1.s1, tmp5.s2 - tmp5.s1);
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ out13 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp1.s1 - tmp1.s3, tmp5.s1 - tmp5.s3);
+
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ out20 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp2.s0 - tmp2.s2, tmp6.s0 - tmp6.s2);
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ out21 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp2.s1 + tmp2.s2, tmp6.s1 + tmp6.s2);
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ out22 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp2.s2 - tmp2.s1, tmp6.s2 - tmp6.s1);
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ out23 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp2.s1 - tmp2.s3, tmp6.s1 - tmp6.s3);
+
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ out30 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp3.s0 - tmp3.s2, tmp7.s0 - tmp7.s2);
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ out31 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp3.s1 + tmp3.s2, tmp7.s1 + tmp7.s2);
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ out32 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp3.s2 - tmp3.s1, tmp7.s2 - tmp7.s1);
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ out33 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp3.s1 - tmp3.s3, tmp7.s1 - tmp7.s3);
+#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+
+#if defined(SRC_DEPTH)
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + z * sizeof(DATA_TYPE) + (x + y * (int)NUM_TILES_X) * dst_stride_y + b * dst_stride_w;
+#else /* defined(SRC_DEPTH) */
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + z * sizeof(DATA_TYPE) + (x + y * (int)NUM_TILES_X) * dst_stride_y;
+#endif /* defined(SRC_DEPTH) */
+
+ vstore2(out00, 0, (__global DATA_TYPE *)(dst_addr + 0 * dst_stride_z));
+ vstore2(out01, 0, (__global DATA_TYPE *)(dst_addr + 1 * dst_stride_z));
+ vstore2(out02, 0, (__global DATA_TYPE *)(dst_addr + 2 * dst_stride_z));
+ vstore2(out03, 0, (__global DATA_TYPE *)(dst_addr + 3 * dst_stride_z));
+
+#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+ vstore2(out10, 0, (__global DATA_TYPE *)(dst_addr + 4 * dst_stride_z));
+ vstore2(out11, 0, (__global DATA_TYPE *)(dst_addr + 5 * dst_stride_z));
+ vstore2(out12, 0, (__global DATA_TYPE *)(dst_addr + 6 * dst_stride_z));
+ vstore2(out13, 0, (__global DATA_TYPE *)(dst_addr + 7 * dst_stride_z));
+ vstore2(out20, 0, (__global DATA_TYPE *)(dst_addr + 8 * dst_stride_z));
+ vstore2(out21, 0, (__global DATA_TYPE *)(dst_addr + 9 * dst_stride_z));
+ vstore2(out22, 0, (__global DATA_TYPE *)(dst_addr + 10 * dst_stride_z));
+ vstore2(out23, 0, (__global DATA_TYPE *)(dst_addr + 11 * dst_stride_z));
+ vstore2(out30, 0, (__global DATA_TYPE *)(dst_addr + 12 * dst_stride_z));
+ vstore2(out31, 0, (__global DATA_TYPE *)(dst_addr + 13 * dst_stride_z));
+ vstore2(out32, 0, (__global DATA_TYPE *)(dst_addr + 14 * dst_stride_z));
+ vstore2(out33, 0, (__global DATA_TYPE *)(dst_addr + 15 * dst_stride_z));
+#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+}
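+
+/* Scheduling note (descriptive comment, assumption about the launch configuration): this step-z-2
+ * variant advances src_addr by one src_stride_z and packs two channels per vstore2, so each
+ * work-item covers channels z and z + 1 where
+ *
+ *   z = (2 * get_global_id(2)) % SRC_DEPTH   and   b = (2 * get_global_id(2)) / SRC_DEPTH,
+ *
+ * i.e. the Z dimension of the NDRange is expected to span half as many work-items as the stepz1
+ * kernel for the same tensor.
+ */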
+
+/** This OpenCL kernel computes the input transform when the output tile is 4x4/4x1 or 1x4, the filter size is 3x3/3x1 or 1x3 and the data layout is NCHW
+ *
+ * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (e.g. -DNUM_TILES_X=5).
+ * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=1 and -DPAD_TOP=0).
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
+ * @note If this kernel is used to perform Winograd input transform 3x1, -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note If this kernel is used to perform Winograd input transform 1x3, -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ */
+__kernel void winograd_input_transform_4x4_3x3_stepz1_nchw(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst),
+ uint src_stride_w,
+ uint dst_stride_w)
+{
+ const int x = get_global_id(0);
+ const int y = get_global_id(1);
+#if defined(SRC_DEPTH)
+ const int z = get_global_id(2) % SRC_DEPTH;
+ const int b = get_global_id(2) / SRC_DEPTH;
+#else /* defined(SRC_DEPTH) */
+ const int z = get_global_id(2);
+#endif /* defined(SRC_DEPTH) */
+
+ // Compute input address
+#if defined(SRC_DEPTH)
+ __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * OUTPUT_TILE_W * sizeof(DATA_TYPE) + y * OUTPUT_TILE_H * src_stride_y + z * src_stride_z + b * src_stride_w;
+#else /* defined(SRC_DEPTH) */
+ __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * OUTPUT_TILE_W * sizeof(DATA_TYPE) + y * OUTPUT_TILE_H * src_stride_y + z * src_stride_z;
+#endif /* defined(SRC_DEPTH) */
+
+ src_addr = src_addr - ((int)PAD_LEFT * sizeof(DATA_TYPE)) - ((int)PAD_TOP * src_stride_y);
+
+#if defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+ // Row0
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ d00 = (VEC_DATA_TYPE(DATA_TYPE, 4))(*((__global DATA_TYPE *)(src_addr + 0 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 1 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 2 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 3 * src_stride_y)));
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ d01 = (VEC_DATA_TYPE(DATA_TYPE, 2))(*((__global DATA_TYPE *)(src_addr + 4 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 5 * src_stride_y)));
+#else // defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+ // Row0
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ d00 = vload4(0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y));
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ d01 = vload2(2, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y));
+#endif // defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+
+ DATA_TYPE out0 = 0.0f;
+ DATA_TYPE out1 = 0.0f;
+ DATA_TYPE out2 = 0.0f;
+ DATA_TYPE out3 = 0.0f;
+ DATA_TYPE out4 = 0.0f;
+ DATA_TYPE out5 = 0.0f;
+
+ // Channels [0, 5]: [out00, out01, out02, out03, out04, out05]
+ out0 += 16.0f * d00.s0 - 20.0f * d00.s2 + 4.0f * d01.s0;
+ out1 += -16.0f * d00.s1 - 16.0f * d00.s2 + 4.0f * d00.s3 + 4.0f * d01.s0;
+ out2 += 16.0f * d00.s1 - 16.0f * d00.s2 - 4.0f * d00.s3 + 4.0f * d01.s0;
+ out3 += -8.0f * d00.s1 - 4.0f * d00.s2 + 8.0f * d00.s3 + 4.0f * d01.s0;
+ out4 += 8.0f * d00.s1 - 4.0f * d00.s2 - 8.0f * d00.s3 + 4.0f * d01.s0;
+ out5 += 16.0f * d00.s1 - 20.0f * d00.s3 + 4.0f * d01.s1;
+
+#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+ // Row4
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ d40 = vload4(0, (__global DATA_TYPE *)(src_addr + 4 * src_stride_y));
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ d41 = vload2(2, (__global DATA_TYPE *)(src_addr + 4 * src_stride_y));
+
+ // k0, k1, k2, k3, k4, k5 are common terms for row0, row1, row2, row3 and row4
+ DATA_TYPE k0 = d41.s0;
+ DATA_TYPE k1 = d41.s0;
+ DATA_TYPE k2 = d41.s0;
+ DATA_TYPE k3 = d41.s0;
+ DATA_TYPE k4 = d41.s0;
+ DATA_TYPE k5 = 0.0f;
+
+ k0 += 4.0f * d40.s0 - 5.0f * d40.s2;
+ k1 += -4.0f * d40.s1 - 4.0f * d40.s2 + d40.s3;
+ k2 += 4.0f * d40.s1 - 4.0f * d40.s2 - d40.s3;
+ k3 += -2.0f * d40.s1 + 2.0f * d40.s3 - d40.s2;
+ k4 += 2.0f * d40.s1 - 2.0f * d40.s3 - d40.s2;
+ k5 += 4.0f * d40.s1 - 5.0f * d40.s3 + d41.s1;
+
+ out0 += k0;
+ out1 += k1;
+ out2 += k2;
+ out3 += k3;
+ out4 += k4;
+ out5 += k5;
+
+ // Row2
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ d20 = vload4(0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y));
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ d21 = vload2(2, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y));
+
+ out0 += -20.0f * d20.s0 + 25.0f * d20.s2 - 5.0f * d21.s0;
+ out1 += +20.0f * d20.s1 + 20.0f * d20.s2 - 5.0f * d20.s3 - 5.0f * d21.s0;
+ out2 += -20.0f * d20.s1 + 20.0f * d20.s2 + 5.0f * d20.s3 - 5.0f * d21.s0;
+ out3 += +10.0f * d20.s1 + 5.0f * d20.s2 - 10.0f * d20.s3 - 5.0f * d21.s0;
+ out4 += -10.0f * d20.s1 + 5.0f * d20.s2 + 10.0f * d20.s3 - 5.0f * d21.s0;
+ out5 += -20.0f * d20.s1 + 25.0f * d20.s3 - 5.0f * d21.s1;
+#endif // #if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+
+ // Compute destination address
+#if defined(SRC_DEPTH)
+ __global DATA_TYPE *dst_addr = (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + z * sizeof(DATA_TYPE) + (x + y * (int)NUM_TILES_X) * dst_stride_y + b * dst_stride_w);
+#else /* defined(SRC_DEPTH) */
+ __global DATA_TYPE *dst_addr = (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + z * sizeof(DATA_TYPE) + (x + y * (int)NUM_TILES_X) * dst_stride_y);
+#endif /* defined(SRC_DEPTH) */
+
+ uint dst_plane_stride = dst_stride_z / sizeof(DATA_TYPE);
+
+ *(dst_addr) = out0;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out1;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out2;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out3;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out4;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out5;
+ dst_addr += dst_plane_stride;
+
+#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+ DATA_TYPE out6 = k0;
+ DATA_TYPE out7 = k1;
+ DATA_TYPE out8 = k2;
+ DATA_TYPE out9 = k3;
+ DATA_TYPE out10 = k4;
+ DATA_TYPE out11 = k5;
+ DATA_TYPE out12 = k0;
+ DATA_TYPE out13 = k1;
+ DATA_TYPE out14 = k2;
+ DATA_TYPE out15 = k3;
+ DATA_TYPE out16 = k4;
+ DATA_TYPE out17 = k5;
+ DATA_TYPE out18 = k0;
+ DATA_TYPE out19 = k1;
+ DATA_TYPE out20 = k2;
+ DATA_TYPE out21 = k3;
+ DATA_TYPE out22 = k4;
+ DATA_TYPE out23 = k5;
+ DATA_TYPE out24 = k0;
+ DATA_TYPE out25 = k1;
+ DATA_TYPE out26 = k2;
+ DATA_TYPE out27 = k3;
+ DATA_TYPE out28 = k4;
+ DATA_TYPE out29 = k5;
+
+ // Row1
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ d10 = vload4(0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y));
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ d11 = vload2(2, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y));
+
+ // Row3
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ d30 = vload4(0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_y));
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ d31 = vload2(2, (__global DATA_TYPE *)(src_addr + 3 * src_stride_y));
+
+ // Compute common parts for the channels between [6, 29]
+ // Channels [6, 11]: [out10, out11, out12, out13, out14, out15]
+ // Channels [12, 17]: [out20, out21, out22, out23, out24, out25]
+ DATA_TYPE part0 = -16.0f * d20.s0 + 20.0f * d20.s2 - 4.0f * d21.s0;
+ DATA_TYPE part1 = 16.0f * d10.s0 - 20.0f * d10.s2 + 4.0f * d11.s0 - 4.0f * d30.s0 + 5.0f * d30.s2 - d31.s0;
+ DATA_TYPE part2 = 16.0f * d20.s2 - 4.0f * d21.s0;
+ DATA_TYPE part3 = 16.0f * d20.s1 - 4.0f * d20.s3;
+ DATA_TYPE part4 = 16.0f * d10.s2 - 4.0f * d11.s0 - 4.0f * d30.s2 + d31.s0;
+ DATA_TYPE part5 = 16.0f * d10.s1 - 4.0f * d10.s3 - 4.0f * d30.s1 + d30.s3;
+ DATA_TYPE part6 = 4.0f * d20.s2 - 4.0f * d21.s0;
+ DATA_TYPE part7 = 8.0f * d10.s1 - 8.0f * d10.s3 - 2.0f * d30.s1 + 2.0f * d30.s3;
+ DATA_TYPE part8 = 4.0f * d10.s2 - 4.0f * d11.s0 - d30.s2 + d31.s0;
+ DATA_TYPE part9 = 8.0f * d20.s1 - 8.0f * d20.s3;
+ DATA_TYPE part10 = -16.0f * d20.s1 + 20.0f * d20.s3 - 4.0f * d21.s1;
+ DATA_TYPE part11 = -16.0f * d10.s1 + 20.0f * d10.s3 - 4.0f * d11.s1 + 4.0f * d30.s1 - 5.0f * d30.s3 + d31.s1;
+
+ // Channels [18, 23]: [out30, out31, out32, out33, out34, out35]
+ // Channels [24, 29]: [out40, out41, out42, out43, out44, out45]
+ DATA_TYPE part12 = 8.0f * d10.s0 - 10.0f * d10.s2 + 2.0f * d11.s0 - 8.0f * d30.s0 + 10.0f * d30.s2 - 2.0f * d31.s0;
+ DATA_TYPE part13 = part0 * 0.25f; // -4.0f * d20.s0 + 5.0f * d20.s2 - d21.s0
+ DATA_TYPE part14 = part2 * 0.25f; // 4.0f * d20.s2 - d21.s0
+ DATA_TYPE part15 = 8.0f * d10.s1 - 2.0f * d10.s3 - 8.0f * d30.s1 + 2.0f * d30.s3;
+ DATA_TYPE part16 = 8.0f * d10.s2 - 2.0f * d11.s0 - 8.0f * d30.s2 + 2.0f * d31.s0;
+ DATA_TYPE part17 = part3 * 0.25f; // 4.0f * d20.s1 - d20.s3
+ DATA_TYPE part18 = part6 * 0.25f; // d20.s2 - d21.s0
+ DATA_TYPE part19 = 4.0f * d10.s1 - 4.0f * d10.s3 - 4.0f * d30.s1 + 4.0f * d30.s3;
+ DATA_TYPE part20 = 2.0f * d10.s2 - 2.0f * d11.s0 - 2.0f * d30.s2 + 2.0f * d31.s0;
+ DATA_TYPE part21 = part9 * 0.25f; // 2.0f * (d20.s1 - d20.s3)
+ DATA_TYPE part22 = part10 * 0.25f; // - 4.0f * d20.s1 + 5.0f * d20.s3 - d21.s1
+ DATA_TYPE part23 = part11 * 0.5f + 6.0f * d30.s1 - 7.5f * d30.s3 + 1.5f * d31.s1; // - 8.0f * d10.s1 + 10.0f * d10.s3 - 2.0f * d11.s1 + 8.0f * d30.s1 - 10.0f * d30.s3 + 2.0f * d31.s1;
+
+ out6 += part0 - part1;
+ out12 += part0 + part1;
+ out7 += part2 + part3 + part4 + part5;
+ out8 += part2 - part3 + part4 - part5;
+ out13 += part2 + part3 - part4 - part5;
+ out14 += part2 - part3 - part4 + part5;
+ out9 += part6 + part7 + part8 + part9;
+ out10 += part6 - part7 + part8 - part9;
+ out15 += part6 - part7 - part8 + part9;
+ out16 += part6 + part7 - part8 - part9;
+ out11 += part10 + part11;
+ out17 += part10 - part11;
+
+ out18 += part13 - part12;
+ out24 += part13 + part12;
+ out19 += part14 + part15 + part16 + part17;
+ out20 += part14 - part15 + part16 - part17;
+ out25 += part14 - part15 - part16 + part17;
+ out26 += part14 + part15 - part16 - part17;
+ out21 += part18 + part19 + part20 + part21;
+ out22 += part18 - part19 + part20 - part21;
+ out27 += part18 - part19 - part20 + part21;
+ out28 += part18 + part19 - part20 - part21;
+ out23 += part22 + part23;
+ out29 += part22 - part23;
+
+ *(dst_addr) = out6;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out7;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out8;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out9;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out10;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out11;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out12;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out13;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out14;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out15;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out16;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out17;
+ dst_addr += dst_plane_stride;
+
+ *(dst_addr) = out18;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out19;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out20;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out21;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out22;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out23;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out24;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out25;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out26;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out27;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out28;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out29;
+ dst_addr += dst_plane_stride;
+
+ // Row5
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ d50 = vload4(0, (__global DATA_TYPE *)(src_addr + 5 * src_stride_y));
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ d51 = vload2(2, (__global DATA_TYPE *)(src_addr + 5 * src_stride_y));
+
+ // Channels [30, 35]
+ out0 = 16.0f * d10.s0 - 20.0f * d10.s2 - 20.0f * d30.s0 + 25.0f * d30.s2 + 4.0f * d50.s0 - 5.0f * d50.s2 + d51.s0 + 4.0f * d11.s0 - 5.0f * d31.s0;
+ out1 = -16.0f * d10.s1 - 16.0f * d10.s2 + 4.0f * d10.s3 + 20.0f * d30.s1 + 20.0f * d30.s2 - 5.0f * d30.s3 - 4.0f * d50.s1 - 4.0f * d50.s2 + d50.s3 + d51.s0 + 4.0f * d11.s0 - 5.0f * d31.s0;
+ out2 = 16.0f * d10.s1 - 16.0f * d10.s2 - 4.0f * d10.s3 - 20.0f * d30.s1 + 20.0f * d30.s2 + 5.0f * d30.s3 + 4.0f * d50.s1 - 4.0f * d50.s2 - d50.s3 + d51.s0 + 4.0f * d11.s0 - 5.0f * d31.s0;
+ out3 = -8.0f * d10.s1 - 4.0f * d10.s2 + 8.0f * d10.s3 + 10.0f * d30.s1 - 10.0f * d30.s3 + 5.0f * d30.s2 - 2.0f * d50.s1 + 2.0f * d50.s3 - d50.s2 + d51.s0 + 4.0f * d11.s0 - 5.0f * d31.s0;
+ out4 = 8.0f * d10.s1 - 4.0f * d10.s2 - 8.0f * d10.s3 - 10.0f * d30.s1 + 5.0f * d30.s2 + 10.0f * d30.s3 + 2.0f * d50.s1 - 2.0f * d50.s3 - d50.s2 + d51.s0 + 4.0f * d11.s0 - 5.0f * d31.s0;
+ out5 = 16.0f * d10.s1 - 20.0f * d10.s3 + 4.0f * d11.s1 - 20.0f * d30.s1 + 25.0f * d30.s3 - 5.0f * d31.s1 + 4.0f * d50.s1 - 5.0f * d50.s3 + d51.s1;
+
+ *(dst_addr) = out0;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out1;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out2;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out3;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out4;
+ dst_addr += dst_plane_stride;
+ *(dst_addr) = out5;
+ dst_addr += dst_plane_stride;
+#endif // #if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+}
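+
+// Illustrative example only (values are hypothetical, not taken from this patch): a kernel such as the
+// one above is expected to be compiled with the build options its header documents, along the lines of
+//   "-DDATA_TYPE=float -DNUM_TILES_X=14 -DPAD_LEFT=1 -DPAD_TOP=1 -DOUTPUT_TILE_W=4 -DOUTPUT_TILE_H=4 -DSRC_DEPTH=64"
+// with -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL or -DWINOGRAD_INPUT_TRANSFORM_VERTICAL added when only one
+// dimension is transformed (see the 4x1_3x1 / 1x4_1x3 wrapper kernels further below).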
+
+/** This OpenCL kernel computes the input transform when the kernel size is 5x5/5x1 or 1x5, the output tile is 4x4/4x1 or 1x4 and the data layout is NCHW
+ *
+ * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (e.g. -DNUM_TILES_X=5).
+ * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=1 and -DPAD_TOP=0).
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
+ * @note If this kernel is used to perform Winograd input transform 5x1, -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note If this kernel is used to perform Winograd input transform 1x5, -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ */
+__kernel void winograd_input_transform_4x4_5x5_stepz1_nchw(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst),
+ uint src_stride_w,
+ uint dst_stride_w)
+{
+ const int x = get_global_id(0);
+ const int y = get_global_id(1);
+#if defined(SRC_DEPTH)
+ const int z = get_global_id(2) % SRC_DEPTH;
+ const int b = get_global_id(2) / SRC_DEPTH;
+#else /* defined(SRC_DEPTH) */
+ const int z = get_global_id(2);
+#endif /* defined(SRC_DEPTH) */
+
+ // Compute input address
+#if defined(SRC_DEPTH)
+ __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * OUTPUT_TILE_W * sizeof(DATA_TYPE) + y * OUTPUT_TILE_H * src_stride_y + z * src_stride_z + b * src_stride_w;
+#else /* defined(SRC_DEPTH) */
+ __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * OUTPUT_TILE_W * sizeof(DATA_TYPE) + y * OUTPUT_TILE_H * src_stride_y + z * src_stride_z;
+#endif /* defined(SRC_DEPTH) */
+ src_addr = src_addr - ((int)PAD_LEFT * sizeof(DATA_TYPE)) - ((int)PAD_TOP * src_stride_y);
+
+ // Load input tile
+#if defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
+ const VEC_DATA_TYPE(DATA_TYPE, 8) in_row0 = vload8(0, (__global DATA_TYPE *)(src_addr));
+#elif defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL) // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
+ const VEC_DATA_TYPE(DATA_TYPE, 8) in_row0 = (VEC_DATA_TYPE(DATA_TYPE, 8))(*((__global DATA_TYPE *)(src_addr + 0 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 1 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 2 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 3 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 4 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 5 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 6 * src_stride_y)),
+ *((__global DATA_TYPE *)(src_addr + 7 * src_stride_y)));
+#else // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+ const VEC_DATA_TYPE(DATA_TYPE, 8) in_row0 = vload8(0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y));
+ const VEC_DATA_TYPE(DATA_TYPE, 8) in_row1 = vload8(0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y));
+ const VEC_DATA_TYPE(DATA_TYPE, 8) in_row2 = vload8(0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y));
+ const VEC_DATA_TYPE(DATA_TYPE, 8) in_row3 = vload8(0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_y));
+ const VEC_DATA_TYPE(DATA_TYPE, 8) in_row4 = vload8(0, (__global DATA_TYPE *)(src_addr + 4 * src_stride_y));
+ const VEC_DATA_TYPE(DATA_TYPE, 8) in_row5 = vload8(0, (__global DATA_TYPE *)(src_addr + 5 * src_stride_y));
+ const VEC_DATA_TYPE(DATA_TYPE, 8) in_row6 = vload8(0, (__global DATA_TYPE *)(src_addr + 6 * src_stride_y));
+ const VEC_DATA_TYPE(DATA_TYPE, 8) in_row7 = vload8(0, (__global DATA_TYPE *)(src_addr + 7 * src_stride_y));
+#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+
+ // Calculate common factors for intermediate tensor
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ tmp0 = in_row0;
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ comm_fact0 = 0.0f;
+
+#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+ comm_fact0 += in_row2 + in_row6 - (DATA_TYPE)4.25f * in_row4;
+ tmp0 += -in_row6 + (DATA_TYPE)5.25f * in_row4 - (DATA_TYPE)5.25f * in_row2;
+
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ comm_fact1 = in_row1 + in_row5 - (DATA_TYPE)4.25f * in_row3;
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ comm_fact2 = (DATA_TYPE)0.25f * in_row2 - (DATA_TYPE)1.25f * in_row4 + in_row6;
+
+ const VEC_DATA_TYPE(DATA_TYPE, 8) tmp1 = comm_fact0 + comm_fact1;
+ const VEC_DATA_TYPE(DATA_TYPE, 8) tmp2 = comm_fact0 - comm_fact1;
+
+ comm_fact0 = (DATA_TYPE)2.5f * in_row3;
+ comm_fact1 = (DATA_TYPE)0.5f * in_row1 - comm_fact0 + (DATA_TYPE)2.0f * in_row5;
+
+ const VEC_DATA_TYPE(DATA_TYPE, 8) tmp3 = comm_fact1 + comm_fact2;
+ const VEC_DATA_TYPE(DATA_TYPE, 8) tmp4 = comm_fact2 - comm_fact1;
+
+ comm_fact1 = (DATA_TYPE)2.0f * in_row1 - comm_fact0 + (DATA_TYPE)0.5f * in_row5;
+ comm_fact2 = (DATA_TYPE)4.0f * in_row2 - (DATA_TYPE)5.0f * in_row4 + in_row6;
+
+ const VEC_DATA_TYPE(DATA_TYPE, 8) tmp5 = comm_fact1 + comm_fact2;
+ const VEC_DATA_TYPE(DATA_TYPE, 8) tmp6 = comm_fact2 - comm_fact1;
+ const VEC_DATA_TYPE(DATA_TYPE, 8) tmp7 = in_row7 - in_row1 + (DATA_TYPE)5.25f * in_row3 - (DATA_TYPE)5.25f * in_row5;
+#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+
+ // Calculate output rows (reuse comm_fact0 vector)
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ out0;
+
+ OUTPUT_ROW_4x4_5x5(out0, tmp0, comm_fact0);
+
+#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ out1, out2, out3, out4, out5, out6, out7;
+
+ OUTPUT_ROW_4x4_5x5(out1, tmp1, comm_fact0);
+ OUTPUT_ROW_4x4_5x5(out2, tmp2, comm_fact0);
+ OUTPUT_ROW_4x4_5x5(out3, tmp3, comm_fact0);
+ OUTPUT_ROW_4x4_5x5(out4, tmp4, comm_fact0);
+ OUTPUT_ROW_4x4_5x5(out5, tmp5, comm_fact0);
+ OUTPUT_ROW_4x4_5x5(out6, tmp6, comm_fact0);
+ OUTPUT_ROW_4x4_5x5(out7, tmp7, comm_fact0);
+#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+
+ // Store values across the channels
+#if defined(SRC_DEPTH)
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + z * sizeof(DATA_TYPE) + (x + y * (int)NUM_TILES_X) * dst_stride_y + b * dst_stride_w;
+#else /* defined(SRC_DEPTH) */
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + z * sizeof(DATA_TYPE) + (x + y * (int)NUM_TILES_X) * dst_stride_y;
+#endif /* defined(SRC_DEPTH) */
+
+ *((__global DATA_TYPE *)(dst_addr + 0 * dst_stride_z)) = out0.s0;
+ *((__global DATA_TYPE *)(dst_addr + 1 * dst_stride_z)) = out0.s1;
+ *((__global DATA_TYPE *)(dst_addr + 2 * dst_stride_z)) = out0.s2;
+ *((__global DATA_TYPE *)(dst_addr + 3 * dst_stride_z)) = out0.s3;
+ *((__global DATA_TYPE *)(dst_addr + 4 * dst_stride_z)) = out0.s4;
+ *((__global DATA_TYPE *)(dst_addr + 5 * dst_stride_z)) = out0.s5;
+ *((__global DATA_TYPE *)(dst_addr + 6 * dst_stride_z)) = out0.s6;
+ *((__global DATA_TYPE *)(dst_addr + 7 * dst_stride_z)) = out0.s7;
+
+#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+ *((__global DATA_TYPE *)(dst_addr + 8 * dst_stride_z)) = out1.s0;
+ *((__global DATA_TYPE *)(dst_addr + 9 * dst_stride_z)) = out1.s1;
+ *((__global DATA_TYPE *)(dst_addr + 10 * dst_stride_z)) = out1.s2;
+ *((__global DATA_TYPE *)(dst_addr + 11 * dst_stride_z)) = out1.s3;
+ *((__global DATA_TYPE *)(dst_addr + 12 * dst_stride_z)) = out1.s4;
+ *((__global DATA_TYPE *)(dst_addr + 13 * dst_stride_z)) = out1.s5;
+ *((__global DATA_TYPE *)(dst_addr + 14 * dst_stride_z)) = out1.s6;
+ *((__global DATA_TYPE *)(dst_addr + 15 * dst_stride_z)) = out1.s7;
+ *((__global DATA_TYPE *)(dst_addr + 16 * dst_stride_z)) = out2.s0;
+ *((__global DATA_TYPE *)(dst_addr + 17 * dst_stride_z)) = out2.s1;
+ *((__global DATA_TYPE *)(dst_addr + 18 * dst_stride_z)) = out2.s2;
+ *((__global DATA_TYPE *)(dst_addr + 19 * dst_stride_z)) = out2.s3;
+ *((__global DATA_TYPE *)(dst_addr + 20 * dst_stride_z)) = out2.s4;
+ *((__global DATA_TYPE *)(dst_addr + 21 * dst_stride_z)) = out2.s5;
+ *((__global DATA_TYPE *)(dst_addr + 22 * dst_stride_z)) = out2.s6;
+ *((__global DATA_TYPE *)(dst_addr + 23 * dst_stride_z)) = out2.s7;
+ *((__global DATA_TYPE *)(dst_addr + 24 * dst_stride_z)) = out3.s0;
+ *((__global DATA_TYPE *)(dst_addr + 25 * dst_stride_z)) = out3.s1;
+ *((__global DATA_TYPE *)(dst_addr + 26 * dst_stride_z)) = out3.s2;
+ *((__global DATA_TYPE *)(dst_addr + 27 * dst_stride_z)) = out3.s3;
+ *((__global DATA_TYPE *)(dst_addr + 28 * dst_stride_z)) = out3.s4;
+ *((__global DATA_TYPE *)(dst_addr + 29 * dst_stride_z)) = out3.s5;
+ *((__global DATA_TYPE *)(dst_addr + 30 * dst_stride_z)) = out3.s6;
+ *((__global DATA_TYPE *)(dst_addr + 31 * dst_stride_z)) = out3.s7;
+ *((__global DATA_TYPE *)(dst_addr + 32 * dst_stride_z)) = out4.s0;
+ *((__global DATA_TYPE *)(dst_addr + 33 * dst_stride_z)) = out4.s1;
+ *((__global DATA_TYPE *)(dst_addr + 34 * dst_stride_z)) = out4.s2;
+ *((__global DATA_TYPE *)(dst_addr + 35 * dst_stride_z)) = out4.s3;
+ *((__global DATA_TYPE *)(dst_addr + 36 * dst_stride_z)) = out4.s4;
+ *((__global DATA_TYPE *)(dst_addr + 37 * dst_stride_z)) = out4.s5;
+ *((__global DATA_TYPE *)(dst_addr + 38 * dst_stride_z)) = out4.s6;
+ *((__global DATA_TYPE *)(dst_addr + 39 * dst_stride_z)) = out4.s7;
+ *((__global DATA_TYPE *)(dst_addr + 40 * dst_stride_z)) = out5.s0;
+ *((__global DATA_TYPE *)(dst_addr + 41 * dst_stride_z)) = out5.s1;
+ *((__global DATA_TYPE *)(dst_addr + 42 * dst_stride_z)) = out5.s2;
+ *((__global DATA_TYPE *)(dst_addr + 43 * dst_stride_z)) = out5.s3;
+ *((__global DATA_TYPE *)(dst_addr + 44 * dst_stride_z)) = out5.s4;
+ *((__global DATA_TYPE *)(dst_addr + 45 * dst_stride_z)) = out5.s5;
+ *((__global DATA_TYPE *)(dst_addr + 46 * dst_stride_z)) = out5.s6;
+ *((__global DATA_TYPE *)(dst_addr + 47 * dst_stride_z)) = out5.s7;
+ *((__global DATA_TYPE *)(dst_addr + 48 * dst_stride_z)) = out6.s0;
+ *((__global DATA_TYPE *)(dst_addr + 49 * dst_stride_z)) = out6.s1;
+ *((__global DATA_TYPE *)(dst_addr + 50 * dst_stride_z)) = out6.s2;
+ *((__global DATA_TYPE *)(dst_addr + 51 * dst_stride_z)) = out6.s3;
+ *((__global DATA_TYPE *)(dst_addr + 52 * dst_stride_z)) = out6.s4;
+ *((__global DATA_TYPE *)(dst_addr + 53 * dst_stride_z)) = out6.s5;
+ *((__global DATA_TYPE *)(dst_addr + 54 * dst_stride_z)) = out6.s6;
+ *((__global DATA_TYPE *)(dst_addr + 55 * dst_stride_z)) = out6.s7;
+ *((__global DATA_TYPE *)(dst_addr + 56 * dst_stride_z)) = out7.s0;
+ *((__global DATA_TYPE *)(dst_addr + 57 * dst_stride_z)) = out7.s1;
+ *((__global DATA_TYPE *)(dst_addr + 58 * dst_stride_z)) = out7.s2;
+ *((__global DATA_TYPE *)(dst_addr + 59 * dst_stride_z)) = out7.s3;
+ *((__global DATA_TYPE *)(dst_addr + 60 * dst_stride_z)) = out7.s4;
+ *((__global DATA_TYPE *)(dst_addr + 61 * dst_stride_z)) = out7.s5;
+ *((__global DATA_TYPE *)(dst_addr + 62 * dst_stride_z)) = out7.s6;
+ *((__global DATA_TYPE *)(dst_addr + 63 * dst_stride_z)) = out7.s7;
+#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+}
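+
+/* Host-side sketch (illustrative only, not part of this patch): the two trailing uint arguments come
+ * after the eight parameters that each TENSOR3D_DECLARATION expands to (the wrapper kernels below
+ * forward those eight explicitly), so under that assumption they would be set as:
+ *
+ *   cl_uint src_stride_w = src_batch_stride_in_bytes; // hypothetical host-side variables
+ *   cl_uint dst_stride_w = dst_batch_stride_in_bytes;
+ *   clSetKernelArg(kernel, 16, sizeof(cl_uint), &src_stride_w);
+ *   clSetKernelArg(kernel, 17, sizeof(cl_uint), &dst_stride_w);
+ */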
+
+#if defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
+/** This OpenCL kernel computes the input transform when the kernel size is 3x1 and the output tile is 2x1
+ *
+ * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (e.g. -DNUM_TILES_X=5).
+ * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=1 and -DPAD_TOP=0).
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=2
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=1
+ * @note -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ */
+__kernel void winograd_input_transform_2x1_3x1_stepz1_nchw(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst),
+ uint src_stride_w,
+ uint dst_stride_w)
+{
+ winograd_input_transform_2x2_3x3_stepz1_nchw(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_offset_first_element_in_bytes,
+ src_stride_w,
+ dst_stride_w);
+}
+
+/** This OpenCL kernel computes the input transform when the kernel size is 3x1, the output tile is 2x1 and the number of channels is a multiple of 2
+ *
+ * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (e.g. -DNUM_TILES_X=5).
+ * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=1 and -DPAD_TOP=0).
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=2
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=1
+ * @note -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ */
+__kernel void winograd_input_transform_2x1_3x1_stepz2_nchw(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst),
+ uint src_stride_w,
+ uint dst_stride_w)
+{
+ winograd_input_transform_2x2_3x3_stepz2_nchw(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_offset_first_element_in_bytes,
+ src_stride_w,
+ dst_stride_w);
+}
+
+/** This OpenCL kernel computes the input transform when the kernel size is 3x1 and the output tile is 4x1
+ *
+ * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (e.g. -DNUM_TILES_X=5).
+ * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=1 and -DPAD_TOP=0).
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=1
+ * @note -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ */
+__kernel void winograd_input_transform_4x1_3x1_stepz1_nchw(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst),
+ uint src_stride_w,
+ uint dst_stride_w)
+{
+ winograd_input_transform_4x4_3x3_stepz1_nchw(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_offset_first_element_in_bytes,
+ src_stride_w,
+ dst_stride_w);
+}
+
+/** This OpenCL kernel computes the input transform when the kernel size is 5x1, the output tile is 4x1 and the data layout is NCHW
+ *
+ * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (e.g. -DNUM_TILES_X=5).
+ * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=1 and -DPAD_TOP=0).
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=1
+ * @note -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ */
+__kernel void winograd_input_transform_4x1_5x1_stepz1_nchw(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst),
+ uint src_stride_w,
+ uint dst_stride_w)
+{
+ winograd_input_transform_4x4_5x5_stepz1_nchw(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_offset_first_element_in_bytes,
+ src_stride_w,
+ dst_stride_w);
+}
+#endif // defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
+
+#if defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+/** This OpenCL kernel computes the input transform when the kernel size is 1x3 and the output tile is 1x2
+ *
+ * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (e.g. -DNUM_TILES_X=5).
+ * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=1 and -DPAD_TOP=0).
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=1
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=2
+ * @note -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ */
+__kernel void winograd_input_transform_1x2_1x3_stepz1_nchw(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst),
+ uint src_stride_w,
+ uint dst_stride_w)
+{
+ winograd_input_transform_2x2_3x3_stepz1_nchw(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_offset_first_element_in_bytes,
+ src_stride_w,
+ dst_stride_w);
+}
+
+/** This OpenCL kernel computes the input transform when the kernel size is 1x3, the output tile is 1x2 and the number of channels is a multiple of 2
+ *
+ * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (e.g. -DNUM_TILES_X=5).
+ * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=1 and -DPAD_TOP=0).
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=1
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=2
+ * @note -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ */
+__kernel void winograd_input_transform_1x2_1x3_stepz2_nchw(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst),
+ uint src_stride_w,
+ uint dst_stride_w)
+{
+ winograd_input_transform_2x2_3x3_stepz2_nchw(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_offset_first_element_in_bytes,
+ src_stride_w,
+ dst_stride_w);
+}
+
+/** This OpenCL kernel computes the input transform when the kernel size is 1x3 and the output tile is 1x4
+ *
+ * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (e.g. -DNUM_TILES_X=5).
+ * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=1 and -DPAD_TOP=0).
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=1
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
+ * @note -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ */
+__kernel void winograd_input_transform_1x4_1x3_stepz1_nchw(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst),
+ uint src_stride_w,
+ uint dst_stride_w)
+{
+ winograd_input_transform_4x4_3x3_stepz1_nchw(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_offset_first_element_in_bytes,
+ src_stride_w,
+ dst_stride_w);
+}
+
+/** This OpenCL kernel computes the input transform when the kernel size is 1x5 and the output tile is 1x4
+ *
+ * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (e.g. -DNUM_TILES_X=5).
+ * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=1 and -DPAD_TOP=0).
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=1
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
+ * @note -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ */
+__kernel void winograd_input_transform_1x4_1x5_stepz1_nchw(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst),
+ uint src_stride_w,
+ uint dst_stride_w)
+{
+ winograd_input_transform_4x4_5x5_stepz1_nchw(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_offset_first_element_in_bytes,
+ src_stride_w,
+ dst_stride_w);
+}
+#endif // defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+#endif // defined(NUM_TILES_X) && defined(PAD_LEFT) && defined(PAD_TOP) && defined(OUTPUT_TILE_W) && defined(OUTPUT_TILE_H)
diff --git a/src/core/CL/cl_kernels/nchw/winograd_output_transform.cl b/src/core/CL/cl_kernels/nchw/winograd_output_transform.cl
new file mode 100644
index 0000000000..861ed50651
--- /dev/null
+++ b/src/core/CL/cl_kernels/nchw/winograd_output_transform.cl
@@ -0,0 +1,1082 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "activation_float_helpers.h"
+#include "helpers.h"
+#include "tile_helpers.h"
+
+#if defined(NUM_TILES_X) && defined(OUTPUT_TILE_W) && defined(OUTPUT_TILE_H)
+#if defined(VEC_SIZE) && VEC_SIZE == 2
+/** This OpenCL kernel performs Winograd output transform when the output tile is 2x2/2x1 or 1x2, the filter size is 3x3/3x1 or 1x3 and the data layout is NCHW
+ *
+ * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=2
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=2
+ * @note If this kernel is used to perform Winograd output transform 3x1, -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note If this kernel is used to perform Winograd output transform 1x3, -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
+ * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. Accepted values are -DVEC_SIZE=2 (for output_tile_size 2x2, 2x1, 1x2) and -DVEC_SIZE=4 (for output_tile_size 4x4, 4x1, 1x4)
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void winograd_output_transform_2x2_3x3_nchw(
+ TENSOR4D_DECLARATION(src),
+ TENSOR4D_DECLARATION(dst)
+#if defined(HAS_BIAS)
+ ,
+ VECTOR_DECLARATION(bias)
+#endif // defined(HAS_BIAS)
+)
+{
+ // Each thread stores a 2x2/2x1 or 1x2 tile according to the filter size
+#if defined(SRC_DEPTH)
+ Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DEPTH);
+ const __global uchar *src_addr = tensor4D_offset(&src, 0, 0, 0, 0);
+#else /* defined(SRC_DEPTH) */
+ Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
+ const __global uchar *src_addr = tensor3D_offset(&src, 0, 0, 0);
+#endif /* defined(SRC_DEPTH) */
+
+ // Load the values across the 16 or 4 channels to compose the 4x4 or 4x1 tile
+ DATA_TYPE d00 = *((__global DATA_TYPE *)(src_addr + 0 * src_stride_z));
+ DATA_TYPE d01 = *((__global DATA_TYPE *)(src_addr + 1 * src_stride_z));
+ DATA_TYPE d02 = *((__global DATA_TYPE *)(src_addr + 2 * src_stride_z));
+ DATA_TYPE d03 = *((__global DATA_TYPE *)(src_addr + 3 * src_stride_z));
+
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+ // Compute the 2x1 or 1x2 output tile
+ // out00 = d00 + d01 + d02
+ // out01 = d01 - d02 - d03
+
+ float out00 = d00 + d01 + d02;
+ float out01 = d01 - d02 - d03;
+#else // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+
+ DATA_TYPE d10 = *((__global DATA_TYPE *)(src_addr + 4 * src_stride_z));
+ DATA_TYPE d11 = *((__global DATA_TYPE *)(src_addr + 5 * src_stride_z));
+ DATA_TYPE d12 = *((__global DATA_TYPE *)(src_addr + 6 * src_stride_z));
+ DATA_TYPE d13 = *((__global DATA_TYPE *)(src_addr + 7 * src_stride_z));
+
+ DATA_TYPE d20 = *((__global DATA_TYPE *)(src_addr + 8 * src_stride_z));
+ DATA_TYPE d21 = *((__global DATA_TYPE *)(src_addr + 9 * src_stride_z));
+ DATA_TYPE d22 = *((__global DATA_TYPE *)(src_addr + 10 * src_stride_z));
+ DATA_TYPE d23 = *((__global DATA_TYPE *)(src_addr + 11 * src_stride_z));
+
+ DATA_TYPE d30 = *((__global DATA_TYPE *)(src_addr + 12 * src_stride_z));
+ DATA_TYPE d31 = *((__global DATA_TYPE *)(src_addr + 13 * src_stride_z));
+ DATA_TYPE d32 = *((__global DATA_TYPE *)(src_addr + 14 * src_stride_z));
+ DATA_TYPE d33 = *((__global DATA_TYPE *)(src_addr + 15 * src_stride_z));
+
+ // Compute the 2x2 output tile
+ float k0 = d01 + d11 + d21;
+ float k1 = d02 + d12 + d22;
+ float k2 = d11 - d21 - d31;
+ float k3 = d12 - d22 - d32;
+
+ // out00 = d00 + d10 + d20 + d01 + d11 + d21 + d02 + d12 + d22
+ // out01 = d01 + d11 + d21 - (d02 + d12 + d22) - (d03 + d13 + d23)
+ // out10 = d10 - d20 - d30 + (d11 - d21 - d31) + (d12 - d22 - d32)
+ // out11 = d11 - d21 - d31 - (d12 - d22 - d32) - (d13 - d23 - d33)
+
+ float out00 = d10;
+ float out01 = -d13;
+ float out10 = d10;
+ float out11 = -d13;
+
+ out00 += d00 + d20 + k0 + k1;
+ out01 += k0 - k1 - (d03 + d23);
+ out10 += -d20 - d30 + k2 + k3;
+ out11 += k2 - k3 + d23 + d33;
+#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+
+ int y_in = get_global_id(1);
+ int x_out = (y_in % NUM_TILES_X) * OUTPUT_TILE_W;
+ int y_out = (y_in / NUM_TILES_X) * OUTPUT_TILE_H;
+ int z_out = get_global_id(0);
+#if defined(SRC_DEPTH)
+ int batch = get_global_id(2) / SRC_DEPTH;
+#endif /* defined(SRC_DEPTH) */
+
+#if defined(HAS_BIAS)
+ // Add bias
+ Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);
+
+ float b = (float) * ((__global DATA_TYPE *)(vector_offset(&bias, z_out)));
+
+ out00 += (float)b;
+ out01 += (float)b;
+#endif // defined(HAS_BIAS)
+
+ // Get output address
+#if defined(SRC_DEPTH)
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_out * sizeof(DATA_TYPE) + y_out * dst_stride_y + z_out * dst_stride_z + batch * dst_stride_w;
+#else /* defined(SRC_DEPTH) */
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_out * sizeof(DATA_TYPE) + y_out * dst_stride_y + z_out * dst_stride_z;
+#endif /* defined(SRC_DEPTH) */
+
+ // Store the output tile
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+ const VEC_DATA_TYPE(DATA_TYPE, 2)
+ out0_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 2))(out00, out01), VEC_DATA_TYPE(DATA_TYPE, 2)), A_VAL, B_VAL);
+ *((__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y)) = out0_dt.s0;
+ *((__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y)) = out0_dt.s1;
+#else // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+ vstore2(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 2))(out00, out01), VEC_DATA_TYPE(DATA_TYPE, 2)), A_VAL, B_VAL), 0,
+ (__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y));
+#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+
+#if !defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+#if defined(HAS_BIAS)
+ // Add bias
+ out10 += (DATA_TYPE)b;
+ out11 += (DATA_TYPE)b;
+#endif // defined(HAS_BIAS)
+ vstore2(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 2))(out10, out11), VEC_DATA_TYPE(DATA_TYPE, 2)), A_VAL, B_VAL), 0,
+ (__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y));
+#endif // !defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+}
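+
+// Reference note (illustrative only, not used by the compiler): the expressions above are the expanded
+// form of the standard Winograd F(2x2,3x3) output transform Y = A^T * M * A, with
+//   A^T = | 1  1  1  0 |
+//         | 0  1 -1 -1 |
+// applied once for the 2x1/1x2 paths and on both sides of the transformed tile for the full 2x2 case.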
+#endif // defined(VEC_SIZE) && VEC_SIZE == 2
+
+#if defined(VEC_SIZE) && VEC_SIZE == 4
+/** This OpenCL kernel performs Winograd output transform when the output tile is 4x4, the filter size is 3x3 and the data layout is NCHW
+ *
+ * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
+ * @note If this kernel is used to perform Winograd output transform 3x1, -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note If this kernel is used to perform Winograd output transform 1x3, -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void winograd_output_transform_4x4_3x3_nchw(
+ TENSOR4D_DECLARATION(src),
+ TENSOR4D_DECLARATION(dst)
+#if defined(HAS_BIAS)
+ ,
+ VECTOR_DECLARATION(bias)
+#endif // defined(HAS_BIAS)
+)
+{
+ // Each thread stores a 4x4/4x1 or 1x4 tile
+#if defined(SRC_DEPTH)
+ Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DEPTH);
+ const __global uchar *src_addr = tensor4D_offset(&src, 0, 0, 0, 0);
+#else /* defined(SRC_DEPTH) */
+ Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
+ const __global uchar *src_addr = tensor3D_offset(&src, 0, 0, 0);
+#endif /* defined(SRC_DEPTH) */
+
+ // Load the values across the channels to compose the 6x6 or 6x1 tile
+ DATA_TYPE d00 = *((__global DATA_TYPE *)(src_addr + 0 * src_stride_z));
+ DATA_TYPE d01 = *((__global DATA_TYPE *)(src_addr + 1 * src_stride_z));
+ DATA_TYPE d02 = *((__global DATA_TYPE *)(src_addr + 2 * src_stride_z));
+ DATA_TYPE d03 = *((__global DATA_TYPE *)(src_addr + 3 * src_stride_z));
+ DATA_TYPE d04 = *((__global DATA_TYPE *)(src_addr + 4 * src_stride_z));
+ DATA_TYPE d05 = *((__global DATA_TYPE *)(src_addr + 5 * src_stride_z));
+
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+ // Compute out00, out01, out02 and out03
+ float out00 = d00 + d01 + d02 + d03 + d04;
+ float out01 = d01 - d02 + 2.0f * d03 - 2.0f * d04;
+ float out02 = d01 + d02 + 4.0f * d03 + 4.0f * d04;
+ float out03 = d01 - d02 + 8.0f * d03 - 8.0f * d04 + d05;
+#else // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+
+ DATA_TYPE d10 = *((__global DATA_TYPE *)(src_addr + 6 * src_stride_z));
+ DATA_TYPE d11 = *((__global DATA_TYPE *)(src_addr + 7 * src_stride_z));
+ DATA_TYPE d12 = *((__global DATA_TYPE *)(src_addr + 8 * src_stride_z));
+ DATA_TYPE d13 = *((__global DATA_TYPE *)(src_addr + 9 * src_stride_z));
+ DATA_TYPE d14 = *((__global DATA_TYPE *)(src_addr + 10 * src_stride_z));
+ DATA_TYPE d15 = *((__global DATA_TYPE *)(src_addr + 11 * src_stride_z));
+
+ DATA_TYPE d20 = *((__global DATA_TYPE *)(src_addr + 12 * src_stride_z));
+ DATA_TYPE d21 = *((__global DATA_TYPE *)(src_addr + 13 * src_stride_z));
+ DATA_TYPE d22 = *((__global DATA_TYPE *)(src_addr + 14 * src_stride_z));
+ DATA_TYPE d23 = *((__global DATA_TYPE *)(src_addr + 15 * src_stride_z));
+ DATA_TYPE d24 = *((__global DATA_TYPE *)(src_addr + 16 * src_stride_z));
+ DATA_TYPE d25 = *((__global DATA_TYPE *)(src_addr + 17 * src_stride_z));
+
+ DATA_TYPE d30 = *((__global DATA_TYPE *)(src_addr + 18 * src_stride_z));
+ DATA_TYPE d31 = *((__global DATA_TYPE *)(src_addr + 19 * src_stride_z));
+ DATA_TYPE d32 = *((__global DATA_TYPE *)(src_addr + 20 * src_stride_z));
+ DATA_TYPE d33 = *((__global DATA_TYPE *)(src_addr + 21 * src_stride_z));
+ DATA_TYPE d34 = *((__global DATA_TYPE *)(src_addr + 22 * src_stride_z));
+ DATA_TYPE d35 = *((__global DATA_TYPE *)(src_addr + 23 * src_stride_z));
+
+ DATA_TYPE d40 = *((__global DATA_TYPE *)(src_addr + 24 * src_stride_z));
+ DATA_TYPE d41 = *((__global DATA_TYPE *)(src_addr + 25 * src_stride_z));
+ DATA_TYPE d42 = *((__global DATA_TYPE *)(src_addr + 26 * src_stride_z));
+ DATA_TYPE d43 = *((__global DATA_TYPE *)(src_addr + 27 * src_stride_z));
+ DATA_TYPE d44 = *((__global DATA_TYPE *)(src_addr + 28 * src_stride_z));
+ DATA_TYPE d45 = *((__global DATA_TYPE *)(src_addr + 29 * src_stride_z));
+
+ DATA_TYPE d50 = *((__global DATA_TYPE *)(src_addr + 30 * src_stride_z));
+ DATA_TYPE d51 = *((__global DATA_TYPE *)(src_addr + 31 * src_stride_z));
+ DATA_TYPE d52 = *((__global DATA_TYPE *)(src_addr + 32 * src_stride_z));
+ DATA_TYPE d53 = *((__global DATA_TYPE *)(src_addr + 33 * src_stride_z));
+ DATA_TYPE d54 = *((__global DATA_TYPE *)(src_addr + 34 * src_stride_z));
+ DATA_TYPE d55 = *((__global DATA_TYPE *)(src_addr + 35 * src_stride_z));
+
+ // Compute out00, out01, out02 and out03
+ float out00 = (float)d01 + (float)d21 + (float)d41 + (float)d11 + (float)d31;
+ float out01 = (float)d01 + (float)d21 + (float)d41 + (float)d11 + (float)d31;
+ float out02 = (float)d01 + (float)d21 + (float)d41 + (float)d11 + (float)d31;
+ float out03 = (float)d01 + (float)d21 + (float)d41 + (float)d11 + (float)d31;
+
+ float k0 = d03 + d04 + d13 + d14 + d23 + d24 + d33 + d34 + d43 + d44;
+ float k1 = 2.0f * d03 - 2.0f * d04 + 2.0f * d13 - 2.0f * d14 + 2.0f * d23 - 2.0f * d24 + 2.0f * d33 - 2.0f * d34 + 2.0f * d43 - 2.0f * d44;
+
+ out00 += k0 + d00 + d02 + d10 + d12 + d20 + d22 + d30 + d32 + d40 + d42;
+ out01 += k1 - d02 - d12 - d22 - d32 - d42;
+ out02 += 4.0f * k0 + d02 + d12 + d22 + d32 + d42;
+ out03 += 4.0f * k1 - d02 - d12 - d22 - d32 - d42 + d05 + d15 + d25 + d35 + d45;
+
+ // Compute out10, out11, out12 and out13
+ float out10 = d11 - d21 + 2.0f * d31 - 2.0f * d41;
+ float out11 = d11 - d21 + 2.0f * d31 - 2.0f * d41;
+ float out12 = d11 - d21 + 2.0f * d31 - 2.0f * d41;
+ float out13 = d11 - d21 + 2.0f * d31 - 2.0f * d41;
+
+ k0 = d13 + d14 - d23 - d24 + 2.0f * d33 + 2.0f * d34 - 2.0f * d43 - 2.0f * d44;
+ k1 = 2.0f * d13 - 2.0f * d14 - 2.0f * d23 + 2.0f * d24 + 4.0f * d33 - 4.0f * d34 - 4.0f * d43 + 4.0f * d44;
+
+ out10 += k0 + d10 + d12 - d20 - d22 + 2.0f * d30 + 2.0f * d32 - 2.0f * d40 - 2.0f * d42;
+ out11 += k1 - d12 + d22 - 2.0f * d32 + 2.0f * d42;
+ out12 += 4.0f * k0 + d12 - d22 + 2.0f * d32 - 2.0f * d42;
+ out13 += 4.0f * k1 - d12 + d15 + d22 - d25 - 2.0f * d32 + 2.0f * d35 + 2.0f * d42 - 2.0f * d45;
+
+ // Compute out20, out21, out22 and out23
+ float out20 = d11 + d21 + 4.0f * d31 + 4.0f * d41;
+ float out21 = d11 + d21 + 4.0f * d31 + 4.0f * d41;
+ float out22 = d11 + d21 + 4.0f * d31 + 4.0f * d41;
+ float out23 = d11 + d21 + 4.0f * d31 + 4.0f * d41;
+
+ k0 = d13 + d14 + d23 + d24 + 4.0f * d33 + 4.0f * d34 + 4.0f * d43 + 4.0f * d44;
+ k1 = 2.0f * d13 - 2.0f * d14 + 2.0f * d23 - 2.0f * d24 + 8.0f * d33 - 8.0f * d34 + 8.0f * d43 - 8.0f * d44;
+
+ out20 += k0 + d10 + d12 + d20 + d22 + 4.0f * d30 + 4.0f * d32 + 4.0f * d40 + 4.0f * d42;
+ out21 += k1 - d12 - d22 - 4.0f * d32 - 4.0f * d42;
+ out22 += 4.0f * k0 + d12 + d22 + 4.0f * d32 + 4.0f * d42;
+ out23 += 4.0f * k1 - d12 + d15 - d22 + d25 - 4.0f * d32 + 4.0f * d35 - 4.0f * d42 + 4.0f * d45;
+
+ // Compute out30, out31, out32 and out33
+ float out30 = d11 - d21 + 8.0f * d31 - 8.0f * d41 + d51;
+ float out31 = d11 - d21 + 8.0f * d31 - 8.0f * d41 + d51;
+ float out32 = d11 - d21 + 8.0f * d31 - 8.0f * d41 + d51;
+ float out33 = d11 - d21 + 8.0f * d31 - 8.0f * d41 + d51;
+
+ k0 = d13 + d14 - d23 - d24 + 8.0f * d33 + 8.0f * d34 - 8.0f * d43 - 8.0f * d44 + d53 + d54;
+ k1 = 2.0f * d13 - 2.0f * d14 - 2.0f * d23 + 2.0f * d24 + 16.0f * d33 - 16.0f * d34 - 16.0f * d43 + 16.0f * d44 + 2.0f * d53 - 2.0f * d54;
+
+ out30 += k0 + d10 + d12 - d20 - d22 + 8.0f * d30 + 8.0f * d32 - 8.0f * d40 - 8.0f * d42 + d50 + d52;
+ out31 += k1 - d12 + d22 - 8.0f * d32 + 8.0f * d42 - d52;
+ out32 += 4.0f * k0 + d12 - d22 + 8.0f * d32 - 8.0f * d42 + d52;
+ out33 += 4.0f * k1 - d12 + d15 + d22 - d25 - 8.0f * d32 + 8.0f * d35 + 8.0f * d42 - 8.0f * d45 - d52 + d55;
+#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+
+ int y_in = get_global_id(1);
+ int x_out = (y_in % NUM_TILES_X) * OUTPUT_TILE_W;
+ int y_out = (y_in / NUM_TILES_X) * OUTPUT_TILE_H;
+ int z_out = get_global_id(0);
+#if defined(SRC_DEPTH)
+ int batch = get_global_id(2) / SRC_DEPTH;
+#endif /* defined(SRC_DEPTH) */
+
+#if defined(HAS_BIAS)
+ // Add bias
+ Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);
+
+ float b = (float) * ((__global DATA_TYPE *)(vector_offset(&bias, z_out)));
+
+ out00 += (float)b;
+ out01 += (float)b;
+ out02 += (float)b;
+ out03 += (float)b;
+#endif // defined(HAS_BIAS)
+
+ // Get output address
+#if defined(SRC_DEPTH)
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_out * sizeof(DATA_TYPE) + y_out * dst_stride_y + z_out * dst_stride_z + batch * dst_stride_w;
+#else /* defined(SRC_DEPTH) */
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_out * sizeof(DATA_TYPE) + y_out * dst_stride_y + z_out * dst_stride_z;
+#endif /* defined(SRC_DEPTH) */
+
+ // Store the output tile
+ const VEC_DATA_TYPE(DATA_TYPE, 4)
+ out0_dt = CONVERT(ACTIVATION(ACTIVATION_TYPE, float, VEC_SIZE, (VEC_DATA_TYPE(float, 4))(out00, out01, out02, out03), A_VAL, B_VAL), VEC_DATA_TYPE(DATA_TYPE, 4));
+
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+ *((__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y)) = out0_dt.s0;
+ *((__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y)) = out0_dt.s1;
+ *((__global DATA_TYPE *)(dst_addr + 2 * dst_stride_y)) = out0_dt.s2;
+ *((__global DATA_TYPE *)(dst_addr + 3 * dst_stride_y)) = out0_dt.s3;
+#else // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+ vstore4(out0_dt, 0, (__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y));
+#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+
+#if !defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+#if defined(HAS_BIAS)
+ // Add bias
+ out10 += (float)b;
+ out11 += (float)b;
+ out12 += (float)b;
+ out13 += (float)b;
+
+ out20 += (float)b;
+ out21 += (float)b;
+ out22 += (float)b;
+ out23 += (float)b;
+
+ out30 += (float)b;
+ out31 += (float)b;
+ out32 += (float)b;
+ out33 += (float)b;
+#endif // defined(HAS_BIAS)
+ vstore4(CONVERT(ACTIVATION(ACTIVATION_TYPE, float, VEC_SIZE, (VEC_DATA_TYPE(float, 4))(out10, out11, out12, out13), A_VAL, B_VAL), VEC_DATA_TYPE(DATA_TYPE, 4)), 0,
+ (__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y));
+ vstore4(CONVERT(ACTIVATION(ACTIVATION_TYPE, float, VEC_SIZE, (VEC_DATA_TYPE(float, 4))(out20, out21, out22, out23), A_VAL, B_VAL), VEC_DATA_TYPE(DATA_TYPE, 4)), 0,
+ (__global DATA_TYPE *)(dst_addr + 2 * dst_stride_y));
+ vstore4(CONVERT(ACTIVATION(ACTIVATION_TYPE, float, VEC_SIZE, (VEC_DATA_TYPE(float, 4))(out30, out31, out32, out33), A_VAL, B_VAL), VEC_DATA_TYPE(DATA_TYPE, 4)), 0,
+ (__global DATA_TYPE *)(dst_addr + 3 * dst_stride_y));
+#endif // !defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+}
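+
+// Illustrative build options for the kernel above (a sketch only; the exact option set is assembled by
+// the host-side configuration, and the values here are assumptions):
+//   -DDATA_TYPE=float -DVEC_SIZE=4 -DNUM_TILES_X=16 -DOUTPUT_TILE_W=4 -DOUTPUT_TILE_H=4
+//   -DACTIVATION_TYPE=relu -DA_VAL=0.0f -DB_VAL=0.0f [-DSRC_DEPTH=16] [-DHAS_BIAS]
+// For the 4x1/1x4 variants, -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL or -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL
+// is added and the output tile width/height change accordingly.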
+
+#define COMPUTE_TMP_COL(col, d0, d1, d2, d3, d4, d5, d6, d7, comm_fact) \
+ ({ \
+ comm_fact.s0 = d1 + d2; \
+ comm_fact.s1 = d3 + d4; \
+ comm_fact.s2 = d5 + d6; \
+ \
+ col.s0 = comm_fact.s0 + comm_fact.s1 + 8.f * comm_fact.s2 + d0; \
+ col.s2 = comm_fact.s0 + 4.f * comm_fact.s1 + 2.f * comm_fact.s2; \
+ \
+ comm_fact.s0 = d1 - d2; \
+ comm_fact.s1 = d3 - d4; \
+ comm_fact.s2 = d5 - d6; \
+ \
+ col.s1 = comm_fact.s0 + 2.f * comm_fact.s1 + 4.f * comm_fact.s2; \
+ col.s3 = comm_fact.s0 + 8.f * comm_fact.s1 + comm_fact.s2 + d7; \
+ })
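+
+// Written as a matrix product, COMPUTE_TMP_COL applies the following 4x8 transform (read directly from
+// the expressions above) to the column vector (d0, ..., d7):
+//
+//   |  1   1   1   1   1   8   8   0 |
+//   |  0   1  -1   2  -2   4  -4   0 |
+//   |  0   1   1   4   4   2   2   0 |
+//   |  0   1  -1   8  -8   1  -1   1 |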
+
+/** This OpenCL kernel performs Winograd output transform when the output tile is 4x4/4x1 or 1x4, the filter size 5x5/5x1 or 1x5 and the data layout is NCHW
+ *
+ * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
+ * @note If this kernel is used to perform Winograd output transform 5x1, -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note If this kernel is used to perform Winograd output transform 1x5, -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void winograd_output_transform_4x4_5x5_nchw(
+ TENSOR4D_DECLARATION(src),
+ TENSOR4D_DECLARATION(dst)
+#if defined(HAS_BIAS)
+ ,
+ VECTOR_DECLARATION(bias)
+#endif // defined(HAS_BIAS)
+)
+{
+ // Each thread stores a 4x4/4x1 or 1x4 tile
+#if defined(SRC_DEPTH)
+ Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DEPTH);
+ const __global uchar *src_addr = tensor4D_offset(&src, 0, 0, 0, 0);
+#else /* defined(SRC_DEPTH) */
+
+ Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
+ const __global uchar *src_addr = tensor3D_offset(&src, 0, 0, 0);
+#endif /* defined(SRC_DEPTH) */
+
+ // Compute output address
+ int y_in = get_global_id(1);
+ int x_out = (y_in % NUM_TILES_X) * OUTPUT_TILE_W;
+ int y_out = (y_in / NUM_TILES_X) * OUTPUT_TILE_H;
+ int z_out = get_global_id(0);
+#if defined(SRC_DEPTH)
+ int batch = get_global_id(2) / SRC_DEPTH;
+#endif /* defined(SRC_DEPTH) */
+
+#if defined(SRC_DEPTH)
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_out * sizeof(DATA_TYPE) + y_out * dst_stride_y + z_out * dst_stride_z + batch * dst_stride_w;
+#else /* defined(SRC_DEPTH) */
+
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_out * sizeof(DATA_TYPE) + y_out * dst_stride_y + z_out * dst_stride_z;
+#endif /* defined(SRC_DEPTH) */
+
+ // Load the values across the channels to compose the input tile
+ DATA_TYPE d00 = *((__global DATA_TYPE *)(src_addr + 0 * src_stride_z));
+ DATA_TYPE d01 = *((__global DATA_TYPE *)(src_addr + 1 * src_stride_z));
+ DATA_TYPE d02 = *((__global DATA_TYPE *)(src_addr + 2 * src_stride_z));
+ DATA_TYPE d03 = *((__global DATA_TYPE *)(src_addr + 3 * src_stride_z));
+ DATA_TYPE d04 = *((__global DATA_TYPE *)(src_addr + 4 * src_stride_z));
+ DATA_TYPE d05 = *((__global DATA_TYPE *)(src_addr + 5 * src_stride_z));
+ DATA_TYPE d06 = *((__global DATA_TYPE *)(src_addr + 6 * src_stride_z));
+ DATA_TYPE d07 = *((__global DATA_TYPE *)(src_addr + 7 * src_stride_z));
+
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+ // Compute out00, out01, out02 and out03
+ float out00 = d00 + d01 + d02 + d03 + d04 + 8.0f * d05 + 8.0f * d06;
+ float out01 = d01 - d02 + 2.0f * d03 - 2.0f * d04 + 4.0f * d05 - 4.0f * d06;
+ float out02 = d01 + d02 + 4.0f * d03 + 4.0f * d04 + 2.0f * d05 + 2.0f * d06;
+ float out03 = d01 - d02 + 8.0f * d03 - 8.0f * d04 + d05 - d06 + d07;
+
+#if defined(HAS_BIAS)
+ // Add bias
+ Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);
+
+ float b = (float) * ((__global DATA_TYPE *)(vector_offset(&bias, z_out)));
+
+ out00 += (DATA_TYPE)b;
+ out01 += (DATA_TYPE)b;
+ out02 += (DATA_TYPE)b;
+ out03 += (DATA_TYPE)b;
+#endif // defined(HAS_BIAS)
+
+ // Store the output tile
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ out0_dt = CONVERT(ACTIVATION(ACTIVATION_TYPE, float, VEC_SIZE, (VEC_DATA_TYPE(float, 4))(out00, out01, out02, out03), A_VAL,
+ B_VAL),
+ VEC_DATA_TYPE(DATA_TYPE, 4));
+ *((__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y)) = out0_dt.s0;
+ *((__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y)) = out0_dt.s1;
+ *((__global DATA_TYPE *)(dst_addr + 2 * dst_stride_y)) = out0_dt.s2;
+ *((__global DATA_TYPE *)(dst_addr + 3 * dst_stride_y)) = out0_dt.s3;
+#else // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+ vstore4(CONVERT(ACTIVATION(ACTIVATION_TYPE, float, VEC_SIZE, (VEC_DATA_TYPE(float, 4))(out00, out01, out02, out03), A_VAL, B_VAL), VEC_DATA_TYPE(DATA_TYPE, 4)),
+ 0, (__global DATA_TYPE *)(dst_addr));
+#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+
+#else // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+
+ DATA_TYPE d10 = *((__global DATA_TYPE *)(src_addr + 8 * src_stride_z));
+ DATA_TYPE d11 = *((__global DATA_TYPE *)(src_addr + 9 * src_stride_z));
+ DATA_TYPE d12 = *((__global DATA_TYPE *)(src_addr + 10 * src_stride_z));
+ DATA_TYPE d13 = *((__global DATA_TYPE *)(src_addr + 11 * src_stride_z));
+ DATA_TYPE d14 = *((__global DATA_TYPE *)(src_addr + 12 * src_stride_z));
+ DATA_TYPE d15 = *((__global DATA_TYPE *)(src_addr + 13 * src_stride_z));
+ DATA_TYPE d16 = *((__global DATA_TYPE *)(src_addr + 14 * src_stride_z));
+ DATA_TYPE d17 = *((__global DATA_TYPE *)(src_addr + 15 * src_stride_z));
+
+ DATA_TYPE d20 = *((__global DATA_TYPE *)(src_addr + 16 * src_stride_z));
+ DATA_TYPE d21 = *((__global DATA_TYPE *)(src_addr + 17 * src_stride_z));
+ DATA_TYPE d22 = *((__global DATA_TYPE *)(src_addr + 18 * src_stride_z));
+ DATA_TYPE d23 = *((__global DATA_TYPE *)(src_addr + 19 * src_stride_z));
+ DATA_TYPE d24 = *((__global DATA_TYPE *)(src_addr + 20 * src_stride_z));
+ DATA_TYPE d25 = *((__global DATA_TYPE *)(src_addr + 21 * src_stride_z));
+ DATA_TYPE d26 = *((__global DATA_TYPE *)(src_addr + 22 * src_stride_z));
+ DATA_TYPE d27 = *((__global DATA_TYPE *)(src_addr + 23 * src_stride_z));
+
+ DATA_TYPE d30 = *((__global DATA_TYPE *)(src_addr + 24 * src_stride_z));
+ DATA_TYPE d31 = *((__global DATA_TYPE *)(src_addr + 25 * src_stride_z));
+ DATA_TYPE d32 = *((__global DATA_TYPE *)(src_addr + 26 * src_stride_z));
+ DATA_TYPE d33 = *((__global DATA_TYPE *)(src_addr + 27 * src_stride_z));
+ DATA_TYPE d34 = *((__global DATA_TYPE *)(src_addr + 28 * src_stride_z));
+ DATA_TYPE d35 = *((__global DATA_TYPE *)(src_addr + 29 * src_stride_z));
+ DATA_TYPE d36 = *((__global DATA_TYPE *)(src_addr + 30 * src_stride_z));
+ DATA_TYPE d37 = *((__global DATA_TYPE *)(src_addr + 31 * src_stride_z));
+
+ DATA_TYPE d40 = *((__global DATA_TYPE *)(src_addr + 32 * src_stride_z));
+ DATA_TYPE d41 = *((__global DATA_TYPE *)(src_addr + 33 * src_stride_z));
+ DATA_TYPE d42 = *((__global DATA_TYPE *)(src_addr + 34 * src_stride_z));
+ DATA_TYPE d43 = *((__global DATA_TYPE *)(src_addr + 35 * src_stride_z));
+ DATA_TYPE d44 = *((__global DATA_TYPE *)(src_addr + 36 * src_stride_z));
+ DATA_TYPE d45 = *((__global DATA_TYPE *)(src_addr + 37 * src_stride_z));
+ DATA_TYPE d46 = *((__global DATA_TYPE *)(src_addr + 38 * src_stride_z));
+ DATA_TYPE d47 = *((__global DATA_TYPE *)(src_addr + 39 * src_stride_z));
+
+ DATA_TYPE d50 = *((__global DATA_TYPE *)(src_addr + 40 * src_stride_z));
+ DATA_TYPE d51 = *((__global DATA_TYPE *)(src_addr + 41 * src_stride_z));
+ DATA_TYPE d52 = *((__global DATA_TYPE *)(src_addr + 42 * src_stride_z));
+ DATA_TYPE d53 = *((__global DATA_TYPE *)(src_addr + 43 * src_stride_z));
+ DATA_TYPE d54 = *((__global DATA_TYPE *)(src_addr + 44 * src_stride_z));
+ DATA_TYPE d55 = *((__global DATA_TYPE *)(src_addr + 45 * src_stride_z));
+ DATA_TYPE d56 = *((__global DATA_TYPE *)(src_addr + 46 * src_stride_z));
+ DATA_TYPE d57 = *((__global DATA_TYPE *)(src_addr + 47 * src_stride_z));
+
+ DATA_TYPE d60 = *((__global DATA_TYPE *)(src_addr + 48 * src_stride_z));
+ DATA_TYPE d61 = *((__global DATA_TYPE *)(src_addr + 49 * src_stride_z));
+ DATA_TYPE d62 = *((__global DATA_TYPE *)(src_addr + 50 * src_stride_z));
+ DATA_TYPE d63 = *((__global DATA_TYPE *)(src_addr + 51 * src_stride_z));
+ DATA_TYPE d64 = *((__global DATA_TYPE *)(src_addr + 52 * src_stride_z));
+ DATA_TYPE d65 = *((__global DATA_TYPE *)(src_addr + 53 * src_stride_z));
+ DATA_TYPE d66 = *((__global DATA_TYPE *)(src_addr + 54 * src_stride_z));
+ DATA_TYPE d67 = *((__global DATA_TYPE *)(src_addr + 55 * src_stride_z));
+
+ DATA_TYPE d70 = *((__global DATA_TYPE *)(src_addr + 56 * src_stride_z));
+ DATA_TYPE d71 = *((__global DATA_TYPE *)(src_addr + 57 * src_stride_z));
+ DATA_TYPE d72 = *((__global DATA_TYPE *)(src_addr + 58 * src_stride_z));
+ DATA_TYPE d73 = *((__global DATA_TYPE *)(src_addr + 59 * src_stride_z));
+ DATA_TYPE d74 = *((__global DATA_TYPE *)(src_addr + 60 * src_stride_z));
+ DATA_TYPE d75 = *((__global DATA_TYPE *)(src_addr + 61 * src_stride_z));
+ DATA_TYPE d76 = *((__global DATA_TYPE *)(src_addr + 62 * src_stride_z));
+ DATA_TYPE d77 = *((__global DATA_TYPE *)(src_addr + 63 * src_stride_z));
+
+ // Compute the 8x4 intermediate tensor
+ VEC_DATA_TYPE(float, 4)
+ comm_fact0, comm_fact1, comm_fact2;
+ VEC_DATA_TYPE(float, 4)
+ tmp_col0, tmp_col1, tmp_col2, tmp_col3, tmp_col4, tmp_col5, tmp_col6, tmp_col7;
+
+ COMPUTE_TMP_COL(tmp_col0, d00, d10, d20, d30, d40, d50, d60, d70, comm_fact0);
+ COMPUTE_TMP_COL(tmp_col1, d01, d11, d21, d31, d41, d51, d61, d71, comm_fact0);
+ COMPUTE_TMP_COL(tmp_col2, d02, d12, d22, d32, d42, d52, d62, d72, comm_fact0);
+ COMPUTE_TMP_COL(tmp_col3, d03, d13, d23, d33, d43, d53, d63, d73, comm_fact0);
+ COMPUTE_TMP_COL(tmp_col4, d04, d14, d24, d34, d44, d54, d64, d74, comm_fact0);
+ COMPUTE_TMP_COL(tmp_col5, d05, d15, d25, d35, d45, d55, d65, d75, comm_fact0);
+ COMPUTE_TMP_COL(tmp_col6, d06, d16, d26, d36, d46, d56, d66, d76, comm_fact0);
+ COMPUTE_TMP_COL(tmp_col7, d07, d17, d27, d37, d47, d57, d67, d77, comm_fact0);
+
+ // Compute the 4x4 output tile
+ comm_fact0 = tmp_col1 + tmp_col2;
+ comm_fact1 = tmp_col3 + tmp_col4;
+ comm_fact2 = tmp_col5 + tmp_col6;
+
+ VEC_DATA_TYPE(float, 4)
+ out_col0 = comm_fact0 + comm_fact1 + (float)8.f * comm_fact2 + tmp_col0;
+ VEC_DATA_TYPE(float, 4)
+ out_col2 = comm_fact0 + (float)4.f * comm_fact1 + (float)2.f * comm_fact2;
+
+ comm_fact0 = tmp_col1 - tmp_col2;
+ comm_fact1 = tmp_col3 - tmp_col4;
+ comm_fact2 = tmp_col5 - tmp_col6;
+
+ VEC_DATA_TYPE(float, 4)
+ out_col1 = comm_fact0 + (float)2.f * comm_fact1 + (float)4.f * comm_fact2;
+ VEC_DATA_TYPE(float, 4)
+ out_col3 = comm_fact0 + (float)8.f * comm_fact1 + comm_fact2 + tmp_col7;
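+
+ // At this point the transform has been applied twice: COMPUTE_TMP_COL reduced each of the 8 input
+ // columns to 4 values (tmp_col0..tmp_col7), and the comm_fact/out_col step above applied the same
+ // coefficients across those columns, so out_col0..out_col3 now hold the 4 columns of the 4x4 output tile.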
+
+#if defined(HAS_BIAS)
+ // Add bias
+ Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);
+
+ float b = (float) * ((__global DATA_TYPE *)(vector_offset(&bias, z_out)));
+
+ out_col0 += (VEC_DATA_TYPE(float, 4))b;
+ out_col1 += (VEC_DATA_TYPE(float, 4))b;
+ out_col2 += (VEC_DATA_TYPE(float, 4))b;
+ out_col3 += (VEC_DATA_TYPE(float, 4))b;
+#endif // defined(HAS_BIAS)
+
+ // Store the output tile
+ vstore4(CONVERT(ACTIVATION(ACTIVATION_TYPE, float, VEC_SIZE, (VEC_DATA_TYPE(float, 4))(out_col0.s0, out_col1.s0, out_col2.s0, out_col3.s0), A_VAL, B_VAL),
+ VEC_DATA_TYPE(DATA_TYPE, 4)),
+ 0, (__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y));
+ vstore4(CONVERT(ACTIVATION(ACTIVATION_TYPE, float, VEC_SIZE, (VEC_DATA_TYPE(float, 4))(out_col0.s1, out_col1.s1, out_col2.s1, out_col3.s1), A_VAL, B_VAL),
+ VEC_DATA_TYPE(DATA_TYPE, 4)),
+ 0, (__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y));
+ vstore4(CONVERT(ACTIVATION(ACTIVATION_TYPE, float, VEC_SIZE, (VEC_DATA_TYPE(float, 4))(out_col0.s2, out_col1.s2, out_col2.s2, out_col3.s2), A_VAL, B_VAL),
+ VEC_DATA_TYPE(DATA_TYPE, 4)),
+ 0, (__global DATA_TYPE *)(dst_addr + 2 * dst_stride_y));
+ vstore4(CONVERT(ACTIVATION(ACTIVATION_TYPE, float, VEC_SIZE, (VEC_DATA_TYPE(float, 4))(out_col0.s3, out_col1.s3, out_col2.s3, out_col3.s3), A_VAL, B_VAL),
+ VEC_DATA_TYPE(DATA_TYPE, 4)),
+ 0, (__global DATA_TYPE *)(dst_addr + 3 * dst_stride_y));
+#endif // !defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+}
+#endif // defined(VEC_SIZE) && VEC_SIZE == 4
+
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL)
+#if defined(VEC_SIZE) && VEC_SIZE == 2
+/** This OpenCL kernel performs Winograd output transform when the output tile is 2x1, the filter size 3x1 and the data layout is NCHW
+ *
+ * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=2
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=1
+ * @note -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void winograd_output_transform_2x1_3x1_nchw(
+ TENSOR4D_DECLARATION(src),
+ TENSOR4D_DECLARATION(dst)
+#if defined(HAS_BIAS)
+ ,
+ VECTOR_DECLARATION(bias)
+#endif // defined(HAS_BIAS)
+)
+{
+ winograd_output_transform_2x2_3x3_nchw(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_stride_w,
+ dst_step_w,
+ dst_offset_first_element_in_bytes
+#if defined(HAS_BIAS)
+ ,
+ bias_ptr,
+ bias_stride_x,
+ bias_step_x,
+ bias_offset_first_element_in_bytes
+#endif // defined(HAS_BIAS)
+ );
+}
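+
+// Note: this wrapper (like the other 1D wrappers in this file) simply forwards every tensor argument to
+// the 2D kernel; the 1D code path is selected by the WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL/VERTICAL define.
+// An illustrative (assumed) option set for this 2x1/3x1 case:
+//   -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL -DVEC_SIZE=2 -DOUTPUT_TILE_W=2 -DOUTPUT_TILE_H=1
+//   -DNUM_TILES_X=16 -DDATA_TYPE=float -DACTIVATION_TYPE=relu -DA_VAL=0.0f -DB_VAL=0.0f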
+
+#endif // defined(VEC_SIZE) && VEC_SIZE == 2
+
+#if defined(VEC_SIZE) && VEC_SIZE == 4
+/** This OpenCL kernel performs Winograd output transform when the output tile is 4x1, the filter size 3x1 and the data layout is NCHW
+ *
+ * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=1
+ * @note -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void winograd_output_transform_4x1_3x1_nchw(
+ TENSOR4D_DECLARATION(src),
+ TENSOR4D_DECLARATION(dst)
+#if defined(HAS_BIAS)
+ ,
+ VECTOR_DECLARATION(bias)
+#endif // defined(HAS_BIAS)
+)
+{
+ winograd_output_transform_4x4_3x3_nchw(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_stride_w,
+ dst_step_w,
+ dst_offset_first_element_in_bytes
+#if defined(HAS_BIAS)
+ ,
+ bias_ptr,
+ bias_stride_x,
+ bias_step_x,
+ bias_offset_first_element_in_bytes
+#endif // defined(HAS_BIAS)
+ );
+}
+
+/** This OpenCL kernel performs Winograd output transform when the output tile is 4x1, the filter size 5x1 and the data layout is NCHW
+ *
+ * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=1
+ * @note -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void winograd_output_transform_4x1_5x1_nchw(
+ TENSOR4D_DECLARATION(src),
+ TENSOR4D_DECLARATION(dst)
+#if defined(HAS_BIAS)
+ ,
+ VECTOR_DECLARATION(bias)
+#endif // defined(HAS_BIAS)
+)
+{
+ winograd_output_transform_4x4_5x5_nchw(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_stride_w,
+ dst_step_w,
+ dst_offset_first_element_in_bytes
+#if defined(HAS_BIAS)
+ ,
+ bias_ptr,
+ bias_stride_x,
+ bias_step_x,
+ bias_offset_first_element_in_bytes
+#endif // defined(HAS_BIAS)
+ );
+}
+
+#endif // defined(VEC_SIZE) && VEC_SIZE == 4
+#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL)
+
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+#if defined(VEC_SIZE) && VEC_SIZE == 2
+/** This OpenCL kernel performs Winograd output transform when the output tile is 1x2, the filter size 1x3 and the data layout is NCHW
+ *
+ * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=1
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=2
+ * @note -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void winograd_output_transform_1x2_1x3_nchw(
+ TENSOR4D_DECLARATION(src),
+ TENSOR4D_DECLARATION(dst)
+#if defined(HAS_BIAS)
+ ,
+ VECTOR_DECLARATION(bias)
+#endif // defined(HAS_BIAS)
+)
+{
+ winograd_output_transform_2x2_3x3_nchw(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_stride_w,
+ dst_step_w,
+ dst_offset_first_element_in_bytes
+#if defined(HAS_BIAS)
+ ,
+ bias_ptr,
+ bias_stride_x,
+ bias_step_x,
+ bias_offset_first_element_in_bytes
+#endif // defined(HAS_BIAS)
+ );
+}
+
+#endif // defined(VEC_SIZE) && VEC_SIZE == 2
+
+#if defined(VEC_SIZE) && VEC_SIZE == 4
+/** This OpenCL kernel performs Winograd output transform when the output tile is 1x4, the filter size 1x3 and the data layout is NCHW
+ *
+ * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=1
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
+ * @note -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void winograd_output_transform_1x4_1x3_nchw(
+ TENSOR4D_DECLARATION(src),
+ TENSOR4D_DECLARATION(dst)
+#if defined(HAS_BIAS)
+ ,
+ VECTOR_DECLARATION(bias)
+#endif // defined(HAS_BIAS)
+)
+{
+ winograd_output_transform_4x4_3x3_nchw(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_stride_w,
+ dst_step_w,
+ dst_offset_first_element_in_bytes
+#if defined(HAS_BIAS)
+ ,
+ bias_ptr,
+ bias_stride_x,
+ bias_step_x,
+ bias_offset_first_element_in_bytes
+#endif // defined(HAS_BIAS)
+ );
+}
+
+/** This OpenCL kernel performs Winograd output transform when the output tile is 1x4, the filter size 1x5 and the data layout is NCHW
+ *
+ * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=1
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
+ * @note -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void winograd_output_transform_1x4_1x5_nchw(
+ TENSOR4D_DECLARATION(src),
+ TENSOR4D_DECLARATION(dst)
+#if defined(HAS_BIAS)
+ ,
+ VECTOR_DECLARATION(bias)
+#endif // defined(HAS_BIAS)
+)
+{
+ winograd_output_transform_4x4_5x5_nchw(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_stride_w,
+ dst_step_w,
+ dst_offset_first_element_in_bytes
+#if defined(HAS_BIAS)
+ ,
+ bias_ptr,
+ bias_stride_x,
+ bias_step_x,
+ bias_offset_first_element_in_bytes
+#endif // defined(HAS_BIAS)
+ );
+}
+
+#endif // defined(VEC_SIZE) && VEC_SIZE == 4
+#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+#endif // defined(NUM_TILES_X) && defined(OUTPUT_TILE_W) && defined(OUTPUT_TILE_H)
diff --git a/src/core/CL/cl_kernels/depth_to_space.cl b/src/core/CL/cl_kernels/nhwc/batch_to_space.cl
index d3231a59a1..b910a753a6 100644
--- a/src/core/CL/cl_kernels/depth_to_space.cl
+++ b/src/core/CL/cl_kernels/nhwc/batch_to_space.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,14 +23,16 @@
*/
#include "helpers.h"
-#if defined(DATA_TYPE) && defined(BLOCK_SHAPE) && defined(CHANNEL_SIZE)
-/** Batch to space transformation. (NCHW)
+#if defined(DATA_TYPE) && defined(BATCH_SIZE)
+/** Batch to space transformation. (NHWC)
*
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
- * @note The input tensor batch size must be passed at compile time using -DCHANNEL_SIZE. e.g. -DCHANNEL_SIZE=2
- * @note The block shape must be passed at compile time using -DBLOCK_SHAPE. e.g. -DBLOCK_SHAPE=2
+ * @note The input tensor batch size must be passed at compile time using -DBATCH_SIZE. e.g. -DBATCH_SIZE=2
+ *
+ * @deprecated This method for dynamic block shape is not fully mature and will be removed in the 23.08 release
*
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: All.
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: All
* @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
* @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
@@ -39,6 +41,12 @@
* @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
* @param[in] batch_id The input tensor batch id
+ * @param[in] block_shape_ptr Pointer to the block shape tensor. Supported data types: S32
+ * @param[in] block_shape_stride_x Stride of the block shape tensor in X dimension (in bytes)
+ * @param[in] block_shape_step_x block_shape_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] block_shape_stride_y Stride of the block shape tensor in Y dimension (in bytes)
+ * @param[in] block_shape_step_y block_shape_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] block_shape_offset_first_element_in_bytes The offset of the first element in the block shape tensor
* @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
* @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
* @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
@@ -48,31 +56,40 @@
* @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
*/
-__kernel void depth_to_space_nchw(
- TENSOR3D_DECLARATION(input),
+__kernel void batch_to_space_nhwc(
+ TENSOR4D_DECLARATION(input),
const int batch_id,
- TENSOR4D_DECLARATION(output))
+ VECTOR_DECLARATION(block_shape),
+ TENSOR3D_DECLARATION(output))
{
- Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output, 0);
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
+ Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input);
+ Vector block = CONVERT_TO_VECTOR_STRUCT_NO_STEP(block_shape);
- const int r = (CHANNEL_SIZE / (BLOCK_SHAPE * BLOCK_SHAPE));
- const int x = get_global_id(0);
- const int y = get_global_id(1);
- const int z = get_global_id(2) % r;
+ const int block_x = *((__global int *)vector_offset(&block, 0));
+ const int block_y = *((__global int *)vector_offset(&block, 1));
- const int out_x = x * BLOCK_SHAPE + (get_global_id(2) / r) % BLOCK_SHAPE;
- const int out_y = y * BLOCK_SHAPE + (get_global_id(2) / r) / BLOCK_SHAPE;
+ const int x = get_global_id(1);
+ const int y = get_global_id(2);
+ const int z = get_global_id(0);
- *((__global DATA_TYPE *)tensor4D_offset(&out, out_x, out_y, z, batch_id)) = *((__global DATA_TYPE *)in.ptr);
+ const int in_batch = batch_id + ((x % block_x) + (y % block_y) * (block_x)) * BATCH_SIZE;
+ const int in_x = x / block_x;
+ const int in_y = y / block_y;
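+
+ // Worked example (assumed values): with -DBATCH_SIZE=2 and a runtime block shape of (2, 2), the output
+ // element at (x, y) = (3, 1) of batch_id 0 is read from input batch 0 + (3 % 2 + (1 % 2) * 2) * 2 = 6,
+ // at (in_x, in_y) = (1, 0).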
+
+ *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)tensor4D_offset(&in, z, in_x, in_y, in_batch));
}
+#endif // defined(DATA_TYPE) && defined(BATCH_SIZE)
+
+#if defined(DATA_TYPE) && defined(BATCH_SIZE) && defined(BLOCK_SHAPE_X) && defined(BLOCK_SHAPE_Y)
/** Batch to space transformation. (NHWC)
*
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
- * @note The input tensor batch size must be passed at compile time using -DCHANNEL_SIZE. e.g. -DCHANNEL_SIZE=2
- * @note The block shape must be passed at compile time using -DBLOCK_SHAPE. e.g. -DBLOCK_SHAPE=2
+ * @note The input tensor batch size must be passed at compile time using -DBATCH_SIZE. e.g. -DBATCH_SIZE=2
+ * @note The block shape x must be passed at compile time using -DBLOCK_SHAPE_X. e.g. -DBLOCK_SHAPE_X=2
+ * @note The block shape y must be passed at compile time using -DBLOCK_SHAPE_Y. e.g. -DBLOCK_SHAPE_Y=2
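+ * @note The crop offsets must be passed at compile time using -DCROP_LEFT and -DCROP_TOP. e.g. -DCROP_LEFT=0 -DCROP_TOP=0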
*
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: All.
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: All
* @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
* @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
@@ -90,22 +107,25 @@ __kernel void depth_to_space_nchw(
* @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
*/
-__kernel void depth_to_space_nhwc(
- TENSOR3D_DECLARATION(input),
+__kernel void batch_to_space_static_nhwc(
+ TENSOR4D_DECLARATION(input),
const int batch_id,
- TENSOR4D_DECLARATION(output))
+ TENSOR3D_DECLARATION(output))
{
- Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output, 0);
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
+ Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input);
- const int r = (CHANNEL_SIZE / (BLOCK_SHAPE * BLOCK_SHAPE));
- const int x = get_global_id(1);
- const int y = get_global_id(2);
- const int z = get_global_id(0) % r;
+ const int block_x = BLOCK_SHAPE_X;
+ const int block_y = BLOCK_SHAPE_Y;
+
+ const int x = get_global_id(1) + CROP_LEFT;
+ const int y = get_global_id(2) + CROP_TOP;
+ const int z = get_global_id(0);
- const int out_x = x * BLOCK_SHAPE + (get_global_id(0) / r) % BLOCK_SHAPE;
- const int out_y = y * BLOCK_SHAPE + (get_global_id(0) / r) / BLOCK_SHAPE;
+ const int in_batch = batch_id + ((x % block_x) + (y % block_y) * (block_x)) * BATCH_SIZE;
+ const int in_x = x / block_x;
+ const int in_y = y / block_y;
- *((__global DATA_TYPE *)tensor4D_offset(&out, z, out_x, out_y, batch_id)) = *((__global DATA_TYPE *)in.ptr);
+ *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)tensor4D_offset(&in, z, in_x, in_y, in_batch));
}
-#endif // defined(DATA_TYPE) && defined(BLOCK_SHAPE) && defined(CHANNEL_SIZE) \ No newline at end of file
+#endif // defined(DATA_TYPE) && defined(BATCH_SIZE) && defined(BLOCK_SHAPE_X) && defined(BLOCK_SHAPE_Y)
diff --git a/src/core/CL/cl_kernels/nhwc/batchnormalization_layer.cl b/src/core/CL/cl_kernels/nhwc/batchnormalization_layer.cl
new file mode 100644
index 0000000000..cb2da1bd99
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/batchnormalization_layer.cl
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2017-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#define ADD_OP(a, b) ((a) + (b))
+#define SUB_OP(a, b) ((a) - (b))
+#define MUL_OP(a, b) ((a) * (b))
+#define INVSQRT_OP(a) rsqrt((a))
+#define SQCVT_SAT(a) (a)
+
+#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(ACTIVATION_TYPE)
+#include "activation_float_helpers.h"
+
+/** Apply batch normalization on tensors with NHWC format.
+ *
+ * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
+ * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
+ *
+ * @param[in] input_ptr Pointer to the first source tensor. Supported data types: F16/F32
+ * @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the first source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the first source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] mean_ptr Pointer to the mean source tensor. Supported data types: same as @p input_ptr
+ * @param[in] mean_stride_x Stride of the mean source tensor in X dimension (in bytes)
+ * @param[in] mean_step_x mean_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] mean_offset_first_element_in_bytes The offset of the first element in the mean source tensor
+ * @param[in] var_ptr Pointer to the var tensor. Supported data types: same as @p input_ptr
+ * @param[in] var_stride_x Stride of the var tensor in X dimension (in bytes)
+ * @param[in] var_step_x var_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] var_offset_first_element_in_bytes The offset of the first element in the var source tensor
+ * @param[in] beta_ptr Pointer to the beta source tensor. Supported data types: same as @p input_ptr
+ * @param[in] beta_stride_x Stride of the beta source tensor in X dimension (in bytes)
+ * @param[in] beta_step_x beta_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] beta_offset_first_element_in_bytes The offset of the first element in the beta source tensor
+ * @param[in] gamma_ptr Pointer to the gamma source tensor. Supported data types: same as @p input_ptr
+ * @param[in] gamma_stride_x Stride of the gamma source tensor in X dimension (in bytes)
+ * @param[in] gamma_step_x gamma_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] gamma_offset_first_element_in_bytes The offset of the first element in the gamma source tensor
+ * @param[in] epsilon Epsilon parameter in the batch normalization equation
+ */
+__kernel void batchnormalization_layer_nhwc(TENSOR3D_DECLARATION(input),
+#ifndef IN_PLACE
+ TENSOR3D_DECLARATION(output),
+#endif /* not IN_PLACE */
+ VECTOR_DECLARATION(mean),
+ VECTOR_DECLARATION(var),
+#ifndef USE_DEFAULT_BETA
+ VECTOR_DECLARATION(beta),
+#endif /* USE_DEFAULT_BETA */
+#ifndef USE_DEFAULT_GAMMA
+ VECTOR_DECLARATION(gamma),
+#endif /* USE_DEFAULT_GAMMA */
+ float epsilon)
+{
+ uint x_offs = max((int)(get_global_id(0) * VEC_SIZE * sizeof(DATA_TYPE) - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE * sizeof(DATA_TYPE)), 0);
+
+ __global uchar *input_addr = input_ptr + input_offset_first_element_in_bytes + x_offs + get_global_id(1) * input_stride_y + get_global_id(2) * input_stride_z;
+#ifdef IN_PLACE
+ __global uchar *output_addr = input_ptr;
+#else /* IN_PLACE */
+ __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x_offs + get_global_id(1) * output_stride_y + get_global_id(2) * output_stride_z;
+#endif /* IN_PLACE */
+ __global uchar *mean_addr = mean_ptr + mean_offset_first_element_in_bytes + x_offs;
+ __global uchar *var_addr = var_ptr + var_offset_first_element_in_bytes + x_offs;
+#ifndef USE_DEFAULT_BETA
+ __global uchar *beta_addr = beta_ptr + beta_offset_first_element_in_bytes + x_offs;
+#endif /* USE_DEFAULT_BETA */
+#ifndef USE_DEFAULT_GAMMA
+ __global uchar *gamma_addr = gamma_ptr + gamma_offset_first_element_in_bytes + x_offs;
+#endif /* USE_DEFAULT_GAMMA */
+
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ data = 0;
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ denominator = 0;
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ numerator = 0;
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ x_bar = 0;
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ res0 = 0;
+
+ data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input_addr);
+ denominator = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)var_addr);
+ denominator = INVSQRT_OP(ADD_OP(denominator, ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))SQCVT_SAT(epsilon))));
+
+ // Calculate x bar and store results
+ numerator = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)mean_addr);
+ numerator = SUB_OP(data, numerator);
+ x_bar = MUL_OP(numerator, denominator);
+
+#ifndef USE_DEFAULT_GAMMA
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ gamma_vec = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)gamma_addr);
+
+ res0 = MUL_OP(gamma_vec, x_bar);
+#else /* USE_DEFAULT_GAMMA */
+ // gamma is equal to 1, no need to perform multiplications
+ res0 = x_bar;
+#endif /* USE_DEFAULT_GAMMA */
+
+#ifndef USE_DEFAULT_BETA
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ beta_vec = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)beta_addr);
+ // beta is not zero, hence we need to perform the addition
+ res0 = ADD_OP(res0, beta_vec);
+#endif /* USE_DEFAULT_BETA */
+
+ res0 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, res0, A_VAL, B_VAL);
+
+ STORE_VECTOR_SELECT(res, DATA_TYPE, output_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
+}
+#endif /* defined(VEC_SIZE) && defined(DATA_TYPE) && defined(ACTIVATION_TYPE) */ \ No newline at end of file
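Per element, the kernel above applies the standard batch normalization followed by the optional fused activation: out = ACT(gamma * (x - mean) * rsqrt(var + epsilon) + beta), with the gamma multiply and beta add skipped when USE_DEFAULT_GAMMA / USE_DEFAULT_BETA are defined. A minimal scalar C sketch of the same arithmetic; the function name is an assumption for illustration, not an ACL API:

#include <math.h>
#include <stdio.h>

/* Scalar reference of the per-channel computation in batchnormalization_layer_nhwc. */
static float batchnorm_ref(float x, float mean, float var, float beta, float gamma, float epsilon)
{
    const float denominator = 1.0f / sqrtf(var + epsilon); /* INVSQRT_OP(ADD_OP(var, epsilon)) */
    const float x_bar       = (x - mean) * denominator;    /* normalised input                 */
    return gamma * x_bar + beta;                           /* scale and shift                  */
}

int main(void)
{
    /* One element of one channel, with example statistics. */
    printf("%f\n", batchnorm_ref(0.5f, 0.1f, 0.04f, 0.2f, 1.5f, 0.001f));
    return 0;
}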
diff --git a/src/core/CL/cl_kernels/nhwc/channel_shuffle.cl b/src/core/CL/cl_kernels/nhwc/channel_shuffle.cl
new file mode 100644
index 0000000000..233beb3aa9
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/channel_shuffle.cl
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+#include "tile_helpers.h"
+
+#if defined(DATA_TYPE) && defined(VEC_SIZE) && defined(NUM_GROUPS) && defined(K) && defined(SRC_DIM_Z)
+
+// Check valid VEC_SIZES
+#if VEC_SIZE != 1 && VEC_SIZE != 2 && VEC_SIZE != 3 && VEC_SIZE != 4 && VEC_SIZE != 8 && VEC_SIZE != 16
+#error "Only vector sizes 1, 2, 3, 4, 8 and 16 are supported"
+#endif // VEC_SIZE != 1 && VEC_SIZE != 2 && VEC_SIZE != 3 && VEC_SIZE != 4 && VEC_SIZE != 8 && VEC_SIZE != 16
+
+#define DIV_MOD_UINT(x, y, div_res, mod_res) \
+ ({ \
+ div_res = (uint)((x) * (float)(1.0f / (float)(y))); \
+ uint r = div_res * (y); \
+ mod_res = (x)-r; \
+ })
+
+#if defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER) && defined(SRC_DIM_X)
+
+/** Performs channel shuffle when the data layout is NHWC. See https://arxiv.org/pdf/1707.01083.pdf for details.
+ *
+ * @note The vector size must be given as a preprocessor argument using -DVEC_SIZE=num. e.g. -DVEC_SIZE=4
+ * @note The third dimension of the tensor must be given as a preprocessor argument using -DSRC_DIM_Z=num. e.g. -DSRC_DIM_Z=64
+ * @note The first dimension of the tensor must be given as a preprocessor argument using -DSRC_DIM_X=num. e.g. -DSRC_DIM_X=64
+ * @note The number of groups must be given as a preprocessor argument using -DNUM_GROUPS=num_groups. e.g. -DNUM_GROUPS=2
+ * @note The number of channels in each group must be given as a preprocessor argument using -DK=num. e.g. -DK=1
+ * K is equal to num_channels / num_groups.
+ * @note The leftover size in the X dimension should be given as a preprocessor argument using -DVEC_SIZE_LEFTOVER, i.e. x_dimension % VEC_SIZE. e.g. -DVEC_SIZE_LEFTOVER=1
+ *
+ * @param[in] src_ptr Pointer to the source matrix. Supported data types: All
+ * @param[in] src_stride_x Stride of the first source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the first source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the first source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the first source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the first source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w output_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void channel_shuffle_nhwc(TENSOR4D_DECLARATION(src),
+ TENSOR4D_DECLARATION(dst))
+{
+ // Offset computation
+ const uint curr_out_channel = GET_SPATIAL_IDX(0, VEC_SIZE, VEC_SIZE_LEFTOVER); // output feature map
+
+ uint z = 0;
+ uint batch_id = 0;
+ // Compute curr_channel and batch_id
+ DIV_MOD_UINT(get_global_id(2), (uint)SRC_DIM_Z, batch_id, z);
+
+ VEC_DATA_TYPE(uint, VEC_SIZE)
+ curr_out_channels = (VEC_DATA_TYPE(uint, VEC_SIZE))(curr_out_channel) + VEC_OFFS(uint, VEC_SIZE);
+
+ VEC_DATA_TYPE(uint, VEC_SIZE)
+ in_channels = (curr_out_channels * (VEC_DATA_TYPE(uint, VEC_SIZE))(K)) % (VEC_DATA_TYPE(uint, VEC_SIZE))(SRC_DIM_X) + (curr_out_channels / (VEC_DATA_TYPE(uint, VEC_SIZE))(NUM_GROUPS));
+
+ // Load the values
+ const __global DATA_TYPE *input_ptr = (const __global DATA_TYPE *)(src_ptr + src_offset_first_element_in_bytes + get_global_id(1) * src_stride_y + z * src_stride_z + batch_id * src_stride_w);
+
+#if VEC_SIZE == 1
+ DATA_TYPE out0 = *((const __global DATA_TYPE *)(input_ptr) + in_channels);
+#elif VEC_SIZE == 2
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ out0 =
+ {
+ *(input_ptr + in_channels.s0),
+ *(input_ptr + in_channels.s1)
+ };
+#elif VEC_SIZE == 3
+ VEC_DATA_TYPE(DATA_TYPE, 3)
+ out0 =
+ {
+ *(input_ptr + in_channels.s0),
+ *(input_ptr + in_channels.s1),
+ *(input_ptr + in_channels.s2)
+ };
+#elif VEC_SIZE == 4
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ out0 =
+ {
+ *(input_ptr + in_channels.s0),
+ *(input_ptr + in_channels.s1),
+ *(input_ptr + in_channels.s2),
+ *(input_ptr + in_channels.s3)
+ };
+#elif VEC_SIZE == 8
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ out0 =
+ {
+ *(input_ptr + in_channels.s0),
+ *(input_ptr + in_channels.s1),
+ *(input_ptr + in_channels.s2),
+ *(input_ptr + in_channels.s3),
+ *(input_ptr + in_channels.s4),
+ *(input_ptr + in_channels.s5),
+ *(input_ptr + in_channels.s6),
+ *(input_ptr + in_channels.s7)
+ };
+#elif VEC_SIZE == 16
+ VEC_DATA_TYPE(DATA_TYPE, 16)
+ out0 =
+ {
+ *(input_ptr + in_channels.s0),
+ *(input_ptr + in_channels.s1),
+ *(input_ptr + in_channels.s2),
+ *(input_ptr + in_channels.s3),
+ *(input_ptr + in_channels.s4),
+ *(input_ptr + in_channels.s5),
+ *(input_ptr + in_channels.s6),
+ *(input_ptr + in_channels.s7),
+ *(input_ptr + in_channels.s8),
+ *(input_ptr + in_channels.s9),
+ *(input_ptr + in_channels.sa),
+ *(input_ptr + in_channels.sb),
+ *(input_ptr + in_channels.sc),
+ *(input_ptr + in_channels.sd),
+ *(input_ptr + in_channels.se),
+ *(input_ptr + in_channels.sf)
+ };
+#endif // VEC_SIZE == 1
+
+ __global uchar *output_ptr = dst_ptr + curr_out_channel * sizeof(DATA_TYPE) + dst_offset_first_element_in_bytes + get_global_id(1) * dst_stride_y + z * dst_stride_z + batch_id * dst_stride_w;
+ STORE_VECTOR_SELECT(out, DATA_TYPE, output_ptr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
+}
+#endif // defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER) && defined(SRC_DIM_X)
+#endif // defined(DATA_TYPE) && defined(VEC_SIZE) && defined(NUM_GROUPS) && defined(K) && defined(SRC_DIM_Z) \ No newline at end of file
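The permutation implemented above is the usual channel shuffle: with SRC_DIM_X channels split into NUM_GROUPS groups of K channels each (SRC_DIM_X = NUM_GROUPS * K), output channel o reads input channel (o * K) % SRC_DIM_X + o / NUM_GROUPS, i.e. the (group, channel-within-group) pair is transposed. A small stand-alone C check of that mapping; the values are chosen for illustration only:

/* Illustrative check of the channel shuffle permutation used above.
 * Assumes SRC_DIM_X is the number of channels (NHWC: channels are dimension X). */
#include <stdio.h>

int main(void)
{
    const int num_groups = 2;
    const int K          = 3;              /* channels per group */
    const int channels   = num_groups * K; /* SRC_DIM_X          */

    for(int out_c = 0; out_c < channels; ++out_c)
    {
        const int in_c = (out_c * K) % channels + out_c / num_groups;
        printf("dst channel %d <- src channel %d\n", out_c, in_c);
    }
    /* Expected: 0<-0, 1<-3, 2<-1, 3<-4, 4<-2, 5<-5 (interleaves the two groups). */
    return 0;
}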
diff --git a/src/core/CL/cl_kernels/nhwc/depth_to_space.cl b/src/core/CL/cl_kernels/nhwc/depth_to_space.cl
new file mode 100644
index 0000000000..84f8aa7263
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/depth_to_space.cl
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2019-2021, 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(DATA_TYPE) && defined(BLOCK_SHAPE) && defined(CHANNEL_SIZE)
+/** Depth to space transformation. (NHWC)
+ *
+ * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
+ * @note The input tensor depth size must be passed at compile time using -DCHANNEL_SIZE. e.g. -DCHANNEL_SIZE=2
+ * @note The block shape must be passed at compile time using -DBLOCK_SHAPE. e.g. -DBLOCK_SHAPE=2
+ *
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: All.
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
+ * @param[in] batch_id The input tensor batch id
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void depth_to_space_nhwc(
+ TENSOR3D_DECLARATION(input),
+ const int batch_id,
+ TENSOR4D_DECLARATION(output))
+{
+ Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
+ Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output);
+
+ const int r = (CHANNEL_SIZE / (BLOCK_SHAPE * BLOCK_SHAPE));
+ const int x = get_global_id(1);
+ const int y = get_global_id(2);
+ const int z = get_global_id(0) % r;
+
+ const int out_x = x * BLOCK_SHAPE + (get_global_id(0) / r) % BLOCK_SHAPE;
+ const int out_y = y * BLOCK_SHAPE + (get_global_id(0) / r) / BLOCK_SHAPE;
+
+ *((__global DATA_TYPE *)tensor4D_offset(&out, z, out_x, out_y, batch_id)) = *((__global DATA_TYPE *)in.ptr);
+}
+#endif // defined(DATA_TYPE) && defined(BLOCK_SHAPE) && defined(CHANNEL_SIZE)
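As a reference for the index math above: with r = CHANNEL_SIZE / (BLOCK_SHAPE * BLOCK_SHAPE), input channel c at spatial position (x, y) is written to output channel c % r at (x * BLOCK_SHAPE + (c / r) % BLOCK_SHAPE, y * BLOCK_SHAPE + (c / r) / BLOCK_SHAPE). A minimal C sketch that prints this mapping; all names and values are illustrative:

/* Illustrative host-side view of the depth_to_space (NHWC) index mapping. */
#include <stdio.h>

int main(void)
{
    const int block        = 2;             /* BLOCK_SHAPE                */
    const int channel_size = 8;             /* CHANNEL_SIZE (input depth) */
    const int r            = channel_size / (block * block);

    const int x = 1, y = 2;                 /* input spatial coordinates  */

    for(int c = 0; c < channel_size; ++c)   /* c plays the role of get_global_id(0) */
    {
        const int z     = c % r;
        const int out_x = x * block + (c / r) % block;
        const int out_y = y * block + (c / r) / block;
        printf("input(c=%d, y=%d, x=%d) -> output(c=%d, y=%d, x=%d)\n", c, y, x, z, out_y, out_x);
    }
    return 0;
}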
diff --git a/src/core/CL/cl_kernels/nhwc/dequantization_layer.cl b/src/core/CL/cl_kernels/nhwc/dequantization_layer.cl
new file mode 100644
index 0000000000..238d3a7921
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/dequantization_layer.cl
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2017-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(VEC_SIZE) && defined(DATA_TYPE_SRC) && defined(DATA_TYPE_DST)
+/** This performs per channel dequantization of 8-bit signed integers to floating point. (NHWC)
+ *
+ * @note Source datatype should be given as a preprocessor argument using -DDATA_TYPE_SRC=type. e.g. -DDATA_TYPE_SRC=char
+ * @note Destination datatype should be given as a preprocessor argument using -DDATA_TYPE_DST=type. e.g. -DDATA_TYPE_DST=float
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ *
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: QSYMM8_PER_CHANNEL
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: F16/F32
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] scale Pointer to buffer with the per channel quantized scales
+ */
+__kernel void dequantization_layer_per_channel_nhwc(
+ TENSOR3D_DECLARATION(input),
+ TENSOR3D_DECLARATION(output),
+ __global float *scale)
+{
+ // Get pixels pointer
+ Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
+ Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
+
+#if defined(LAST_ACCESSED_X)
+ // Check if access on width gets out of bounds
+ // If it does shift access vector to access elements within bounds
+ const int xi = (int)(get_global_id(0) * VEC_SIZE);
+ input.ptr -= max(xi - (int)LAST_ACCESSED_X, 0) * input_stride_x;
+ output.ptr -= max(xi - (int)LAST_ACCESSED_X, 0) * output_stride_x;
+ scale -= max(xi - (int)LAST_ACCESSED_X, 0);
+
+ // Load data
+ VEC_DATA_TYPE(int, VEC_SIZE)
+ val = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_SRC *)input.ptr), VEC_DATA_TYPE(int, VEC_SIZE));
+
+ // Create scale vectors
+ const VEC_DATA_TYPE(float, VEC_SIZE)
+ vscale = VLOAD(VEC_SIZE)(0, &scale[xi]);
+
+ // Dequantize
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ res = vscale * CONVERT((val), VEC_DATA_TYPE(float, VEC_SIZE));
+
+ // Store result
+ VSTORE(VEC_SIZE)
+ (CONVERT(res, VEC_DATA_TYPE(DATA_TYPE_DST, VEC_SIZE)), 0, (__global DATA_TYPE_DST *)output.ptr);
+#else // !defined(LAST_ACCESSED_X)
+ *((__global DATA_TYPE_DST *)(output.ptr)) = (DATA_TYPE_DST)((float)((int)(*((__global DATA_TYPE_SRC *)(input.ptr)))) * scale[get_global_id(0)]);
+#endif // defined(LAST_ACCESSED_X)
+}
+#endif // defined(VEC_SIZE) && defined(DATA_TYPE_SRC) && defined(DATA_TYPE_DST) \ No newline at end of file
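Each QSYMM8_PER_CHANNEL value is dequantized as float(q) * scale[c], where c is the channel (X) index; the LAST_ACCESSED_X path only shifts the final vector access back so the VEC_SIZE-wide load and store stay within bounds. A scalar C reference of the arithmetic; the function name is an assumption for illustration, not an ACL API:

#include <stdio.h>

/* Scalar reference of the per-channel dequantization above. */
static float dequantize_qsymm8_per_channel(signed char q, const float *scale, int channel)
{
    return (float)q * scale[channel];
}

int main(void)
{
    const float scale[3] = { 0.5f, 0.25f, 0.125f };
    printf("%f\n", dequantize_qsymm8_per_channel(-20, scale, 1)); /* -> -5.0 */
    return 0;
}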
diff --git a/src/core/CL/cl_kernels/nhwc/direct_convolution.cl b/src/core/CL/cl_kernels/nhwc/direct_convolution.cl
new file mode 100644
index 0000000000..81ceeb8846
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/direct_convolution.cl
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2021-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "activation_float_helpers.h"
+#include "helpers.h"
+#include "helpers_asymm.h"
+#include "tile_helpers.h"
+
+//! @cond Doxygen_Suppress
+/** OpenCL kernel to compute the direct convolution.
+ *
+ * @note Data layout supported: NHWC
+ * @note Data type supported: F32/F16/QASYMM8/QASYMM8_SIGNED
+ * @note The accumulation data type must be passed at compile time using -DACC_DATA_TYPE (e.g. -DACC_DATA_TYPE=half)
+ * @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
+ * @note The convolution strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y (e.g. -DSTRIDE_X=2, -DSTRIDE_Y=2)
+ * @note The spatial dimensions of the weights must be passed at compile time using -DWEI_WIDTH and -DWEI_HEIGHT (e.g. -DWEI_WIDTH=9, -DWEI_HEIGHT=9)
+ * @note The spatial dimensions of the source tensor must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT (e.g. -DSRC_WIDTH=96, -DSRC_HEIGHT=64)
+ * @note The spatial dimensions of the destination tensor must be passed at compile time using -DDST_WIDTH and -DDST_HEIGHT (e.g. -DDST_WIDTH=96, -DDST_HEIGHT=64)
+ * @note The channels of the source tensor must be passed at compile time using -DSRC_CHANNELS (e.g. -DSRC_CHANNELS=64)
+ * @note The channels of the destination tensor must be passed at compile time using -DDST_CHANNELS (e.g. -DDST_CHANNELS=64)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the source tensor must be passed at compile time using -DSRC_TENSOR_TYPE (e.g. -DSRC_TENSOR_TYPE=BUFFER)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the weights tensor must be passed at compile time using -DWEI_TENSOR_TYPE (e.g. -DWEI_TENSOR_TYPE=BUFFER)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the destination tensor must be passed at compile time using -DDST_TENSOR_TYPE (e.g. -DDST_TENSOR_TYPE=BUFFER)
+ * @note The data type of the source tensor must be passed at compile time using -DSRC_DATA_TYPE (e.g. -DSRC_DATA_TYPE=float)
+ * @note The data type of the weights tensor must be passed at compile time using -DWEI_DATA_TYPE (e.g. -DWEI_DATA_TYPE=float)
+ * @note The data type of the destination tensor must be passed at compile time using -DDST_DATA_TYPE (e.g. -DDST_DATA_TYPE=float)
+ * @note The data type of the accumulators must be passed at compile time using -DACC_DATA_TYPE (e.g. -DACC_DATA_TYPE=float)
+ * @note The number of M0 rows (width*height) to process must be passed at compile time using -DM0 (e.g. -DM0=2)
+ * @note The number of N0 output channels to process must be passed at compile time using -DN0 (e.g. -DN0=2)
+ * @note The number of K0 inner accumulations must be passed at compile time using -DK0 (e.g. -DK0=2)
+ * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_N0 (e.g. -DPARTIAL_N0=1)
+ * @note The zero value must be passed at compile time using -DZERO_VALUE (e.g. -DZERO_VALUE=0)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ * - M0 = 1, 2, 3, 4, 5, 6, 7, and 8
+ * - N0 = 2, 3, 4, 8, 16
+ * - K0 = 2, 3, 4, 8, 16 (only 4, 8 and 16 if WEI_TENSOR_TYPE=IMAGE)
+ *
+ *@note In case of QASYMM8/QASYMM8_SIGNED, the following extra information must be passed at compile time:
+ * - -DIS_QUANTIZED
+ * - The destination quantization multiplier e.g. -DDST_MULTIPLIER=1234
+ * - The destination quantization shift e.g. -DDST_SHIFT=4
+ * - The destination offset e.g. -DDST_OFFSET=4
+ * - The source offset e.g. -DSRC_OFFSET=4
+ * - The weights offset e.g. -DWEI_OFFSET=4
+ * - The quantized zero value e.g. -DZERO_VALUE=4
+ *
+ * @param[in] src_img (Not supported) Read only cl_image object for the source tensor. Included when SRC_TENSOR_TYPE=IMAGE
+ * @param[in] src_ptr Pointer to the source tensor. Supported data type: F16/F32/QASYMM8
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_c The size of the channels dimension of the source tensor
+ * @param[in] src_w The size of the width dimension of the source tensor
+ * @param[in] src_h The size of the height dimension of the source tensor
+ * @param[in] src_n The size of the batches dimension of the source tensor
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_img (Not supported) Write only cl_image object for the destination tensor. Included when DST_TENSOR_TYPE=IMAGE
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data type: same as @p src_ptr
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_c The size of the channels dimension of the destination tensor
+ * @param[in] dst_w The size of the width dimension of the destination tensor
+ * @param[in] dst_h The size of the height dimension of the destination tensor
+ * @param[in] dst_n The size of the batches dimension of the destination tensor
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] wei_img (Optional) Read only cl_image object for the weights tensor. Included when WEI_TENSOR_TYPE=IMAGE
+ * @param[in] wei_ptr Pointer to the weights tensor. Supported data type: same as @p src_ptr
+ * @param[in] wei_stride_y Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] wei_stride_z Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] wei_stride_w Stride of the weights tensor in W dimension (in bytes)
+ * @param[in] wei_c The size of the channels dimension of the weights tensor
+ * @param[in] wei_w The size of the width dimension of the weights tensor
+ * @param[in] wei_h The size of the height dimension of the weights tensor
+ * @param[in] wei_n The size of the batches dimension of the weights tensor
+ * @param[in] wei_offset_first_element_in_bytes The offset of the first element in the weights matrix
+ * @param[in] bia_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p src_ptr (if F32/F16) or S32 (if QASYMM8/QASYMM8_SIGNED)
+ * @param[in] bia_stride_x (Optional) Stride of the bias tensor in X dimension (in bytes)
+ * @param[in] bia_step_x (Optional) bia_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] bia_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
+ */
+//! @endcond
+__kernel void direct_convolution_nhwc(
+ TENSOR4D_RO_T(src, SRC_TENSOR_TYPE),
+ TENSOR4D_WO_T(dst, DST_TENSOR_TYPE),
+ TENSOR4D_RO_T(wei, WEI_TENSOR_TYPE)
+#if defined(HAS_BIAS)
+ ,
+ VECTOR_DECLARATION(bia)
+#endif // defined(HAS_BIAS)
+)
+{
+ // All the tensor dimensions are passed at compile time.
+ // In case of dynamic tensor support, the following dimensions should be passed as function argument.
+#define _IWEI_WIDTH WEI_WIDTH
+#define _IWEI_HEIGHT WEI_HEIGHT
+#define _ISRC_WIDTH SRC_WIDTH
+#define _ISRC_HEIGHT SRC_HEIGHT
+#define _ISRC_CHANNELS SRC_CHANNELS
+#define _IDST_WIDTH DST_WIDTH
+#define _IDST_HEIGHT DST_HEIGHT
+#define _IDST_CHANNELS DST_CHANNELS
+#define _IY_MULTIPLIER (_IWEI_WIDTH * _IWEI_HEIGHT)
+
+ // If quantized, the output tile has to be quantized first before being stored to global memory
+#if defined(IS_QUANTIZED)
+#define _IOUTPUT_TILE cq
+#else // defined(IS_QUANTIZED)
+#define _IOUTPUT_TILE c
+#endif // defined(IS_QUANTIZED)
+
+ const int cout = GET_SPATIAL_IDX(0, N0, PARTIAL_N0); // OFM
+ const int mout = GET_SPATIAL_IDX(1, M0, 0); // WIDTH x HEIGHT
+ const int bout = GET_SPATIAL_IDX(2, 1, 0); // BATCH SIZE IDX
+
+ // .v = access the whole vector (OpenCL vector)
+ // .s[x] = access the vector element at position x (scalar access)
+ TILE(int, 1, M0, xi);
+ TILE(int, 1, M0, yi);
+
+ // Convert the linear index to coordinate
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ xi[0].s[i] = ((mout + i) % _IDST_WIDTH) * STRIDE_X;
+ yi[0].s[i] = ((mout + i) / _IDST_WIDTH) * STRIDE_Y;
+ xi[0].s[i] -= PAD_LEFT;
+ yi[0].s[i] -= PAD_TOP;
+ })
+
+ // Initialize the accumulators
+ TILE(ACC_DATA_TYPE, M0, N0, c);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c[i].v = 0;
+ })
+
+ for(int i = 0; i < (_IWEI_WIDTH * _IWEI_HEIGHT); ++i)
+ {
+ int xk = i % _IWEI_WIDTH;
+ int yk = i / _IWEI_WIDTH;
+
+ TILE(int, 1, M0, my);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ int x_s = xi[0].s[i] + xk;
+ int y_s = yi[0].s[i] + yk;
+ my[0].s[i] = x_s + y_s *_ISRC_WIDTH;
+ my[0].s[i] = my[0].s[i] + bout * (int)(_ISRC_WIDTH * _ISRC_HEIGHT);
+ my[0].s[i] = select(-1, my[0].s[i], x_s >= 0);
+ my[0].s[i] = select(-1, my[0].s[i], x_s < _ISRC_WIDTH);
+ my[0].s[i] = select(-1, my[0].s[i], y_s >= 0);
+ my[0].s[i] = select(-1, my[0].s[i], y_s < _ISRC_HEIGHT);
+ })
+
+ int ck = 0;
+ for(; ck <= (_ISRC_CHANNELS - K0); ck += K0)
+ {
+ TILE(SRC_DATA_TYPE, M0, K0, a);
+ TILE(WEI_DATA_TYPE, N0, K0, b);
+
+ // Initialize tiles
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = ZERO_VALUE;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ b[i].v = ZERO_VALUE;
+ })
+
+ // Load tile from the src tensor
+ T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, K0, SRC_TENSOR_TYPE, src, ck, src_stride_y, my, a);
+
+ // Load tile from the weights tensor
+ T_LOAD(WEI_DATA_TYPE, N0, K0, WEI_TENSOR_TYPE, wei, ck, cout * _IY_MULTIPLIER + i, _IY_MULTIPLIER, wei_stride_y, b);
+
+ // Compute the matrix multiplication between two tiles
+ T_MMUL(SRC_DATA_TYPE, WEI_DATA_TYPE, ACC_DATA_TYPE, M0, N0, K0, NT, T, a, b, c);
+
+ // Apply the offset correction (correction usually needed for asymmetric quantized computation)
+ // The computation is not performed if both SRC_OFFSET and WEI_OFFSET are zero
+ T_OFFSET_CORRECTION(ACC_DATA_TYPE, M0, N0, K0, SRC_OFFSET, WEI_OFFSET, a, b, c);
+ }
+
+ // This #if directive should be removed in case of dynamic tensor support
+#if defined(LEFTOVER_LOOP)
+ // Left-over accumulations
+ for(; ck < _ISRC_CHANNELS; ++ck)
+ {
+ TILE(SRC_DATA_TYPE, M0, 1, a);
+ TILE(WEI_DATA_TYPE, N0, 1, b);
+
+ // Initialize tiles
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = ZERO_VALUE;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ b[i].v = ZERO_VALUE;
+ })
+
+ // Load tile from the src tensor
+ T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, 1, SRC_TENSOR_TYPE, src, ck, src_stride_y, my, a);
+
+ // Load tile from the weights tensor
+ // The T_LOAD for the left-over elements can only use BUFFER because we load one element per iteration
+ T_LOAD(WEI_DATA_TYPE, N0, 1, BUFFER, wei, ck, cout * _IY_MULTIPLIER + i, _IY_MULTIPLIER, wei_stride_y, b);
+
+ // Compute the matrix multiplication between two tiles
+ T_MMUL(SRC_DATA_TYPE, WEI_DATA_TYPE, ACC_DATA_TYPE, M0, N0, 1, NT, T, a, b, c);
+
+ // Apply the offset correction (operation usually needed for asymmetric quantized computation)
+ // The computation is not performed if both SRC_OFFSET and WEI_OFFSET are zero
+ T_OFFSET_CORRECTION(ACC_DATA_TYPE, M0, N0, 1, SRC_OFFSET, WEI_OFFSET, a, b, c);
+ }
+#endif // defined(LEFTOVER_LOOP)
+ }
+
+ // Offset correction required for the quantized asymmetric computation
+ // The computation is not performed if both SRC_OFFSET and WEI_OFFSET are zero
+ T_ADD_CONSTANT(ACC_DATA_TYPE, M0, N0, c, (_IWEI_WIDTH * _IWEI_HEIGHT * _ISRC_CHANNELS * SRC_OFFSET * WEI_OFFSET), c);
+
+#if defined(HAS_BIAS)
+ TILE(BIA_DATA_TYPE, 1, N0, bias0);
+
+ T_LOAD(BIA_DATA_TYPE, 1, N0, BUFFER, bia, cout, 0, 1, 0, bias0);
+
+ // c = c + bias[broadcasted]
+ T_ELTWISE_BROADCAST_ADD_X(ACC_DATA_TYPE, M0, N0, c, bias0, c);
+
+#endif // HAS_BIAS
+
+#if defined(IS_QUANTIZED)
+
+ TILE(DST_DATA_TYPE, M0, N0, cq);
+
+ // Quantize the tile
+ T_QUANTIZE8_ASYMMETRIC(ACC_DATA_TYPE, DST_DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, c, cq);
+#endif // defined(IS_QUANTIZED)
+
+ // Apply activation
+ T_ACTIVATION(DST_DATA_TYPE, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, _IOUTPUT_TILE, _IOUTPUT_TILE);
+
+ TILE(uint, M0, 1, dst_indirect_y);
+
+ // Calculate the destination indirect Y
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ dst_indirect_y[i].v = (uint)min(mout + i, (int)(_IDST_WIDTH * _IDST_HEIGHT) - 1);
+ dst_indirect_y[i].v += bout * (int)(_IDST_WIDTH * _IDST_HEIGHT);
+ })
+
+ bool x_cond = PARTIAL_N0 != 0 && get_global_id(0) == 0;
+
+ // _IOUTPUT_TILE: c = fp32/fp16, cq=qasymm8
+ // Store the tile in reverse order so the invalid values are overwritten with the valid ones
+ T_STORE_INDIRECT_WIDTH_SELECT(DST_DATA_TYPE, M0, N0, PARTIAL_N0, DST_TENSOR_TYPE, dst, cout, dst_stride_y, x_cond, _IOUTPUT_TILE, dst_indirect_y);
+
+#undef _IWEI_WIDTH
+#undef _IWEI_HEIGHT
+#undef _ISRC_WIDTH
+#undef _ISRC_HEIGHT
+#undef _ISRC_CHANNELS
+#undef _IDST_WIDTH
+#undef _IDST_HEIGHT
+#undef _IDST_CHANNELS
+#undef _IY_MULTIPLIER
+}
diff --git a/src/core/CL/cl_kernels/nhwc/direct_convolution3d.cl b/src/core/CL/cl_kernels/nhwc/direct_convolution3d.cl
new file mode 100644
index 0000000000..807b990e82
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/direct_convolution3d.cl
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2021-2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "helpers.h"
+#include "tile_helpers.h"
+
+//! @cond Doxygen_Suppress
+/** OpenCL kernel to compute the direct convolution 3d.
+ *
+ * @note Data layout supported: NDHWC
+ * @note Data type supported: F32/F16/QASYMM8/QASYMM8_SIGNED
+ * @note The accumulation data type must be passed at compile time using -DACC_DATA_TYPE (e.g. -DACC_DATA_TYPE=half)
+ * @note The convolution padding (left, top and front) must be passed at compile time using -DPAD_LEFT, -DPAD_TOP and -DPAD_FRONT (e.g. -DPAD_LEFT=2, -DPAD_TOP=2, -DPAD_FRONT=2)
+ * @note The convolution strides must be passed at compile time using -DSTRIDE_X, -DSTRIDE_Y and -DSTRIDE_Z (e.g. -DSTRIDE_X=2, -DSTRIDE_Y=2, -DSTRIDE_Z=2)
+ * @note The spatial dimensions of the weights must be passed at compile time using -DWEI_WIDTH, -DWEI_HEIGHT and -DWEI_DEPTH (e.g. -DWEI_WIDTH=9, -DWEI_HEIGHT=9, -DWEI_DEPTH=9)
+ * @note The spatial dimensions of the source tensor must be passed at compile time using -DSRC_WIDTH, -DSRC_HEIGHT and -DSRC_DEPTH (e.g. -DSRC_WIDTH=96, -DSRC_HEIGHT=64, -DSRC_DEPTH=32)
+ * @note The spatial dimensions of the destination tensor must be passed at compile time using -DDST_WIDTH, -DDST_HEIGHT and -DDST_DEPTH (e.g. -DDST_WIDTH=96, -DDST_HEIGHT=64, -DDST_DEPTH=32)
+ * @note The channels of the source tensor must be passed at compile time using -DSRC_CHANNELS (e.g. -DSRC_CHANNELS=64)
+ * @note The channels of the destination tensor must be passed at compile time using -DDST_CHANNELS (e.g. -DDST_CHANNELS=64)
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=half)
+ * @note The data type of the accumulators must be passed at compile time using -DACC_DATA_TYPE (e.g. -DACC_DATA_TYPE=float)
+ * @note The number of M0 rows (width*height) to process must be passed at compile time using -DM0 (e.g. -DM0=2)
+ * @note The number of N0 output channels to process must be passed at compile time using -DN0 (e.g. -DN0=2)
+ * @note The number of K0 inner accumulations must be passed at compile time using -DK0 (e.g. -DK0=2)
+ * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_N0 (e.g. -DPARTIAL_N0=1)
+ * @note The zero value must be passed at compile time using -DZERO_VALUE (e.g. -DZERO_VALUE=0)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ * - M0 = 1, 2, 3, 4, 5, .... n
+ * - N0 = 2, 3, 4, 8, 16
+ * - K0 = 2, 3, 4, 8, 16
+ *
+ * @note In case of QASYMM8/QASYMM8_SIGNED, the following extra information must be passed at compile time:
+ * - -DIS_QUANTIZED
+ * - The destination quantization multiplier e.g. -DDST_MULTIPLIER=1234
+ * - The destination quantization shift e.g. -DDST_SHIFT=4
+ * - The destination offset e.g. -DDST_OFFSET=4
+ * - The source offset e.g. -DSRC_OFFSET=4
+ * - The weights offset e.g. -DWEI_OFFSET=4
+ * - The quantized zero value e.g. -DZERO_VALUE=4
+ *
+ * @note If biases are used then -DHAS_BIAS has to be passed at compile time along with its tensor type by using -DBIA_DATA_TYPE (e.g. -DBIA_DATA_TYPE=int).
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data type: F16/F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data type: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] wei_ptr Pointer to the weights tensor. Supported data type: same as @p src_ptr
+ * @param[in] wei_stride_x Stride of the weights tensor in X dimension (in bytes)
+ * @param[in] wei_step_x wei_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] wei_stride_y Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] wei_step_y wei_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] wei_stride_z Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] wei_step_z wei_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] wei_stride_w Stride of the weights tensor in W dimension (in bytes)
+ * @param[in] wei_step_w wei_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] wei_offset_first_element_in_bytes The offset of the first element in the weights matrix
+ * @param[in] bia_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p src_ptr
+ * @param[in] bia_stride_x (Optional) Stride of the bias tensor in X dimension (in bytes)
+ * @param[in] bia_step_x (Optional) bia_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] bia_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
+ */
+//! @endcond
+__kernel void direct_convolution3d_ndhwc(
+ TENSOR4D(src, BUFFER),
+ TENSOR4D(dst, BUFFER),
+ TENSOR4D(wei, BUFFER)
+#if defined(HAS_BIAS)
+ ,
+ VECTOR_DECLARATION(bia)
+#endif // defined(HAS_BIAS)
+)
+{
+#define _IWEI_WIDTH WEI_WIDTH
+#define _IWEI_HEIGHT WEI_HEIGHT
+#define _IWEI_DEPTH WEI_DEPTH
+#define _ISRC_WIDTH SRC_WIDTH
+#define _ISRC_HEIGHT SRC_HEIGHT
+#define _ISRC_DEPTH SRC_DEPTH
+#define _ISRC_CHANNELS SRC_CHANNELS
+#define _IDST_WIDTH DST_WIDTH
+#define _IDST_HEIGHT DST_HEIGHT
+#define _IDST_DEPTH DST_DEPTH
+#define _IDST_CHANNELS DST_CHANNELS
+#define _IY_MULTIPLIER (_IWEI_WIDTH * _IWEI_HEIGHT * _IWEI_DEPTH)
+
+ // If quantized, the output tile has to be quantized first before being stored to global memory
+#if defined(IS_QUANTIZED)
+#define _IOUTPUT_TILE cq
+#else // defined(IS_QUANTIZED)
+#define _IOUTPUT_TILE c
+#endif // defined(IS_QUANTIZED)
+
+ const int cout = GET_SPATIAL_IDX(0, N0, PARTIAL_N0); // OFM
+ const int mout = GET_SPATIAL_IDX(1, M0, 0); // WIDTH x HEIGHT x DEPTH
+ const int bout = GET_SPATIAL_IDX(2, 1, 0); // BATCH SIZE IDX
+
+ TILE(int, M0, 1, xi);
+ TILE(int, M0, 1, yi);
+ TILE(int, M0, 1, zi);
+
+ // Convert the linear index to coordinate
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ xi[i].v = ((mout + i) % _IDST_WIDTH) * STRIDE_X;
+ yi[i].v = (((mout + i) / _IDST_WIDTH) % _IDST_HEIGHT) * STRIDE_Y;
+ zi[i].v = (((mout + i) / (_IDST_WIDTH * _IDST_HEIGHT)) % _IDST_DEPTH) * STRIDE_Z;
+
+ xi[i].v -= PAD_LEFT;
+ yi[i].v -= PAD_TOP;
+ zi[i].v -= PAD_FRONT;
+ })
+
+ // Initialize the accumulators
+ TILE(ACC_DATA_TYPE, M0, N0, c);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c[i].v = (ACC_DATA_TYPE)0;
+ })
+
+ for(int i = 0; i < _IY_MULTIPLIER; ++i)
+ {
+ int ck = 0;
+ int xk = i % _IWEI_WIDTH;
+ int yk = (i / _IWEI_WIDTH) % _IWEI_HEIGHT;
+ int zk = i / (_IWEI_WIDTH * _IWEI_HEIGHT);
+
+ int k = 0;
+ for(; k <= (_ISRC_CHANNELS - K0); k += K0)
+ {
+ TILE(DATA_TYPE, M0, K0, a);
+ TILE(DATA_TYPE, N0, K0, b);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = ZERO_VALUE;
+ })
+
+ // Load tile from the src tensor
+ T_LOAD_NDHWC_INDIRECT(DATA_TYPE, M0, K0, BUFFER, src, bout, zk, yk, xk, ck, _ISRC_WIDTH, _ISRC_HEIGHT, _ISRC_DEPTH, src_stride_y, xi, yi, zi, a);
+
+ // Load tile from the weights tensor
+ const int b_offs = k + (xk * _ISRC_CHANNELS) + (yk * _ISRC_CHANNELS * _IWEI_WIDTH) + (zk * _ISRC_CHANNELS * _IWEI_WIDTH * _IWEI_HEIGHT);
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ if((cout + i) < _IDST_CHANNELS)
+ {
+ LOOP_UNROLLING(int, j, 0, 1, K0,
+ {
+ b[i].s[j] = *(__global DATA_TYPE *)(wei_ptr + wei_offset_first_element_in_bytes + (cout + i) * sizeof(DATA_TYPE) + j * wei_stride_y + b_offs * wei_stride_y);
+ })
+ }
+ })
+
+ // Compute the matrix multiplication between two tiles
+ T_MMUL(DATA_TYPE, DATA_TYPE, ACC_DATA_TYPE, M0, N0, K0, NT, T, a, b, c);
+
+ // Apply the offset correction (correction usually needed for asymmetric quantized computation)
+ // The computation is not performed if both SRC_OFFSET and WEI_OFFSET are zero
+ T_OFFSET_CORRECTION(ACC_DATA_TYPE, M0, N0, K0, SRC_OFFSET, WEI_OFFSET, a, b, c);
+
+ ck += K0;
+ }
+
+#if((_ISRC_CHANNELS % K0) != 0)
+ // Left-over accumulations
+ for(; k < _ISRC_CHANNELS; ++k)
+ {
+ TILE(DATA_TYPE, M0, 1, a);
+ TILE(DATA_TYPE, N0, 1, b);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = ZERO_VALUE;
+ })
+
+ // Load tile from the src tensor
+ T_LOAD_NDHWC_INDIRECT(DATA_TYPE, M0, 1, BUFFER, src, bout, zk, yk, xk, ck, _ISRC_WIDTH, _ISRC_HEIGHT, _ISRC_DEPTH, src_stride_y, xi, yi, zi, a);
+
+ // Load tile from the weights tensor
+ const int b_offs = k + (xk * _ISRC_CHANNELS) + (yk * _ISRC_CHANNELS * _IWEI_WIDTH) + (zk * _ISRC_CHANNELS * _IWEI_WIDTH * _IWEI_HEIGHT);
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ if((cout + i) < _IDST_CHANNELS)
+ {
+ b[i].v = *(__global DATA_TYPE *)(wei_ptr + wei_offset_first_element_in_bytes + (cout + i) * sizeof(DATA_TYPE) + b_offs * wei_stride_y);
+ }
+ })
+
+ // Compute the matrix multiplication between two tiles
+ T_MMUL(DATA_TYPE, DATA_TYPE, ACC_DATA_TYPE, M0, N0, 1, NT, T, a, b, c);
+
+ // Apply the offset correction (operation usually needed for asymmetric quantized computation)
+ // The computation is not performed if both SRC_OFFSET and WEI_OFFSET are zero
+ T_OFFSET_CORRECTION(ACC_DATA_TYPE, M0, N0, 1, SRC_OFFSET, WEI_OFFSET, a, b, c);
+
+ ++ck;
+ }
+#endif // ((_ISRC_CHANNELS % K0) != 0)
+ }
+
+ // Offset correction required for the quantized asymmetric computation
+ // The computation is not performed if both SRC_OFFSET and WEI_OFFSET are zero
+ T_ADD_CONSTANT(ACC_DATA_TYPE, M0, N0, c, (_IWEI_WIDTH * _IWEI_HEIGHT * _IWEI_DEPTH * _ISRC_CHANNELS * SRC_OFFSET * WEI_OFFSET), c);
+
+#if defined(HAS_BIAS)
+ TILE(BIA_DATA_TYPE, 1, N0, bias0);
+
+ if((cout + N0) <= _IDST_CHANNELS)
+ {
+ bias0[0].v = VLOAD(N0)(0, (__global BIA_DATA_TYPE *)(bia_ptr + bia_offset_first_element_in_bytes + cout * sizeof(BIA_DATA_TYPE)));
+ }
+ else
+ {
+ VLOAD_PARTIAL(N0, PARTIAL_N0)
+ (bias0[0].v, 0, (__global BIA_DATA_TYPE *)(bia_ptr + bia_offset_first_element_in_bytes + cout * sizeof(BIA_DATA_TYPE)));
+ }
+
+ // c = c + bias[broadcasted]
+ T_ELTWISE_BROADCAST_ADD_X(ACC_DATA_TYPE, M0, N0, c, bias0, c);
+
+#endif // HAS_BIAS
+
+ TILE(uint, M0, 1, dst_indirect_y);
+
+ // Calculate the destination indirect Y
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ dst_indirect_y[i].v = (uint)min(mout + i, (int)(_IDST_WIDTH *_IDST_HEIGHT * _IDST_DEPTH) - 1);
+ dst_indirect_y[i].v += bout * (int)(_IDST_WIDTH *_IDST_HEIGHT * _IDST_DEPTH);
+ })
+
+#if defined(IS_QUANTIZED)
+ TILE(DATA_TYPE, M0, N0, cq);
+
+ // Quantize the tile
+ T_QUANTIZE8_ASYMMETRIC(ACC_DATA_TYPE, DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, c, cq);
+#endif // defined(IS_QUANTIZED)
+
+ bool x_cond = PARTIAL_N0 != 0 && get_global_id(0) == 0;
+
+ // Store the tile in reverse order so the invalid values are overwritten with the valid ones
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_N0, BUFFER, dst, cout, dst_stride_y, x_cond, _IOUTPUT_TILE, dst_indirect_y);
+} \ No newline at end of file
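The 3D variant follows the same structure as the 2D kernel, with the linear work-item index mout decomposed into (x, y, z) output coordinates before the stride and padding offsets are applied. A small C sketch of just that decomposition; the values and names are illustrative only:

/* Sketch of the output-coordinate decomposition used by direct_convolution3d_ndhwc. */
#include <stdio.h>

int main(void)
{
    const int dst_w = 4, dst_h = 3, dst_d = 2;
    const int stride_x = 2, stride_y = 2, stride_z = 1;
    const int pad_left = 1, pad_top = 1, pad_front = 0;

    for(int mout = 0; mout < dst_w * dst_h * dst_d; ++mout)
    {
        const int xi = (mout % dst_w) * stride_x - pad_left;
        const int yi = ((mout / dst_w) % dst_h) * stride_y - pad_top;
        const int zi = ((mout / (dst_w * dst_h)) % dst_d) * stride_z - pad_front;
        printf("mout=%2d -> src top-left corner (x=%d, y=%d, z=%d)\n", mout, xi, yi, zi);
    }
    return 0;
}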
diff --git a/src/core/CL/cl_kernels/nhwc/dwc_native_fp_nhwc.cl b/src/core/CL/cl_kernels/nhwc/dwc_native_fp_nhwc.cl
new file mode 100644
index 0000000000..dcbae220b6
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/dwc_native_fp_nhwc.cl
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2021-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "activation_float_helpers.h"
+#include "helpers.h"
+#include "tile_helpers.h"
+// *INDENT-OFF*
+// clang-format off
+#if defined(WEI_WIDTH) && defined(WEI_HEIGHT) && defined(N0) && defined(M0) && defined(DILATION_X) && defined(DILATION_Y) && defined(STRIDE_X) && defined(STRIDE_Y) && defined(PAD_LEFT) && defined(PAD_TOP)
+//! @cond Doxygen_Suppress
+/** OpenCL kernel to compute the depthwise convolution for floating-point data types (F32/F16)
+ *
+ * @note Data layout supported: NHWC
+ * @note Data type supported: F32/F16
+ * @note The accumulation data type must be passed at compile time using -DACC_DATA_TYPE (e.g. -DACC_DATA_TYPE=half)
+ * @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
+ * @note The convolution strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y (e.g. -DSTRIDE_X=2, -DSTRIDE_Y=2)
+ * @note The convolution dilations must be passed at compile time using -DDILATION_X and -DDILATION_Y (e.g. -DDILATION_X=2, -DDILATION_Y=2)
+ * @note The spatial dimensions of the weights must be passed at compile time using -DWEI_WIDTH and -DWEI_HEIGHT (e.g. -DWEI_WIDTH=9, -DWEI_HEIGHT=9)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the source tensor must be passed at compile time using -DSRC_TENSOR_TYPE (e.g. -DSRC_TENSOR_TYPE=BUFFER)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the weights tensor must be passed at compile time using -DWEI_TENSOR_TYPE (e.g. -DWEI_TENSOR_TYPE=BUFFER)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the destination tensor must be passed at compile time using -DDST_TENSOR_TYPE (e.g. -DDST_TENSOR_TYPE=BUFFER)
+ * @note The data type of the source tensor must be passed at compile time using -DSRC_DATA_TYPE (e.g. -DSRC_DATA_TYPE=float)
+ * @note The data type of the weights tensor must be passed at compile time using -DWEI_DATA_TYPE (e.g. -DWEI_DATA_TYPE=float)
+ * @note The data type of the destination tensor must be passed at compile time using -DDST_DATA_TYPE (e.g. -DDST_DATA_TYPE=float)
+ * @note The data type of the accumulators must be passed at compile time using -DACC_DATA_TYPE (e.g. -DACC_DATA_TYPE=float)
+ * @note The number of M0 rows (width) to process must be passed at compile time using -DM0 (e.g. -DM0=2)
+ * @note The number of N0 output channels to process must be passed at compile time using -DN0 (e.g. -DN0=2)
+ * @note The size of the partial store block in the first dimension must be passed at compile time using -DPARTIAL_N0 (e.g. -DPARTIAL_N0=1)
+ * @note Only the following configurations of M0 and N0 are currently supported:
+ * - M0 = 1, 2, 3, 4, 5, .... n (M0 != 1 with STRIDE_X == 1 && DILATION_X == 1 only)
+ * - N0 = 2, 3, 4, 8, 16 (only 4, 8 and 16 if WEI_TENSOR_TYPE=IMAGE)
+ * @note The number of rows to read from the src tensor must be passed at compile time using -DM0_A (e.g., -DM0_A=3). M0_A must be equal to WEI_WIDTH + (M0 - 1)
+ * @note The number of columns to read from the src tensor must be passed at compile time using -DN0_A. It can either be 1 (for DEPTH_MULTIPLIER > 1) or N0 (for DEPTH_MULTIPLIER == 1)
+ *
+ * @param[in] src_img (Not supported) Read only cl_image object for the source tensor. Included when SRC_TENSOR_TYPE=IMAGE
+ * @param[in] src_ptr Pointer to the source tensor. Supported data type: F16/F32
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_c The size of the channels dimension of the source tensor
+ * @param[in] src_w The size of the width dimension of the source tensor
+ * @param[in] src_h The size of the height dimension of the source tensor
+ * @param[in] src_n The size of the batches dimension of the source tensor
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_img (Not supported) Write only cl_image object for the destination tensor. Included when DST_TENSOR_TYPE=IMAGE
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data type: same as @p src_ptr
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_c The size of the channels dimension of the destination tensor
+ * @param[in] dst_w The size of the width dimension of the destination tensor
+ * @param[in] dst_h The size of the height dimension of the destination tensor
+ * @param[in] dst_n The size of the batches dimension of the destination tensor
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] wei_img (Optional) Read only cl_image object for the weights tensor. Included when WEI_TENSOR_TYPE=IMAGE
+ * @param[in] wei_ptr Pointer to the weights tensor. Supported data type: same as @p src_ptr
+ * @param[in] wei_stride_y Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] wei_stride_z Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] wei_stride_w Stride of the weights tensor in W dimension (in bytes)
+ * @param[in] wei_c The size of the channels dimension of the weights tensor
+ * @param[in] wei_w The size of the width dimension of the weights tensor
+ * @param[in] wei_h The size of the height dimension of the weights tensor
+ * @param[in] wei_n The size of the batches dimension of the weights tensor
+ * @param[in] wei_offset_first_element_in_bytes The offset of the first element in the weights tensor
+ * @param[in] bia_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p src_ptr
+ * @param[in] bia_stride_x (Optional) Stride of the bias tensor in X dimension (in bytes)
+ * @param[in] bia_step_x (Optional) bia_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] bia_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
+ */
+//! @endcond
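+// For illustration only: a plausible set of build options for a 3x3 depthwise convolution with
+// stride 1, derived from the constraints documented above. The actual options are assembled by the
+// host-side code that compiles this kernel, so the values below are hypothetical.
+//   -DWEI_WIDTH=3 -DWEI_HEIGHT=3 -DSTRIDE_X=1 -DSTRIDE_Y=1 -DPAD_LEFT=1 -DPAD_TOP=1
+//   -DDILATION_X=1 -DDILATION_Y=1 -DDEPTH_MULTIPLIER=1
+//   -DM0=2 -DN0=4 -DPARTIAL_N0=0 -DM0_A=4 -DN0_A=4   (M0_A = WEI_WIDTH + (M0 - 1); N0_A = N0 when DEPTH_MULTIPLIER == 1)
+//   -DSRC_DATA_TYPE=float -DWEI_DATA_TYPE=float -DDST_DATA_TYPE=float -DACC_DATA_TYPE=float
+//   -DSRC_TENSOR_TYPE=BUFFER -DWEI_TENSOR_TYPE=BUFFER -DDST_TENSOR_TYPE=BUFFER
+//   plus the activation options used by T_ACTIVATION below (e.g. -DACTIVATION_TYPE=relu -DA_VAL=0 -DB_VAL=0)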
+__kernel void dwc_native_fp_nhwc(
+ TENSOR4D_RO_T(src, SRC_TENSOR_TYPE),
+ TENSOR4D_WO_T(dst, DST_TENSOR_TYPE),
+ TENSOR4D_RO_T(wei, WEI_TENSOR_TYPE)
+#if defined(HAS_BIAS)
+ ,
+ VECTOR_DECLARATION(bia)
+#endif // defined(HAS_BIAS)
+)
+{
+ // Only the weight tensor dimensions are passed at compile time.
+ // In case of dynamic tensor support, the following dimensions should be passed as function arguments.
+#define _IWEI_WIDTH WEI_WIDTH
+#define _IWEI_HEIGHT WEI_HEIGHT
+#define _IM0_A M0_A // _IWEI_WIDTH + (M0 - 1) Rows tile A (If M0 != 1, the tiles overlap by 1 element on the X dimension)
+#define _IN0_A N0_A // Cols tile A. It can be either 1 (for DEPTH_MULTIPLIER > 1) or N0 (for DEPTH_MULTIPLIER == 1)
+#define _IM0_B _IWEI_WIDTH // Rows tile B
+#define _IN0_B N0 // Cols tile B
+#define _IBOUNDARY_CHECK (!((WEI_WIDTH == 1 && WEI_HEIGHT == 1 && PAD_LEFT == 0 && PAD_TOP == 0 && M0 == 1)))
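+// Note: the boundary check on the input loads can only be skipped for a 1x1 kernel with no
+// left/top padding and M0 == 1, i.e. when every (xi, yi) coordinate computed below is guaranteed
+// to fall inside the source tensor.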
+
+ const int cout = GET_SPATIAL_IDX(0, N0, PARTIAL_N0); // OFM
+ const int xo = GET_SPATIAL_IDX(1, M0, 0); // WIDTH
+#if defined(BATCHED_EXECUTION)
+ const int yo = GET_SPATIAL_IDX(2, 1, 0) % dst_h; // HEIGHT
+ const int bout = GET_SPATIAL_IDX(2, 1, 0) / dst_h; // BATCH SIZE IDX
+#else // defined(BATCHED_EXECUTION)
+ const int yo = GET_SPATIAL_IDX(2, 1, 0); // HEIGHT
+ const int bout = 0; // BATCH SIZE IDX
+#endif // defined(BATCHED_EXECUTION)
+
+ int xi = xo * STRIDE_X;
+ int yi = yo * STRIDE_Y;
+ xi -= PAD_LEFT;
+ yi -= PAD_TOP;
+
+ TILE(ACC_DATA_TYPE, M0, N0, c);
+
+ // Reset accumulators
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c[i].v = 0;
+ })
+
+#if _IWEI_HEIGHT < 5
+ LOOP_UNROLLING(int, yk, 0, 1, _IWEI_HEIGHT,
+#else // _IWEI_HEIGHT < 5
+ for(int yk = 0; yk < _IWEI_HEIGHT; ++yk)
+#endif // _IWEI_HEIGHT < 5
+ {
+ TILE(SRC_DATA_TYPE, _IM0_A, _IN0_A, a);
+
+ LOOP_UNROLLING(int, i, 0, 1, _IM0_A,
+ {
+ a[i].v = 0;
+ })
+
+ // Load tile from the src tensor (TILE A)
+ T_LOAD_NHWC_WITH_DILATION(SRC_DATA_TYPE, 1, _IM0_A, _IN0_A, SRC_TENSOR_TYPE, src, bout, yi + yk * DILATION_Y, xi, (cout / DEPTH_MULTIPLIER), SRC_WIDTH, SRC_HEIGHT, DILATION_X, 1, _IBOUNDARY_CHECK, a);
+
+ TILE(WEI_DATA_TYPE, _IM0_B, _IN0_B, b);
+
+ // Load tile from the weights tensor (TILE B)
+ T_LOAD(WEI_DATA_TYPE, _IM0_B, _IN0_B, WEI_TENSOR_TYPE, wei, cout, yk * _IM0_B, 1, wei_stride_y, b);
+
+ // Optimized path for STRIDE_X == 1
+ // If M0 != 1, we can skip the common loads between the two applied kernels on the X (WIDTH) dimension
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, xk, 0, 1, _IWEI_WIDTH,
+ {
+#if GPU_ARCH == GPU_ARCH_MIDGARD
+ c[m0].v += a[xk + m0].v * b[xk].v;
+#else // GPU_ARCH == GPU_ARCH_MIDGARD
+ c[m0].v = fma(a[xk + m0].v, b[xk].v, c[m0].v);
+#endif // GPU_ARCH == GPU_ARCH_MIDGARD
+ })
+ })
+ }
+#if _IWEI_HEIGHT < 5
+ )
+#endif // _IWEI_HEIGHT < 5
+
+#if defined(HAS_BIAS)
+ TILE(BIA_DATA_TYPE, 1, N0, bias0);
+
+ T_LOAD(BIA_DATA_TYPE, 1, N0, BUFFER, bia, cout, 0, 0, 0, bias0);
+
+ // c = c + bias[broadcasted]
+ T_ELTWISE_BROADCAST_ADD_X(ACC_DATA_TYPE, M0, N0, c, bias0, c);
+#endif // HAS_BIAS
+
+ T_ACTIVATION(ACC_DATA_TYPE, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, c, c);
+
+ TILE(uint, M0, 1, dst_indirect_y);
+
+ bool x_cond = PARTIAL_N0 != 0 && get_global_id(0) == 0;
+
+ if(x_cond)
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ int xi_out = min(xo + M0 - 1 - m0, (int)(DST_WIDTH) - 1);
+ VSTORE_PARTIAL(N0, PARTIAL_N0)
+ (c[M0 - 1 - m0].v, 0, (__global DST_DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + cout * sizeof(DST_DATA_TYPE) + (uint)xi_out * dst_stride_y + (uint)yo * dst_stride_z + (uint)bout * dst_stride_w));
+ })
+ }
+ else
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ int xi_out = min(xo + M0 - 1 - m0, (int)(DST_WIDTH) - 1);
+ VSTORE(N0)
+ (c[M0 - 1 - m0].v, 0, (__global DST_DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + cout * sizeof(DST_DATA_TYPE) + (uint)xi_out * dst_stride_y + (uint)yo * dst_stride_z + (uint)bout * dst_stride_w));
+ })
+ }
+}
+#endif // defined(WEI_WIDTH) && defined(WEI_HEIGHT) && defined(N0) && defined(M0) && defined(DILATION_X) && defined(DILATION_Y) && defined(STRIDE_X) && defined(STRIDE_Y) && defined(PAD_LEFT) && defined(PAD_TOP)
+// *INDENT-ON*
+// clang-format on
diff --git a/src/core/CL/cl_kernels/nhwc/dwc_native_quantized_nhwc.cl b/src/core/CL/cl_kernels/nhwc/dwc_native_quantized_nhwc.cl
new file mode 100644
index 0000000000..2d255e5b61
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/dwc_native_quantized_nhwc.cl
@@ -0,0 +1,275 @@
+/*
+ * Copyright (c) 2021-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "helpers.h"
+#include "tile_helpers.h"
+
+// *INDENT-OFF*
+// clang-format off
+#define CALCULATE_WEIGHTS_OFFSET_CORRECTION(A_DATA_TYPE, B_DATA_TYPE) CALCULATE_WEIGHTS_OFFSET_CORRECTION_STR(A_DATA_TYPE, B_DATA_TYPE)
+#define CALCULATE_WEIGHTS_OFFSET_CORRECTION_STR(A_DATA_TYPE, B_DATA_TYPE) CALCULATE_WEIGHTS_OFFSET_CORRECTION_##A_DATA_TYPE##_##B_DATA_TYPE
+#define CALCULATE_WEIGHTS_OFFSET_CORRECTION_char_char (0)
+#define CALCULATE_WEIGHTS_OFFSET_CORRECTION_uchar_uchar (0)
+#define CALCULATE_WEIGHTS_OFFSET_CORRECTION_uchar_char (128)
+#define CALCULATE_WEIGHTS_OFFSET_CORRECTION_char_uchar (-128)
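+// Note: the correction terms above handle the case where the source and the weights do not share
+// the same 8-bit signedness (uchar vs char). The weights are shifted by +/-128 so that both
+// operands of the 8-bit dot product below use the source data type; the shift is compensated
+// later, when the per-row offset contributions are folded back into the accumulator.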
+
+#define T_LOAD_MULTIPLIERS_SHIFT_PER_TENSOR() \
+ ({})
+
+#define T_LOAD_MULTIPLIERS_SHIFT_PER_CHANNEL() \
+ TILE(DST_MULTIPLIERS_DATA_TYPE, 1, N0, multipliers); \
+ TILE(DST_SHIFTS_DATA_TYPE, 1, N0, shifts); \
+ T_LOAD(DST_MULTIPLIERS_DATA_TYPE, 1, N0, BUFFER, dst_multipliers, cout, 0, 0, 0, multipliers); \
+ T_LOAD(DST_SHIFTS_DATA_TYPE, 1, N0, BUFFER, dst_shifts, cout, 0, 0, 0, shifts);
+
+#define T_LOAD_MULTIPLIERS_SHIFT(QUANTIZATION_TYPE) T_LOAD_MULTIPLIERS_SHIFT_STR(QUANTIZATION_TYPE)
+#define T_LOAD_MULTIPLIERS_SHIFT_STR(QUANTIZATION_TYPE) T_LOAD_MULTIPLIERS_SHIFT_##QUANTIZATION_TYPE()
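+// Note: QUANTIZATION_TYPE is expected to expand to PER_TENSOR or PER_CHANNEL and selects one of
+// the two helpers above: the per-tensor variant relies solely on the compile-time -DDST_MULTIPLIER
+// and -DDST_SHIFT values, while the per-channel variant loads one requantization multiplier and
+// shift per output channel from the dst_multipliers/dst_shifts vectors before T_QUANTIZE8.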
+
+#if defined(WEI_WIDTH) && defined(WEI_HEIGHT) && defined(N0) && defined(M0) && defined(DILATION_X) && defined(DILATION_Y) && defined(STRIDE_X) && defined(STRIDE_Y) && defined(PAD_LEFT) && defined(PAD_TOP)
+//! @cond Doxygen_Suppress
+/** OpenCL kernel to compute the depthwise convolution for quantized data types
+ *
+ * @note Data layout supported: NHWC
+ * @note Data type supported: QSYMM8/QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL
+ * @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
+ * @note The convolution strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y (e.g. -DSTRIDE_X=2, -DSTRIDE_Y=2)
+ * @note The convolution dilations must be passed at compile time using -DDILATION_X and -DDILATION_Y (e.g. -DDILATION_X=2, -DDILATION_Y=2)
+ * @note The spatial dimensions of the weights must be passed at compile time using -DWEI_WIDTH and -DWEI_HEIGHT (e.g. -DWEI_WIDTH=9, -DWEI_HEIGHT=9)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the source tensor must be passed at compile time using -DSRC_TENSOR_TYPE (e.g. -DSRC_TENSOR_TYPE=BUFFER)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the weights tensor must be passed at compile time using -DWEI_TENSOR_TYPE (e.g. -DWEI_TENSOR_TYPE=BUFFER)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the destination tensor must be passed at compile time using -DDST_TENSOR_TYPE (e.g. -DDST_TENSOR_TYPE=BUFFER)
+ * @note The data type of the source tensor must be passed at compile time using -DSRC_DATA_TYPE (e.g. -DSRC_DATA_TYPE=int8)
+ * @note The data type of the weights tensor must be passed at compile time using -DWEI_DATA_TYPE (e.g. -DWEI_DATA_TYPE=int8)
+ * @note The data type of the destination tensor must be passed at compile time using -DDST_DATA_TYPE (e.g. -DDST_DATA_TYPE=int8)
+ * @note The data type of the accumulators must be passed at compile time using -DACC_DATA_TYPE (e.g. -DACC_DATA_TYPE=int)
+ * @note The number of M0 rows (width) to process must be passed at compile time using -DM0 (e.g. -DM0=2)
+ * @note The number of N0 output channels to process must be passed at compile time using -DN0 (e.g. -DN0=2)
+ * @note The size of the partial store block in the first dimension must be passed at compile time using -DPARTIAL_N0 (e.g. -DPARTIAL_N0=1)
+ * @note The activation type must be passed at compile time using -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=relu)
+ * @note The A and B variables required by some activation functions must be passed at compile time using -DA_VAL= and -DB_VAL= respectively
+ * @note The quantization offset used for both the per-tensor and per-channel quantization must be passed at compile time using -DDST_OFFSET (e.g., -DDST_OFFSET=3)
+ * @note The quantization shift for the per-tensor quantization must be passed at compile time using -DDST_SHIFT (e.g., -DDST_SHIFT=1)
+ * @note The quantization multiplier for the per-tensor quantization must be passed at compile time using -DDST_MULTIPLIER (e.g., -DDST_MULTIPLIER=121432)
+ * @note Only the following configurations of M0 and N0 are currently supported:
+ * - M0 = 1, 2, 3, 4, 5, ..., n (M0 != 1 is supported only when STRIDE_X == 1 && DILATION_X == 1)
+ * - N0 = 2, 3, 4, 8, 16
+ * @note The number of rows to read from the src tensor must be passed at compile time using -DM0_A (e.g., -DM0_A=3). M0_A must be equal to WEI_WIDTH + (M0 - 1)
+ * @note The number of columns to read from the src tensor must be passed at compile time using -DN0_A. It can either be 1 (for DEPTH_MULTIPLIER > 1) or N0 (for DEPTH_MULTIPLIER == 1)
+ *
+ * @param[in] src_img (Not supported) Read only cl_image object for the source tensor. Included when SRC_TENSOR_TYPE=IMAGE
+ * @param[in] src_ptr Pointer to the source tensor. Supported data type: QSYMM8/QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_c The size of the channels dimension of the source tensor
+ * @param[in] src_w The size of the width dimension of the source tensor
+ * @param[in] src_h The size of the height dimension of the source tensor
+ * @param[in] src_n The size of the batches dimension of the source tensor
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_img (Not supported) Write only cl_image object for the destination tensor. Included when DST_TENSOR_TYPE=IMAGE
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data type: same as @p src_ptr
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_c The size of the channels dimension of the destination tensor
+ * @param[in] dst_w The size of the width dimension of the destination tensor
+ * @param[in] dst_h The size of the height dimension of the destination tensor
+ * @param[in] dst_n The size of the batches dimension of the destination tensor
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] wei_img (Not supported) Read only cl_image object for the weights tensor. Included when WEI_TENSOR_TYPE=IMAGE
+ * @param[in] wei_ptr Pointer to the weights tensor. Supported data type: same as @p src_ptr
+ * @param[in] wei_stride_y Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] wei_stride_z Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] wei_stride_w Stride of the weights tensor in W dimension (in bytes)
+ * @param[in] wei_c The size of the channels dimension of the weights tensor
+ * @param[in] wei_w The size of the width dimension of the weights tensor
+ * @param[in] wei_h The size of the height dimension of the weights tensor
+ * @param[in] wei_n The size of the batches dimension of the weights tensor
+ * @param[in] wei_step_w wei_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] wei_offset_first_element_in_bytes The offset of the first element in the weights tensor
+ * @param[in] dst_multipliers_ptr Pointer to the destination multipliers tensor for the per-channel quantization. Supported data type: S32
+ * @param[in] dst_multipliers_stride_x Stride of the destination multipliers tensor in X dimension (in bytes)
+ * @param[in] dst_multipliers_step_x dst_multipliers_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_multipliers_offset_first_element_in_bytes The offset of the first element in the destination multipliers tensor
+ * @param[in] dst_shifts_ptr Pointer to the destination shifts tensor for the per-channel quantization. Supported data type: S32
+ * @param[in] dst_shifts_stride_x Stride of the destination shifts tensor in X dimension (in bytes)
+ * @param[in] dst_shifts_step_x dst_shifts_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_shifts_offset_first_element_in_bytes The offset of the first element in the destination shifts tensor
+ * @param[in] bia_ptr (Optional) Pointer to the bias tensor. Supported data type: S32
+ * @param[in] bia_stride_x (Optional) Stride of the bias tensor in X dimension (in bytes)
+ * @param[in] bia_step_x (Optional) bia_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] bia_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
+ */
+//! @endcond
+__kernel void dwc_native_quantized_nhwc(
+ TENSOR4D_RO_T(src, SRC_TENSOR_TYPE),
+ TENSOR4D_WO_T(dst, DST_TENSOR_TYPE),
+ TENSOR4D_RO_T(wei, WEI_TENSOR_TYPE),
+ VECTOR_DECLARATION(dst_multipliers),
+ VECTOR_DECLARATION(dst_shifts)
+#if defined(HAS_BIAS)
+ ,
+ VECTOR_DECLARATION(bia)
+#endif // defined(HAS_BIAS)
+)
+{
+ // Only the weight tensor dimensions are passed at compile time.
+ // In case of dynamic tensor support, the following dimensions should be passed as function arguments.
+#define _IWEI_WIDTH WEI_WIDTH
+#define _IWEI_HEIGHT WEI_HEIGHT
+#define _IM0_A M0_A // _IWEI_WIDTH + (M0 - 1) Rows tile A (If M0 != 1, the tiles overlap by 1 element on the X dimension)
+#define _IN0_A N0_A // Cols tile A. It can be either 1 (for DEPTH_MULTIPLIER > 1) or N0 (for DEPTH_MULTIPLIER == 1)
+#define _IM0_B _IWEI_WIDTH // Rows tile B
+#define _IN0_B N0 // Cols tile B
+#define _IBOUNDARY_CHECK (!((WEI_WIDTH == 1 && WEI_HEIGHT == 1 && PAD_LEFT == 0 && PAD_TOP == 0 && M0 == 1)))
+
+ const int cout = GET_SPATIAL_IDX(0, N0, PARTIAL_N0); // OFM
+ const int xo = GET_SPATIAL_IDX(1, M0, 0); // WIDTH
+#if defined(BATCHED_EXECUTION)
+ const int yo = GET_SPATIAL_IDX(2, 1, 0) % dst_h; // HEIGHT
+ const int bout = GET_SPATIAL_IDX(2, 1, 0) / dst_h; // BATCH SIZE IDX
+#else // defined(BATCHED_EXECUTION)
+ const int yo = GET_SPATIAL_IDX(2, 1, 0); // HEIGHT
+ const int bout = 0; // BATCH SIZE IDX
+#endif // defined(BATCHED_EXECUTION)
+
+ int xi = xo * STRIDE_X;
+ int yi = yo * STRIDE_Y;
+ xi -= PAD_LEFT;
+ yi -= PAD_TOP;
+
+ TILE(ACC_DATA_TYPE, M0, N0, c);
+
+ // Reset accumulators
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c[i].v = 0;
+ })
+
+#if _IWEI_HEIGHT <= 5
+ LOOP_UNROLLING(int, yk, 0, 1, _IWEI_HEIGHT,
+#else // _IWEI_HEIGHT <= 5
+ for(int yk = 0; yk < _IWEI_HEIGHT; yk++)
+#endif // _IWEI_HEIGHT <= 5
+ {
+ TILE(SRC_DATA_TYPE, _IM0_A, _IN0_A, a);
+
+ LOOP_UNROLLING(int, i, 0, 1, _IM0_A,
+ {
+ a[i].v = ZERO_VALUE;
+ })
+
+ // Load tile from the src tensor (TILE A)
+ T_LOAD_NHWC_WITH_DILATION(SRC_DATA_TYPE, 1, _IM0_A, _IN0_A, SRC_TENSOR_TYPE, src, bout, yi + yk * DILATION_Y, xi, (cout / DEPTH_MULTIPLIER), src_w, src_h, DILATION_X, 1, _IBOUNDARY_CHECK, a);
+
+ TILE(WEI_DATA_TYPE, _IM0_B, _IN0_B, b);
+
+ // Load tile from the weights tensor (TILE B)
+ T_LOAD(WEI_DATA_TYPE, _IM0_B, _IN0_B, WEI_TENSOR_TYPE, wei, cout, yk * _IM0_B, 1, wei_stride_y, b);
+
+ // Optimized path for STRIDE_X == 1
+ // If M0 != 1, we can skip the common loads between the two applied kernels on the X (WIDTH) dimension
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+#if _IWEI_WIDTH <= 16
+#define DOT_DATA_TYPE SRC_DATA_TYPE
+#define WEI_OFFSET_CORRECTION (CALCULATE_WEIGHTS_OFFSET_CORRECTION(SRC_DATA_TYPE, WEI_DATA_TYPE))
+
+ // Optimized path for the dot instruction
+ TILE(DOT_DATA_TYPE, 1, _IWEI_WIDTH, x0);
+ TILE(DOT_DATA_TYPE, 1, _IWEI_WIDTH, y0);
+ ACC_DATA_TYPE offset_a = 0;
+ ACC_DATA_TYPE offset_b = 0;
+
+ LOOP_UNROLLING(int, xk, 0, 1, _IWEI_WIDTH,
+ {
+ x0[0].s[xk] = a[xk + m0].s[n0];
+ y0[0].s[xk] = b[xk].s[n0] + (int)WEI_OFFSET_CORRECTION;
+ })
+ DOT_PRODUCT_INTEGER8(DOT_DATA_TYPE, DOT_DATA_TYPE, ACC_DATA_TYPE, _IWEI_WIDTH, x0[0].v, y0[0].v, c[m0].s[n0]);
+ REDUCE_INTEGER8(DOT_DATA_TYPE, DOT_DATA_TYPE, ACC_DATA_TYPE, _IWEI_WIDTH, x0[0].v, offset_a);
+ REDUCE_INTEGER8(DOT_DATA_TYPE, DOT_DATA_TYPE, ACC_DATA_TYPE, _IWEI_WIDTH, y0[0].v, offset_b);
+ c[m0].s[n0] += offset_a * (ACC_DATA_TYPE)(WEI_OFFSET - (ACC_DATA_TYPE)WEI_OFFSET_CORRECTION) + offset_b * (ACC_DATA_TYPE)SRC_OFFSET;
+#else // _IWEI_WIDTH <= 16
+ LOOP_UNROLLING(int, xk, 0, 1, _IWEI_WIDTH,
+ {
+ c[m0].s[n0] += ((ACC_DATA_TYPE)a[xk + m0].s[n0] + (ACC_DATA_TYPE)(SRC_OFFSET)) * ((ACC_DATA_TYPE)b[xk].s[n0] + (ACC_DATA_TYPE)(WEI_OFFSET));
+ })
+#endif // _IWEI_WIDTH <= 16
+ })
+ })
+ }
+#if _IWEI_HEIGHT <= 5
+ )
+#endif // _IWEI_HEIGHT <= 5
+
+#if _IWEI_WIDTH <= 16
+ T_ADD_CONSTANT(ACC_DATA_TYPE, M0, N0, c, (_IWEI_WIDTH * _IWEI_HEIGHT * SRC_OFFSET * (ACC_DATA_TYPE)(WEI_OFFSET - (ACC_DATA_TYPE)WEI_OFFSET_CORRECTION)), c);
+#endif // _IWEI_WIDTH <= 16
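+ // Note on the offset algebra: together with the per-row terms accumulated inside the loop, the
+ // constant added above reconstructs, for each output channel,
+ //   sum_k (a_k + SRC_OFFSET) * (b_k + WEI_OFFSET)
+ //     = dot(a, b + corr) + (WEI_OFFSET - corr) * sum_k a_k + SRC_OFFSET * sum_k (b_k + corr)
+ //       + K * SRC_OFFSET * (WEI_OFFSET - corr)
+ // where corr = WEI_OFFSET_CORRECTION and K = _IWEI_WIDTH * _IWEI_HEIGHT. The dot, offset_a and
+ // offset_b terms are accumulated row by row in the loop; the last term is the constant above.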
+
+#if defined(HAS_BIAS)
+ TILE(BIA_DATA_TYPE, 1, N0, bias0);
+
+ // Load bias
+ T_LOAD(BIA_DATA_TYPE, 1, N0, BUFFER, bia, cout, 0, 0, 0, bias0);
+
+ // c = c + bias[broadcasted]
+ T_ELTWISE_BROADCAST_ADD_X(ACC_DATA_TYPE, M0, N0, c, bias0, c);
+#endif // HAS_BIAS
+
+ T_LOAD_MULTIPLIERS_SHIFT(QUANTIZATION_TYPE);
+
+ // Quantize the tile
+ TILE(DST_DATA_TYPE, M0, N0, cq);
+ T_QUANTIZE8(ACC_DATA_TYPE, DST_DATA_TYPE, QUANTIZATION_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, c, multipliers, shifts, cq);
+
+ // Perform activation
+ T_ACTIVATION_QUANTIZED(DST_DATA_TYPE, M0, N0, ACTIVATION_TYPE, DST_OFFSET, A_VAL, B_VAL, cq, cq);
+
+ bool x_cond = PARTIAL_N0 != 0 && get_global_id(0) == 0;
+
+ if(x_cond)
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ int xi_out = min(xo + M0 - 1 - m0, (int)(dst_w) - 1);
+ VSTORE_PARTIAL(N0, PARTIAL_N0)
+ (cq[M0 - 1 - m0].v, 0, (__global DST_DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + (uint)cout * sizeof(DST_DATA_TYPE) + (uint)xi_out * dst_stride_y + (uint)yo * dst_stride_z + (uint)bout * dst_stride_w));
+ })
+ }
+ else
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ int xi_out = min(xo + M0 - 1 - m0, (int)(dst_w) - 1);
+ VSTORE(N0)
+ (cq[M0 - 1 - m0].v, 0, (__global DST_DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + (uint)cout * sizeof(DST_DATA_TYPE) + (uint)xi_out * dst_stride_y + (uint)yo * dst_stride_z + (uint)bout * dst_stride_w));
+ })
+ }
+}
+#endif // defined(WEI_WIDTH) && defined(WEI_HEIGHT) && defined(N0) && defined(M0) && defined(DILATION_X) && defined(DILATION_Y) && defined(STRIDE_X) && defined(STRIDE_Y) && defined(PAD_LEFT) && defined(PAD_TOP)
+// *INDENT-ON*
+// clang-format on
diff --git a/src/core/CL/cl_kernels/nhwc/im2col.cl b/src/core/CL/cl_kernels/nhwc/im2col.cl
new file mode 100644
index 0000000000..a23e943fab
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/im2col.cl
@@ -0,0 +1,526 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#define VECTOR_N VEC_DATA_TYPE(DATA_TYPE, VECTOR_SIZE)
+#define COND_N SIGNED_INT_VEC_DATA_TYPE(DATA_TYPE, VECTOR_SIZE)
+
+#if defined(IM2COL_3X3) || defined(IM2COL_9X9)
+/** Store a 1x9 row or a 3x3 block in a boundary-aware manner to avoid padding in the channel dimension
+ * @name IM2COL1X9_NHWC_STORE
+ *
+ * @note To use this macro for a 3x3 block, @p ROW has to be 0
+ *
+ * @param[in] VECTOR_SIZE The non-boundary vector width of @p DATA. Supported: 1(scalar), 2, 3, 4, 8, 16
+ * @param[in] BOUNDARY_VECTOR_SIZE The boundary vector width of @p DATA. Supported: 1-16, but has to be <= @p VECTOR_SIZE
+ * @param[in] DATA_TYPE Data type of @p DATA
+ * @param[in] SRC_DEPTH Input channel size / depth
+ * @param[in] DATA Value variable base name
+ * @param[in] ROW The row number to store. Supported: 0-8
+ * @param[in] OUTPUT_PTR Output pointer
+ * @{
+ */
+#if defined(VECTOR_SIZE) && defined(BOUNDARY_VECTOR_SIZE) && BOUNDARY_VECTOR_SIZE < VECTOR_SIZE
+#define IM2COL1X9_NHWC_STORE(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE, DATA_TYPE, SRC_DEPTH, DATA, ROW, OUTPUT_PTR) \
+ const bool at_channel_boundary = get_global_id(0) == 0; \
+ if(at_channel_boundary) \
+ { \
+ IM2COL1X9_NHWC_STORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE, DATA_TYPE, SRC_DEPTH, DATA, ROW, OUTPUT_PTR) \
+ } \
+ else \
+ { \
+ IM2COL1X9_NHWC_STORE_NONPARTIAL(VECTOR_SIZE, DATA_TYPE, SRC_DEPTH, DATA, ROW, OUTPUT_PTR) \
+ }
+#else // defined(VECTOR_SIZE) && defined(BOUNDARY_VECTOR_SIZE) && BOUNDARY_VECTOR_SIZE < VECTOR_SIZE
+#define IM2COL1X9_NHWC_STORE(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE, DATA_TYPE, SRC_DEPTH, DATA, ROW, OUTPUT_PTR) \
+ IM2COL1X9_NHWC_STORE_NONPARTIAL(VECTOR_SIZE, DATA_TYPE, SRC_DEPTH, DATA, ROW, OUTPUT_PTR)
+#endif // defined(VECTOR_SIZE) && defined(BOUNDARY_VECTOR_SIZE) && BOUNDARY_VECTOR_SIZE < VECTOR_SIZE
+
+#define IM2COL1X9_NHWC_STORE_NONPARTIAL(VECTOR_SIZE, DATA_TYPE, SRC_DEPTH, DATA, ROW, OUTPUT_PTR) \
+ VSTORE(VECTOR_SIZE) \
+ (DATA##0, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (0 + ROW * 9) * SRC_DEPTH); \
+ VSTORE(VECTOR_SIZE) \
+ (DATA##1, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (1 + ROW * 9) * SRC_DEPTH); \
+ VSTORE(VECTOR_SIZE) \
+ (DATA##2, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (2 + ROW * 9) * SRC_DEPTH); \
+ VSTORE(VECTOR_SIZE) \
+ (DATA##3, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (3 + ROW * 9) * SRC_DEPTH); \
+ VSTORE(VECTOR_SIZE) \
+ (DATA##4, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (4 + ROW * 9) * SRC_DEPTH); \
+ VSTORE(VECTOR_SIZE) \
+ (DATA##5, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (5 + ROW * 9) * SRC_DEPTH); \
+ VSTORE(VECTOR_SIZE) \
+ (DATA##6, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (6 + ROW * 9) * SRC_DEPTH); \
+ VSTORE(VECTOR_SIZE) \
+ (DATA##7, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (7 + ROW * 9) * SRC_DEPTH); \
+ VSTORE(VECTOR_SIZE) \
+ (DATA##8, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (8 + ROW * 9) * SRC_DEPTH);
+
+#define IM2COL1X9_NHWC_STORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE, DATA_TYPE, SRC_DEPTH, DATA, ROW, OUTPUT_PTR) \
+ VSTORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE) \
+ (DATA##0, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (0 + ROW * 9) * SRC_DEPTH); \
+ VSTORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE) \
+ (DATA##1, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (1 + ROW * 9) * SRC_DEPTH); \
+ VSTORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE) \
+ (DATA##2, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (2 + ROW * 9) * SRC_DEPTH); \
+ VSTORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE) \
+ (DATA##3, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (3 + ROW * 9) * SRC_DEPTH); \
+ VSTORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE) \
+ (DATA##4, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (4 + ROW * 9) * SRC_DEPTH); \
+ VSTORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE) \
+ (DATA##5, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (5 + ROW * 9) * SRC_DEPTH); \
+ VSTORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE) \
+ (DATA##6, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (6 + ROW * 9) * SRC_DEPTH); \
+ VSTORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE) \
+ (DATA##7, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (7 + ROW * 9) * SRC_DEPTH); \
+ VSTORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE) \
+ (DATA##8, 0, (__global DATA_TYPE *)(OUTPUT_PTR) + (8 + ROW * 9) * SRC_DEPTH);
+/** @}*/
+#endif // defined(IM2COL_3X3) || defined(IM2COL_9X9)
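+// Note: with the store macros above, each im2col output row (one spatial output position) is laid
+// out channel-innermost: the value stored by the p-th VSTORE of block ROW lands at column
+// (p + ROW * 9) * SRC_DEPTH + ch, so a full 1x9 row (or the whole 3x3 patch, for which ROW == 0)
+// occupies 9 * SRC_DEPTH consecutive elements of the destination row.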
+
+#if defined(IM2COL_3X3)
+/** This kernel performs im2col when the kernel size is 3x3 and the data layout is NHWC
+ *
+ * @note This kernel computes VECTOR_SIZE elements
+ * @note This kernel stores VECTOR_SIZE or BOUNDARY_VECTOR_SIZE (if at boundary) elements
+ * @note The vector size must be passed at compile time using -DVECTOR_SIZE: e.g. -DVECTOR_SIZE=2
+ * @note The boundary vector size must be passed at compile time using -DBOUNDARY_VECTOR_SIZE: e.g. -DBOUNDARY_VECTOR_SIZE=1
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The width of output tensor after matrix multiplication must be passed at compile time using -DCONVOLVED_WIDTH: e.g. -DCONVOLVED_WIDTH=34
+ * @note The kernel depth must be passed at compile time using -DSRC_DEPTH: e.g. -DSRC_DEPTH=3
+ * @note The stride along the Y direction must be passed at compile time using -DSTRIDE_Y: e.g. -DSTRIDE_Y=1
+ * @note In case biases are added to the convolution, -DHAS_BIAS has to be passed at compile time so that a 1 is appended to each row of the im2col output matrix.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8_SIGNED/QASYMM8/F16/F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes).
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes).
+ */
+__kernel void im2col3x3_nhwc(
+ TENSOR3D_DECLARATION(src),
+ IMAGE_DECLARATION(dst),
+ uint src_stride_w,
+ uint dst_stride_w)
+{
+ // input feature map, boundary-corrected (shift all non-boundary vectors by shift_amount) to avoid padding
+ const int shift_amount = (int)VECTOR_SIZE - (int)BOUNDARY_VECTOR_SIZE;
+ const int ch = max((int)(get_global_id(0) * VECTOR_SIZE) - shift_amount, 0);
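+ // Worked example of the boundary correction (hypothetical sizes): with SRC_DEPTH = 10,
+ // VECTOR_SIZE = 4 and BOUNDARY_VECTOR_SIZE = 2, shift_amount = 2 and work-items 0, 1, 2 start at
+ // ch = 0, 2 and 6. Work-item 0 stores only the 2 boundary channels, the others store full
+ // 4-channel vectors, so the whole channel dimension is covered without writing into padding.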
+ const int yo = get_global_id(1);
+ const int batch = get_global_id(2); // batch size
+
+ // Calculate input indices
+ const int xi = (get_global_id(1) % CONVOLVED_WIDTH) * STRIDE_X;
+ const int yi = (get_global_id(1) / (int)CONVOLVED_WIDTH) * STRIDE_Y;
+
+ // Get input and output address
+ __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + ch * sizeof(DATA_TYPE) + batch * (int)src_stride_w;
+ __global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + ch * sizeof(DATA_TYPE) + yo * (int)dst_stride_y + batch * (int)dst_stride_w;
+
+ int yi_coord = 0;
+ int3 offset = 0;
+
+ // Clamp xi
+ int3 xi_offset = ((int3)xi + (int3)(0, 1, 2) * DILATION_X - (int3)PAD_LEFT);
+#if PAD_LEFT != 0 || PAD_RIGHT != 0
+#define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val)
+ xi_offset = CLAMP(xi_offset, (int3)0, (int3)(SRC_WIDTH - 1));
+#endif // PAD_LEFT != 0 || PAD_RIGHT != 0
+ // Multiply by src_stride_y as the width (X) dimension here is the second (y) dimension in src NHWC tensor
+ xi_offset *= (int3)src_stride_y;
+
+ // Out-of-bound condition for X
+ int3 x_cond = (((int3)xi + (int3)(0, 1, 2) * DILATION_X - (int3)PAD_LEFT) < (int3)0) || (((int3)xi + (int3)(0, 1, 2) * DILATION_X - (int3)PAD_LEFT) >= (int3)SRC_WIDTH);
+
+ // yi == 0
+ // Clamp yi
+ // yi_coord is cast to unsigned int in order to use just a min() operation
+ // A "-1" 32 bit signed variable converted to unsigned gives 4294967295
+ // This is a trick so that the values loaded in the padding areas are always from the last row (SRC_HEIGHT - 1),
+ // because of the negative yi_coord wrap-around, but it gets overwritten by PAD_VALUE immediately as the wrap-around
+ // also causes y_cond (y padding condition) to be satisfied
+ yi_coord = yi - (int)PAD_TOP;
+
+ // Clamp only if PAD_TOP or PAD_BOTTOM is not equal to 0
+#if PAD_TOP != 0 || PAD_BOTTOM != 0
+ yi_coord = min((uint)yi_coord, (uint)(SRC_HEIGHT - 1));
+#endif // PAD_TOP != 0 || PAD_BOTTOM != 0
+
+ // Compute offset
+ offset = xi_offset + (yi_coord * (int)src_stride_z);
+
+ // Load input values
+ VECTOR_N values0 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset.s0));
+ VECTOR_N values1 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset.s1));
+ VECTOR_N values2 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset.s2));
+
+#if PAD_TOP != 0 || PAD_LEFT != 0 || PAD_BOTTOM != 0 || PAD_RIGHT != 0
+ // Replace invalid values with PAD_VALUE
+ int y_cond = (int)((uint)(yi - (int)PAD_TOP) >= (uint)(SRC_HEIGHT));
+ values0 = select(values0, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond.s0)));
+ values1 = select(values1, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond.s1)));
+ values2 = select(values2, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond.s2)));
+#endif // PAD_TOP != 0 || PAD_LEFT != 0 || PAD_BOTTOM != 0 || PAD_RIGHT != 0
+
+ // yi == 1
+ // Clamp yi_coord (it can be negative if PAD_TOP > 1)
+ yi_coord = yi - (int)PAD_TOP + 1 * DILATION_Y;
+
+ // Clamp only if PAD_TOP or PAD_BOTTOM is not equal to 0
+#if PAD_TOP != 0 || PAD_BOTTOM != 0
+ yi_coord = min((uint)yi_coord, (uint)(SRC_HEIGHT - 1));
+#endif // PAD_TOP != 0 || PAD_BOTTOM != 0
+
+ // Compute offset
+ offset = xi_offset + (yi_coord * (int)src_stride_z);
+
+ // Load input values
+ VECTOR_N values3 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset.s0));
+ VECTOR_N values4 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset.s1));
+ VECTOR_N values5 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset.s2));
+
+#if PAD_TOP != 0 || PAD_LEFT != 0 || PAD_BOTTOM != 0 || PAD_RIGHT != 0
+ // Replace invalid values with zeros
+ y_cond = (int)((uint)(yi - (int)PAD_TOP + 1 * DILATION_Y) >= (uint)(SRC_HEIGHT));
+ values3 = select(values3, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond.s0)));
+ values4 = select(values4, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond.s1)));
+ values5 = select(values5, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond.s2)));
+#endif // PAD_TOP != 0 || PAD_LEFT != 0 || PAD_BOTTOM != 0 || PAD_RIGHT != 0
+
+ // yi == 2
+ // Clamp yi_coord
+ yi_coord = yi - (int)PAD_TOP + 2 * DILATION_Y;
+
+ // Clamp only if PAD_TOP or PAD_BOTTOM is not equal to 0
+#if PAD_TOP != 0 || PAD_BOTTOM != 0
+ yi_coord = min((uint)yi_coord, (uint)(SRC_HEIGHT - 1));
+#endif // PAD_TOP != 0 || PAD_BOTTOM != 0
+
+ // Compute offset
+ offset = xi_offset + (yi_coord * (int)src_stride_z);
+
+ // Load input values
+ VECTOR_N values6 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset.s0));
+ VECTOR_N values7 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset.s1));
+ VECTOR_N values8 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset.s2));
+
+#if PAD_TOP != 0 || PAD_LEFT != 0 || PAD_BOTTOM != 0 || PAD_RIGHT != 0
+ // Replace invalid values with PAD_VALUE
+ y_cond = (int)((uint)(yi - (int)PAD_TOP + 2 * DILATION_Y) >= (uint)(SRC_HEIGHT));
+ values6 = select(values6, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond.s0)));
+ values7 = select(values7, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond.s1)));
+ values8 = select(values8, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond.s2)));
+#endif // PAD_TOP != 0 || PAD_LEFT != 0 || PAD_BOTTOM != 0 || PAD_RIGHT != 0
+
+ // Store in a boundary-aware way to avoid padding
+ IM2COL1X9_NHWC_STORE(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE, DATA_TYPE, SRC_DEPTH, values, 0, output_ptr)
+
+#ifdef HAS_BIAS
+ // We can use VECTOR_SIZE instead of BOUNDARY_VECTOR_SIZE even if it's at the boundary. This is because the bias is
+ // added at the end of the channel, while the boundary vec is at the beginning of the channel.
+ // The only case where the boundary vec is at the end of the channel is when there's only a single boundary vec in
+ // the whole channel dimension, but in that case VECTOR_SIZE is also equal to BOUNDARY_VECTOR_SIZE
+ // See the value of num_elems_processed_per_iteration in configure_opencl_kernel method in CLIm2ColKernel.cpp
+ if((ch + VECTOR_SIZE) >= SRC_DEPTH)
+ {
+ *((__global DATA_TYPE *)(output_ptr) - ch + SRC_DEPTH * 9) = 1.0f;
+ }
+#endif // HAS_BIAS
+}
+#endif // defined(IM2COL_3X3)
+
+#if defined(IM2COL_9X9)
+#if PAD_TOP != 0 || PAD_LEFT != 0 || PAD_BOTTOM != 0 || PAD_RIGHT != 0
+#define IM2COL1x9(i) \
+ ({ \
+ yi_coord = yi - (int)PAD_TOP + i * DILATION_Y; \
+ yi_coord = min((uint)yi_coord, (uint)(SRC_HEIGHT - 1)); \
+ \
+ offset0 = xi_offset0 + (yi_coord * (int)src_stride_z); \
+ offset1 = xi_offset1 + (yi_coord * (int)src_stride_z); \
+ \
+ VECTOR_N values0 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s0)); \
+ VECTOR_N values1 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s1)); \
+ VECTOR_N values2 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s2)); \
+ VECTOR_N values3 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s3)); \
+ VECTOR_N values4 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s4)); \
+ VECTOR_N values5 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s5)); \
+ VECTOR_N values6 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s6)); \
+ VECTOR_N values7 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s7)); \
+ VECTOR_N values8 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset1)); \
+ \
+ int y_cond = (int)((uint)(yi - (int)PAD_TOP + i * DILATION_Y) >= (uint)(SRC_HEIGHT)); \
+ values0 = select(values0, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond0.s0))); \
+ values1 = select(values1, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond0.s1))); \
+ values2 = select(values2, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond0.s2))); \
+ values3 = select(values3, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond0.s3))); \
+ values4 = select(values4, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond0.s4))); \
+ values5 = select(values5, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond0.s5))); \
+ values6 = select(values6, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond0.s6))); \
+ values7 = select(values7, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond0.s7))); \
+ values8 = select(values8, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)y_cond || (COND_N)(x_cond1))); \
+ \
+ IM2COL1X9_NHWC_STORE(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE, DATA_TYPE, SRC_DEPTH, values, i, output_ptr) \
+ })
+#else // PAD_TOP != 0 || PAD_LEFT != 0 || PAD_BOTTOM != 0 || PAD_RIGHT != 0
+#define IM2COL1x9(i) \
+ ({ \
+ yi_coord = yi - (int)PAD_TOP + i * DILATION_Y; \
+ yi_coord = min((uint)yi_coord, (uint)(SRC_HEIGHT - 1)); \
+ \
+ offset0 = xi_offset0 + (yi_coord * (int)src_stride_z); \
+ offset1 = xi_offset1 + (yi_coord * (int)src_stride_z); \
+ \
+ VECTOR_N values0 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s0)); \
+ VECTOR_N values1 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s1)); \
+ VECTOR_N values2 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s2)); \
+ VECTOR_N values3 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s3)); \
+ VECTOR_N values4 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s4)); \
+ VECTOR_N values5 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s5)); \
+ VECTOR_N values6 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s6)); \
+ VECTOR_N values7 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset0.s7)); \
+ VECTOR_N values8 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset1)); \
+ \
+ IM2COL1X9_NHWC_STORE(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE, DATA_TYPE, SRC_DEPTH, values, i, output_ptr) \
+ })
+#endif // PAD_TOP != 0 || PAD_LEFT != 0 || PAD_BOTTOM != 0 || PAD_RIGHT != 0
+
+/** This kernel performs im2col when the kernel size is 9x9 and the data layout is NHWC
+ *
+ * @note This kernel computes VECTOR_SIZE elements
+ * @note This kernel stores VECTOR_SIZE or BOUNDARY_VECTOR_SIZE (if at boundary) elements
+ * @note The vector size must be passed at compile time using -DVECTOR_SIZE: e.g. -DVECTOR_SIZE=2
+ * @note The boundary vector size must be passed at compile time using -DBOUNDARY_VECTOR_SIZE: e.g. -DBOUNDARY_VECTOR_SIZE=1
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The width of output tensor after matrix multiplication must be passed at compile time using -DCONVOLVED_WIDTH: e.g. -DCONVOLVED_WIDTH=34
+ * @note The kernel depth must be passed at compile time using -DSRC_DEPTH: e.g. -DSRC_DEPTH=3
+ * @note The stride along the Y direction must be passed at compile time using -DSTRIDE_Y: e.g. -DSTRIDE_Y=1
+ * @note In case biases are added to the convolution, -DHAS_BIAS has to be passed at compile time so that a 1 is appended to each row of the im2col output matrix.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8_SIGNED/QASYMM8/F16/F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes).
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes).
+ */
+__kernel void im2col9x9_nhwc(
+ TENSOR3D_DECLARATION(src),
+ IMAGE_DECLARATION(dst),
+ uint src_stride_w,
+ uint dst_stride_w)
+{
+ // input feature map, boundary-corrected (shift all non-boundary vectors by shift_amount) to avoid padding
+ const int shift_amount = (int)VECTOR_SIZE - (int)BOUNDARY_VECTOR_SIZE;
+ const int ch = max((int)(get_global_id(0) * VECTOR_SIZE) - shift_amount, 0);
+ const int yo = get_global_id(1);
+ const int batch = get_global_id(2); // batch size
+
+ // Calculate input indices
+ const int xi = (get_global_id(1) % CONVOLVED_WIDTH) * STRIDE_X;
+ const int yi = (get_global_id(1) / (int)CONVOLVED_WIDTH) * STRIDE_Y;
+
+ // Get input and output address
+ __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + ch * sizeof(DATA_TYPE) + batch * (int)src_stride_w;
+ __global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + ch * sizeof(DATA_TYPE) + yo * (int)dst_stride_y + batch * (int)dst_stride_w;
+
+ int yi_coord = 0;
+ int8 offset0 = 0;
+ int offset1 = 0;
+
+ // Clamp xi
+ int8 xi_offset0 = ((int8)xi + (int8)(0, 1, 2, 3, 4, 5, 6, 7) * DILATION_X - (int8)PAD_LEFT);
+ int xi_offset1 = ((int)xi + (int)(8) * DILATION_X - (int)PAD_LEFT);
+
+#if PAD_LEFT != 0 || PAD_RIGHT != 0
+#define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val)
+ xi_offset0 = CLAMP(xi_offset0, (int8)0, (int8)(SRC_WIDTH - 1));
+ xi_offset1 = CLAMP(xi_offset1, (int)0, (int)(SRC_WIDTH - 1));
+#endif // PAD_LEFT != 0 || PAD_RIGHT != 0
+ xi_offset0 *= (int8)src_stride_y;
+ xi_offset1 *= (int)src_stride_y;
+
+ // Out-of-bound condition for X
+ int8 x_cond0 = (((int8)xi + (int8)(0, 1, 2, 3, 4, 5, 6, 7) * DILATION_X - (int8)PAD_LEFT) < (int8)0) || (((int8)xi + (int8)(0, 1, 2, 3, 4, 5, 6, 7) * DILATION_X - (int8)PAD_LEFT) >= (int8)SRC_WIDTH);
+ int x_cond1 = (((int)xi + (int)(8) * DILATION_X - (int)PAD_LEFT) < (int)0) || (((int)xi + (int)(8) * DILATION_X - (int)PAD_LEFT) >= (int)SRC_WIDTH);
+
+ IM2COL1x9(0);
+ IM2COL1x9(1);
+ IM2COL1x9(2);
+ IM2COL1x9(3);
+ IM2COL1x9(4);
+ IM2COL1x9(5);
+ IM2COL1x9(6);
+ IM2COL1x9(7);
+ IM2COL1x9(8);
+
+#ifdef HAS_BIAS
+ // We can use VECTOR_SIZE instead of BOUNDARY_VECTOR_SIZE even if it's at the boundary. This is because the bias is
+ // added at the end of the channel, while the boundary vec is at the beginning of the channel.
+ // The only case where the boundary vec is at the end of the channel is when there's only a single boundary vec in
+ // the whole channel dimension, but in that case VECTOR_SIZE is also equal to BOUNDARY_VECTOR_SIZE
+ // See the value of num_elems_processed_per_iteration in configure_opencl_kernel method in CLIm2ColKernel.cpp
+ if((ch + VECTOR_SIZE) >= SRC_DEPTH)
+ {
+ *((__global DATA_TYPE *)(output_ptr) - ch + SRC_DEPTH * 81) = 1.0f;
+ }
+#endif // HAS_BIAS
+}
+#endif // defined(IM2COL_9X9)
+
+#if defined(IM2COL_GENERIC)
+/** This OpenCL kernel performs a generic im2col implementation when the data layout is NHWC
+ *
+ * @note This kernel computes VECTOR_SIZE elements
+ * @note This kernel stores VECTOR_SIZE or BOUNDARY_VECTOR_SIZE (if at boundary) elements
+ * @note The vector size must be passed at compile time using -DVECTOR_SIZE: e.g. -DVECTOR_SIZE=2
+ * @note The boundary vector size must be passed at compile time using -DBOUNDARY_VECTOR_SIZE: e.g. -DBOUNDARY_VECTOR_SIZE=1
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The width and height of the input tensor must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT: e.g. -DSRC_WIDTH=128 and -DSRC_HEIGHT=128
+ * @note The width of output tensor after matrix multiplication must be passed at compile time using -DCONVOLVED_WIDTH: e.g. -DCONVOLVED_WIDTH=34
+ * @note The kernel width, height and depth must be passed at compile time using -DKERNEL_WIDTH, -DKERNEL_HEIGHT and -DSRC_DEPTH: e.g. -DKERNEL_WIDTH=3, -DKERNEL_HEIGHT=3 and -DSRC_DEPTH=64
+ * @note The pad_left, pad_right, pad_top and pad_bottom must be passed at compile time using -DPAD_LEFT, -DPAD_RIGHT, -DPAD_TOP and -DPAD_BOTTOM: e.g. -DPAD_LEFT=1, -DPAD_RIGHT=2, -DPAD_TOP=3 and -DPAD_BOTTOM=2
+ * @note The zero value to store in case we load values out-of-bounds must be passed at compile time using -DPAD_VALUE: e.g. -DPAD_VALUE=0.0
+ * @note The stride along the X and Y directions must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y: e.g. -DSTRIDE_X=1 and -DSTRIDE_Y=1
+ * @note The dilation_x and dilation_y must be passed at compile time using -DDILATION_X and -DDILATION_Y: e.g. -DDILATION_X=1, -DDILATION_Y=1
+ * @note In case biases are added to the convolution, -DHAS_BIAS has to be passed at compile time so that a 1 is appended to each row of the im2col output matrix.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8_SIGNED/QASYMM8/F16/F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes).
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes).
+ */
+__kernel void im2col_generic_nhwc(
+ TENSOR3D_DECLARATION(src),
+ IMAGE_DECLARATION(dst),
+ uint src_stride_w,
+ uint dst_stride_w)
+{
+ // input feature map, boundary-corrected (shift all non-boundary vectors by shift_amount) to avoid padding
+ const int shift_amount = (int)VECTOR_SIZE - (int)BOUNDARY_VECTOR_SIZE;
+ const int ch = max((int)(get_global_id(0) * VECTOR_SIZE) - shift_amount, 0);
+ const int yo = get_global_id(1);
+ const int batch = get_global_id(2); // batch size
+
+ // Calculate input indices
+ const int xi = (yo % CONVOLVED_WIDTH) * STRIDE_X;
+ const int yi = (yo / (int)CONVOLVED_WIDTH) * STRIDE_Y;
+
+ // Get input and output address
+ const int stride_x = ch * sizeof(DATA_TYPE);
+ __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + stride_x + batch * (int)src_stride_w;
+ __global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + stride_x + yo * (int)dst_stride_y + batch * (int)dst_stride_w;
+
+ int i = 0;
+ for(int yk = 0; yk < KERNEL_HEIGHT; ++yk)
+ {
+ // Clamp yi_coord
+ int yi_coord = yi + yk * DILATION_Y - (int)PAD_TOP;
+ yi_coord = clamp(yi_coord, (int)0, (int)(SRC_HEIGHT - 1));
+
+ // Out-of-bound condition for Y
+ int y_border_condition = ((yi + yk * DILATION_Y - (int)PAD_TOP) < (int)0) || ((yi + yk * DILATION_Y - (int)PAD_TOP) >= (int)SRC_HEIGHT);
+
+ for(int xk = 0; xk < KERNEL_WIDTH; ++xk)
+ {
+ // Clamp xi_coord
+ int xi_coord = (xi + xk * DILATION_X - (int)PAD_LEFT);
+ xi_coord = clamp(xi_coord, (int)0, (int)(SRC_WIDTH - 1));
+
+ // Out-of-bound condition for X
+ int x_border_condition = ((xi + xk * DILATION_X - (int)PAD_LEFT) < (int)0) || ((xi + xk * DILATION_X - (int)PAD_LEFT) >= (int)SRC_WIDTH);
+
+ int offset = xi_coord * (int)src_stride_y + (yi_coord * (int)src_stride_z);
+
+ VECTOR_N values0 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(input_ptr + offset));
+
+#if PAD_LEFT != 0 || PAD_TOP != 0 || PAD_RIGHT != 0 || PAD_BOTTOM != 0
+ // Replace with PAD_VALUE if the value is out-of-bound
+ values0 = select(values0, (VECTOR_N)PAD_VALUE, (COND_N)((COND_N)x_border_condition || (COND_N)(y_border_condition)));
+#endif // PAD_LEFT != 0 || PAD_TOP != 0 || PAD_RIGHT != 0 || PAD_BOTTOM != 0
+
+ // Store in a boundary-aware way to avoid padding
+#if BOUNDARY_VECTOR_SIZE != VECTOR_SIZE
+ const bool at_channel_boundary = get_global_id(0) == 0;
+ if(at_channel_boundary)
+ {
+ VSTORE_PARTIAL(VECTOR_SIZE, BOUNDARY_VECTOR_SIZE)
+ (values0, 0, (__global DATA_TYPE *)(output_ptr) + i * (int)SRC_DEPTH);
+ }
+ else // at_channel_boundary
+#endif // BOUNDARY_VECTOR_SIZE != VECTOR_SIZE
+ {
+ VSTORE(VECTOR_SIZE)
+ (values0, 0, (__global DATA_TYPE *)(output_ptr) + i * (int)SRC_DEPTH);
+ }
+ i++;
+ }
+ }
+
+#ifdef HAS_BIAS
+ // We can use VECTOR_SIZE instead of BOUNDARY_VECTOR_SIZE even if it's at the boundary. This is because the bias is
+ // added at the end of the channel, while the boundary vec is at the beginning of the channel.
+ // The only case where the boundary vec is at the end of the channel is when there's only a single boundary vec in
+ // the whole channel dimension, but in that case VECTOR_SIZE is also equal to BOUNDARY_VECTOR_SIZE
+ // See the value of num_elems_processed_per_iteration in configure_opencl_kernel method in CLIm2ColKernel.cpp
+ if((ch + VECTOR_SIZE) >= SRC_DEPTH)
+ {
+ *((__global DATA_TYPE *)(output_ptr) - ch + SRC_DEPTH * KERNEL_WIDTH * KERNEL_HEIGHT) = 1.0f;
+ }
+#endif // HAS_BIAS
+}
+#endif // defined(IM2COL_GENERIC)
\ No newline at end of file
diff --git a/src/core/CL/cl_kernels/nhwc/indirect_convolution.cl b/src/core/CL/cl_kernels/nhwc/indirect_convolution.cl
new file mode 100644
index 0000000000..aa719bfef0
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/indirect_convolution.cl
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 2022-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "activation_float_helpers.h"
+#include "helpers.h"
+#include "tile_helpers.h"
+
+#if defined(INDIRECT_CONVOLUTION_ADDRESS_PRECALCULATION)
+//! @cond Doxygen_Suppress
+/** OpenCL kernel to compute the indirect convolution 2d indirect buffer.
+ *
+ * @note This kernel only works for unit batch_size
+ *
+ * @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
+ * @note The convolution strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y (e.g. -DSTRIDE_X=2, -DSTRIDE_Y=2)
+ * @note The kernel width must be passed at compile time using -DWEI_CONV_WIDTH (e.g. -DWEI_CONV_WIDTH=9)
+ * @note The spatial dimensions of the source tensor used by conv2d must be passed at compile time using -DSRC_CONV_WIDTH and -DSRC_CONV_HEIGHT (e.g. -DSRC_CONV_WIDTH=96, -DSRC_CONV_HEIGHT=64)
+ * @note The width dimension of the destination tensor produced by conv2d must be passed at compile time using -DDST_CONV_WIDTH (e.g. -DDST_CONV_WIDTH=96)
+ * @note The tensor type ("BUFFER" only) of the destination tensor must be passed at compile time using -DDST_TENSOR_TYPE (e.g. -DDST_TENSOR_TYPE=BUFFER)
+ * @note The data type of the destination tensor must be passed at compile time using -DDST_DATA_TYPE (e.g. -DDST_DATA_TYPE=float)
+ * @note The number of M0 rows (width*height) to process must be passed at compile time using -DM0 (e.g. -DM0=2)
+ * - M0 = 1, 2, 3, 4, 5, 6, 7, and 8
+ *
+ * @param[out] dst_img (Not supported) Write only cl_image object for the destination tensor. Included when DST_TENSOR_TYPE=IMAGE
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data type: INT32
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_c The size of the channels dimension of the destination tensor
+ * @param[in] dst_w The size of the width dimension of the destination tensor
+ * @param[in] dst_h The size of the height dimension of the destination tensor
+ * @param[in] dst_n The size of the batches dimension of the destination tensor
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+//! @endcond
+__kernel void indirect_convolution_address_precalculation(
+ TENSOR4D_WO_T(dst, DST_TENSOR_TYPE))
+{
+ const int x = get_global_id(0);
+ const int y = get_global_id(1);
+ const int z = get_global_id(2);
+
+ // Note: WIDTH = M0 x KernelWidth x KernelHeight
+
+ // m index
+ const int mi = x % M0;
+ // Kernel index
+ const int ki = x / M0;
+ // Kernel width coordinate
+ const int xk = ki % WEI_CONV_WIDTH;
+ // Kernel height coordinate
+ const int yk = ki / WEI_CONV_WIDTH;
+
+ TILE(DST_DATA_TYPE, 1, 1, xi);
+ TILE(DST_DATA_TYPE, 1, 1, yi);
+ TILE(DST_DATA_TYPE, 1, 1, my);
+
+ const int mout = y * M0;
+
+ xi[0].s[0] = ((mout + mi) % DST_CONV_WIDTH) * STRIDE_X;
+ yi[0].s[0] = ((mout + mi) / DST_CONV_WIDTH) * STRIDE_Y;
+ xi[0].s[0] -= PAD_LEFT;
+ yi[0].s[0] -= PAD_TOP;
+
+ const int x_s = xi[0].s[0] + xk;
+ const int y_s = yi[0].s[0] + yk;
+ my[0].s[0] = x_s + y_s * SRC_CONV_WIDTH;
+ my[0].s[0] = my[0].s[0] + z * (int)(SRC_CONV_WIDTH * SRC_CONV_HEIGHT);
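+ // Out-of-bounds positions (i.e. the implicit padding area) are marked with -1; the convolution kernel is
+ // expected to skip such rows and keep its zero-initialized input tile, which is equivalent to zero padding.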
+ my[0].s[0] = select(-1, my[0].s[0], x_s >= 0);
+ my[0].s[0] = select(-1, my[0].s[0], x_s < SRC_CONV_WIDTH);
+ my[0].s[0] = select(-1, my[0].s[0], y_s >= 0);
+ my[0].s[0] = select(-1, my[0].s[0], y_s < SRC_CONV_HEIGHT);
+
+ VSTORE(1)
+ (my[0].s[0], 0, (__global DST_DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + x * sizeof(DST_DATA_TYPE) + y * dst_stride_y + z * dst_stride_z));
+}
+#endif // defined(INDIRECT_CONVOLUTION_ADDRESS_PRECALCULATION)
+
+#if defined(INDIRECT_CONVOLUTION_NHWC)
+//! @cond Doxygen_Suppress
+/** OpenCL kernel to compute the indirect convolution.
+ *
+ * @note Data layout supported: NHWC
+ * @note Data type supported: F32/F16
+ * @note The spatial dimensions of the weights must be passed at compile time using -DWEI_WIDTH and -DWEI_HEIGHT (e.g. -DWEI_WIDTH=9, -DWEI_HEIGHT=9)
+ * @note The spatial dimensions of the destination tensor must be passed at compile time using -DDST_WIDTH and -DDST_HEIGHT (e.g. -DDST_WIDTH=96, -DDST_HEIGHT=64)
+ * @note The channels of the source tensor must be passed at compile time using -DSRC_CHANNELS (e.g. -DSRC_CHANNELS=64)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the source tensor must be passed at compile time using -DSRC_TENSOR_TYPE (e.g. -DSRC_TENSOR_TYPE=BUFFER)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the weights tensor must be passed at compile time using -DWEI_TENSOR_TYPE (e.g. -DWEI_TENSOR_TYPE=BUFFER)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the destination tensor must be passed at compile time using -DDST_TENSOR_TYPE (e.g. -DDST_TENSOR_TYPE=BUFFER)
+ * @note The data type of the source tensor must be passed at compile time using -DSRC_DATA_TYPE (e.g. -DSRC_DATA_TYPE=float)
+ * @note The data type of the weights tensor must be passed at compile time using -DWEI_DATA_TYPE (e.g. -DWEI_DATA_TYPE=float)
+ * @note The data type of the destination tensor must be passed at compile time using -DDST_DATA_TYPE (e.g. -DDST_DATA_TYPE=float)
+ * @note The number of M0 rows (width*height) to process must be passed at compile time using -DM0 (e.g. -DM0=2)
+ * @note The number of N0 output channels to process must be passed at compile time using -DN0 (e.g. -DN0=2)
+ * @note The number of K0 inner accumulations must be passed at compile time using -DK0 (e.g. -DK0=2)
+ * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_N0 (e.g. -DPARTIAL_N0=1)
+ * @note The vector length used for loading the values from the indirect buffer should be passed at compile time using -DIND_BUFF_VEC_SIZE (e.g. -DIND_BUFF_VEC_SIZE=4)
+ * @note The activation function to fuse and corresponding A and B values should be passed at compile time using -DACTIVATION_TYPE, -DA_VAL, and -DB_VAL
+ * (e.g. -DACTIVATION_TYPE=lu_brelu_op, -DA_VAL=3.0, and -DB_VAL=1.0)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ * - M0 = 1, 2, 3, 4, 5, 6, and 8
+ * - N0 = 2, 3, 4, 8, 16
+ * - K0 = 2, 3, 4, 8, 16 (only 4, 8 and 16 if WEI_TENSOR_TYPE=IMAGE)
+ *
+ * @param[in] src_img (Not supported) Read only cl_image object for the source tensor. Included when SRC_TENSOR_TYPE=IMAGE
+ * @param[in] src_ptr Pointer to the source tensor. Supported data type: F16/F32
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_c The size of the channels dimension of the source tensor
+ * @param[in] src_w The size of the width dimension of the source tensor
+ * @param[in] src_h The size of the height dimension of the source tensor
+ * @param[in] src_n The size of the batches dimension of the source tensor
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] off_img (Not supported) Read only cl_image object for the indirect buffer tensor. Included when OFF_TENSOR_TYPE=IMAGE
+ * @param[in] off_ptr Pointer to the indirect buffer tensor. Supported data type: INT32
+ * @param[in] off_stride_y Stride of the indirect buffer tensor in Y dimension (in bytes)
+ * @param[in] off_stride_z Stride of the indirect buffer tensor in Z dimension (in bytes)
+ * @param[in] off_stride_w Stride of the indirect buffer tensor in W dimension (in bytes)
+ * @param[in] off_c The size of the channels dimension of the indirect buffer tensor
+ * @param[in] off_w The size of the width dimension of the indirect buffer tensor
+ * @param[in] off_h The size of the height dimension of the indirect buffer tensor
+ * @param[in] off_n The size of the batches dimension of the indirect buffer tensor
+ * @param[in] off_offset_first_element_in_bytes The offset of the first element in the indirect buffer tensor
+ * @param[out] dst_img (Not supported) Write only cl_image object for the destination tensor. Included when DST_TENSOR_TYPE=IMAGE
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data type: same as @p src_ptr
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_c The size of the channels dimension of the destination tensor
+ * @param[in] dst_w The size of the width dimension of the destination tensor
+ * @param[in] dst_h The size of the height dimension of the destination tensor
+ * @param[in] dst_n The size of the batches dimension of the destination tensor
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] wei_img (Optional) Read only cl_image object for the weights tensor. Included when WEI_TENSOR_TYPE=IMAGE
+ * @param[in] wei_ptr Pointer to the weights tensor. Supported data type: same as @p src_ptr
+ * @param[in] wei_stride_y Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] wei_stride_z Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] wei_stride_w Stride of the weights tensor in W dimension (in bytes)
+ * @param[in] wei_c The size of the channels dimension of the weights tensor
+ * @param[in] wei_w The size of the width dimension of the weights tensor
+ * @param[in] wei_h The size of the height dimension of the weights tensor
+ * @param[in] wei_n The size of the batches dimension of the weights tensor
+ * @param[in] wei_offset_first_element_in_bytes The offset of the first element in the weights tensor
+ * @param[in] bia_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p src_ptr
+ * @param[in] bia_stride_x (Optional) Stride of the bias tensor in X dimension (in bytes)
+ * @param[in] bia_step_x (Optional) bia_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] bia_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
+ */
+//! @endcond
+__kernel void indirect_convolution_nhwc(
+ TENSOR4D_RO_T(src, SRC_TENSOR_TYPE),
+ TENSOR4D_RO_T(off, OFF_TENSOR_TYPE),
+ TENSOR4D_WO_T(dst, DST_TENSOR_TYPE),
+ TENSOR4D_RO_T(wei, WEI_TENSOR_TYPE)
+#if defined(HAS_BIAS)
+ ,
+ VECTOR_DECLARATION(bia)
+#endif // defined(HAS_BIAS)
+)
+{
+ // All the tensor dimensions are passed at compile time.
+ // In case of dynamic tensor support, the following dimensions should be passed as function arguments.
+#define _IWEI_WIDTH WEI_WIDTH
+#define _IWEI_HEIGHT WEI_HEIGHT
+#define _ISRC_CHANNELS SRC_CHANNELS
+#define _IDST_WIDTH DST_WIDTH
+#define _IDST_HEIGHT DST_HEIGHT
+#define _IY_MULTIPLIER (_IWEI_WIDTH * _IWEI_HEIGHT)
+
+ const int cout = GET_SPATIAL_IDX(0, N0, PARTIAL_N0); // OFM
+ const int mout = GET_SPATIAL_IDX(1, M0, 0); // WIDTH x HEIGHT
+ const int bout = GET_SPATIAL_IDX(2, 1, 0); // BATCH SIZE IDX
+
+ off_offset_first_element_in_bytes += get_global_id(1) * off_stride_y;
+ off_offset_first_element_in_bytes += bout * off_stride_z;
+
+ // Initialize the accumulators
+ TILE(DST_DATA_TYPE, M0, N0, c);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c[i].v = 0;
+ })
+
+ for(int i = 0; i < (_IWEI_WIDTH * _IWEI_HEIGHT); ++i)
+ {
+ TILE(int, 1, IND_BUFF_VEC_SIZE, my);
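+ // Load the precomputed source-row indices for kernel point i from the indirect buffer
+ // (M0 values, padded to IND_BUFF_VEC_SIZE). Negative indices mark padded positions
+ // (computed by indirect_convolution_address_precalculation above), for which the
+ // zero-initialized input tile is expected to be kept.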
+ T_LOAD(int, 1, IND_BUFF_VEC_SIZE, OFF_TENSOR_TYPE, off, i * M0, 0, 1, 0, my);
+
+ int ck = 0;
+ for(; ck <= (_ISRC_CHANNELS - K0); ck += K0)
+ {
+ TILE(SRC_DATA_TYPE, M0, K0, a);
+ TILE(WEI_DATA_TYPE, N0, K0, b);
+
+ // Initialize tiles
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = 0.0;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ b[i].v = 0.0;
+ })
+
+ // Load tile from the src tensor
+ T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, K0, SRC_TENSOR_TYPE, src, ck, src_stride_y, my, a);
+
+ // Load tile from the weights tensor
+ T_LOAD(WEI_DATA_TYPE, N0, K0, WEI_TENSOR_TYPE, wei, ck, cout * _IY_MULTIPLIER + i, _IY_MULTIPLIER, wei_stride_y, b);
+
+ // Compute the matrix multiplication between two tiles
+ T_MMUL(SRC_DATA_TYPE, WEI_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, NT, T, a, b, c);
+ }
+
+ // This #if directive should be removed in case of dynamic tensor support
+#if defined(LEFTOVER_LOOP)
+ // Left-over accumulations
+ for(; ck < _ISRC_CHANNELS; ++ck)
+ {
+ TILE(SRC_DATA_TYPE, M0, 1, a);
+ TILE(WEI_DATA_TYPE, N0, 1, b);
+
+ // Initialize tiles
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = 0.0;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ b[i].v = 0.0;
+ })
+
+ // Load tile from the src tensor
+ T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, 1, SRC_TENSOR_TYPE, src, ck, src_stride_y, my, a);
+
+ // Load tile from the weights tensor
+ // The T_LOAD for the left-over elements can only use BUFFER because we load one element per iteration
+ T_LOAD(WEI_DATA_TYPE, N0, 1, BUFFER, wei, ck, cout * _IY_MULTIPLIER + i, _IY_MULTIPLIER, wei_stride_y, b);
+
+ // Compute the matrix multiplication between two tiles
+ T_MMUL(SRC_DATA_TYPE, WEI_DATA_TYPE, DST_DATA_TYPE, M0, N0, 1, NT, T, a, b, c);
+ }
+#endif // defined(LEFTOVER_LOOP)
+ }
+
+#if defined(HAS_BIAS)
+ TILE(BIA_DATA_TYPE, 1, N0, bias0);
+
+ T_LOAD(BIA_DATA_TYPE, 1, N0, BUFFER, bia, cout, 0, 1, 0, bias0);
+
+ // c = c + bias[broadcasted]
+ T_ELTWISE_BROADCAST_ADD_X(DST_DATA_TYPE, M0, N0, c, bias0, c);
+
+#endif // HAS_BIAS
+
+ // Apply activation
+ T_ACTIVATION(DST_DATA_TYPE, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, c, c);
+
+ TILE(uint, M0, 1, dst_indirect_y);
+
+ // Calculate the destination indirect Y
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ dst_indirect_y[i].v = (uint)min(mout + i, (int)(_IDST_WIDTH * _IDST_HEIGHT) - 1);
+ dst_indirect_y[i].v += bout * (int)(_IDST_WIDTH * _IDST_HEIGHT);
+ })
+
+ const bool x_cond = PARTIAL_N0 != 0 && get_global_id(0) == 0;
+
+ // Store the tile in reverse order so the invalid values are overwritten with the valid ones
+ T_STORE_INDIRECT_WIDTH_SELECT(DST_DATA_TYPE, M0, N0, PARTIAL_N0, DST_TENSOR_TYPE, dst, cout, dst_stride_y, x_cond, c, dst_indirect_y);
+
+#undef _IWEI_WIDTH
+#undef _IWEI_HEIGHT
+#undef _ISRC_CHANNELS
+#undef _IDST_WIDTH
+#undef _IDST_HEIGHT
+#undef _IY_MULTIPLIER
+}
+#endif // defined(INDIRECT_CONVOLUTION_NHWC)
diff --git a/src/core/CL/cl_kernels/nhwc/normalization_layer.cl b/src/core/CL/cl_kernels/nhwc/normalization_layer.cl
new file mode 100644
index 0000000000..7e35e161c8
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/normalization_layer.cl
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2017-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+#include "tile_helpers.h"
+
+#define MUL_OP(x, y) ((x) * (y))
+#define ADD_OP(x, y) ((x) + (y))
+#define DIV_OP(x, y) ((x) / (y))
+#define POW_OP(x, y) pow((x), (y))
+#define SQCVT_SAT(a) (a)
+
+#if defined(WIDTH_SIZE)
+/** Apply cross-map normalization.
+ *
+ * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size, e.g. -DVEC_SIZE=16
+ * @note The radius should be given as a preprocessor argument using -DRADIUS=size. e.g. -DRADIUS=5
+ * @note The size of the first dimension (the number of channels in NHWC) should be given as a preprocessor argument using -DWIDTH_SIZE=size. e.g. -DWIDTH_SIZE=192
+ * @note Scaling coefficient (= alpha/norm_size), beta and kappa need to be passed at compile time using -DCOEFF, -DBETA and -DKAPPA
+ *
+ * @param[in] input_ptr Pointer to the first source tensor. Supported data types: F16/F32
+ * @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the first source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the first source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void normalization_layer_cross_map_nhwc(TENSOR3D_DECLARATION(input),
+ TENSOR3D_DECLARATION(output))
+{
+ // Offset computation
+ const uint x_offs = GET_SPATIAL_IDX(0, VEC_SIZE, VEC_SIZE_LEFTOVER);
+
+ // Address computation
+ __global uchar *input_addr = input_ptr + input_offset_first_element_in_bytes + get_global_id(1) * input_stride_y + get_global_id(2) * input_stride_z;
+ __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) + get_global_id(1) * output_stride_y + get_global_id(2) * output_stride_z;
+
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ acc = 0;
+ const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ coeff_v = SQCVT_SAT(COEFF);
+ const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ beta_v = SQCVT_SAT(BETA);
+ const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ kappa_v = SQCVT_SAT(KAPPA);
+
+ const int left_slice = max((int)0, (int)x_offs - (int)RADIUS);
+ const int right_slice = min((int)WIDTH_SIZE - 1, (int)x_offs + (int)RADIUS);
+
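+ // Cross-map (across channels) LRN: accumulate the sum of squares over the window
+ // [x_offs - RADIUS, x_offs + RADIUS] clamped to the channel range, then compute
+ // out = in / (KAPPA + COEFF * sum_sq) ^ BETA below.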
+ for(int i = left_slice; i <= right_slice; ++i)
+ {
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_addr + i * sizeof(DATA_TYPE)));
+ acc = ADD_OP(acc, MUL_OP(values, values));
+ }
+
+ acc = ADD_OP(MUL_OP(acc, coeff_v), kappa_v);
+ const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ normalized = POW_OP(acc, beta_v);
+ const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ normalized_pixel0 = DIV_OP(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_addr + x_offs * sizeof(DATA_TYPE))), normalized);
+
+ STORE_VECTOR_SELECT(normalized_pixel, DATA_TYPE, output_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
+}
+#endif // defined(WIDTH_SIZE)
+
+#if defined(NUM_SLICES) && defined(DIM1_SIZE)
+/** Apply in-map normalization when tensors are in the NHWC data layout format.
+ *
+ * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size, e.g. -DVEC_SIZE=16
+ * @note The radius should be given as a preprocessor argument using -DRADIUS=size. e.g. -DRADIUS=5
+ * @note The number of slices should be given as a preprocessor argument using -DNUM_SLICES=size. e.g. -DNUM_SLICES=192
+ * @note The size of dimension 1 should be given as a preprocessor argument using -DDIM1_SIZE=size. e.g. -DDIM1_SIZE=64
+ * @note Scaling coefficient (= alpha/norm_size), beta and kappa need to be passed at compile time using -DCOEFF, -DBETA and -DKAPPA
+ *
+ * @param[in] input_ptr Pointer to the first source tensor. Supported data types: F16/F32
+ * @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the first source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the first source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void normalization_layer_in_map_nhwc(TENSOR3D_DECLARATION(input),
+ TENSOR3D_DECLARATION(output))
+{
+ // Offset computation
+ const uint x_offs = GET_SPATIAL_IDX(0, VEC_SIZE, VEC_SIZE_LEFTOVER);
+ const int current_cols = get_global_id(1);
+ const int current_rows = get_global_id(2);
+
+ // Address computation
+ __global uchar *input_addr = input_ptr + input_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE);
+ __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) + current_cols * output_stride_y + current_rows * output_stride_z;
+
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ acc = 0;
+ const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ coeff_v = SQCVT_SAT(COEFF);
+ const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ beta_v = SQCVT_SAT(BETA);
+ const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ kappa_v = SQCVT_SAT(KAPPA);
+
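+ // In-map LRN: the window spans [current_cols - RADIUS, current_cols + RADIUS] along dimension 1,
+ // clamped to the tensor; with IN_MAP_2D it additionally spans the same radius along dimension 2.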
+ const int first_col = max(0, current_cols - (int)RADIUS);
+ const int last_col = min((int)DIM1_SIZE - 1, current_cols + (int)RADIUS);
+
+#if defined(IN_MAP_2D)
+ const int first_row = max(0, current_rows - (int)RADIUS);
+ const int last_row = min((int)NUM_SLICES - 1, current_rows + (int)RADIUS);
+#endif /* defined(IN_MAP_2D) */
+
+#if defined(IN_MAP_2D)
+ for(int j = first_row; j <= last_row; ++j)
+ {
+#else // defined(IN_MAP_2D)
+ const int j = current_rows;
+#endif /* defined(IN_MAP_2D) */
+ for(int i = first_col; i <= last_col; ++i)
+ {
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_addr + i * input_stride_y + j * input_stride_z));
+ acc = ADD_OP(acc, MUL_OP(values, values));
+ }
+#if defined(IN_MAP_2D)
+ }
+#endif /* defined(IN_MAP_2D) */
+
+ acc = ADD_OP(MUL_OP(acc, coeff_v), kappa_v);
+ const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ normalized = POW_OP(acc, beta_v);
+ const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ normalized_pixel0 = DIV_OP(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_addr + current_cols * input_stride_y + current_rows * input_stride_z)), normalized);
+
+ STORE_VECTOR_SELECT(normalized_pixel, DATA_TYPE, output_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
+}
+#endif // defined(NUM_SLICES) && defined(DIM1_SIZE)
\ No newline at end of file
diff --git a/src/core/CL/cl_kernels/nhwc/normalize_planar_yuv_layer.cl b/src/core/CL/cl_kernels/nhwc/normalize_planar_yuv_layer.cl
new file mode 100644
index 0000000000..86c33499e2
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/normalize_planar_yuv_layer.cl
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(DATA_TYPE) && defined(VEC_SIZE)
+
+#define TYPE VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+
+/** Apply normalize_planar_yuv layer on tensors with NHWC data layout.
+ *
+ * @note Data type should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE e.g. -DVEC_SIZE=8
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
+ *
+ * @param[in] src_ptr Pointer to the first source tensor. Supported data types: F16/F32
+ * @param[in] src_stride_x Stride of the first source tensor in X dimension (in bytes)
+ * @param[in] src_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the first source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the first source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the first source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] mean_ptr Pointer to the mean source tensor. Supported data types: same as @p src_ptr
+ * @param[in] mean_stride_x Stride of the mean source tensor in X dimension (in bytes)
+ * @param[in] mean_step_x mean_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] mean_offset_first_element_in_bytes The offset of the first element in the mean source tensor
+ * @param[in] std_ptr Pointer to the std tensor. Supported data types: same as @p src_ptr
+ * @param[in] std_stride_x Stride of the std tensor in X dimension (in bytes)
+ * @param[in] std_step_x std_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] std_offset_first_element_in_bytes The offset of the first element in the std tensor
+ */
+__kernel void normalize_planar_yuv_layer_nhwc(TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst),
+ VECTOR_DECLARATION(mean),
+ VECTOR_DECLARATION(std))
+{
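+ // If the channel count is not a multiple of VEC_SIZE, all work-items but the first are shifted back by
+ // (VEC_SIZE - VEC_SIZE_LEFTOVER) elements so their full vectors stay in bounds; work-item 0 then handles
+ // the leading VEC_SIZE_LEFTOVER elements and only those are stored for it (see STORE_VECTOR_SELECT below).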
+ uint x_offs = max((int)(get_global_id(0) * VEC_SIZE * sizeof(DATA_TYPE) - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE * sizeof(DATA_TYPE)), 0);
+
+ __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x_offs + get_global_id(1) * src_stride_y + get_global_id(2) * src_stride_z;
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offs + get_global_id(1) * dst_stride_y + get_global_id(2) * dst_stride_z;
+ __global uchar *mean_addr = mean_ptr + mean_offset_first_element_in_bytes + x_offs;
+ __global uchar *std_addr = std_ptr + std_offset_first_element_in_bytes + x_offs;
+
+ const TYPE curr_mean = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)mean_addr);
+ const TYPE curr_std = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)std_addr);
+
+ TYPE data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src_addr);
+ TYPE res0 = (data - curr_mean) / curr_std;
+
+ STORE_VECTOR_SELECT(res, DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
+}
+#endif // defined(DATA_TYPE) && defined(VEC_SIZE)
\ No newline at end of file
diff --git a/src/core/CL/cl_kernels/nhwc/normalize_planar_yuv_layer_quantized.cl b/src/core/CL/cl_kernels/nhwc/normalize_planar_yuv_layer_quantized.cl
new file mode 100644
index 0000000000..7bc3c15a63
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/normalize_planar_yuv_layer_quantized.cl
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(DATA_TYPE) && defined(VEC_SIZE) && defined(OFFSET) && defined(SCALE)
+
+#define TYPE VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+#define OFFSET_FLT ((float)OFFSET)
+#define SCALE_FLT ((float)SCALE)
+
+/** Apply normalize_planar_yuv layer on tensors with NHWC data layout.
+ *
+ * @note Data type should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=uchar
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE e.g. -DVEC_SIZE=8
+ * @note The quantization offset should be given as a preprocessor argument using -DOFFSET e.g. -DOFFSET=8
+ * @note The quantization scale should be given as a preprocessor argument using -DSCALE e.g. -DSCALE=8
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
+ *
+ * @param[in] src_ptr Pointer to the first source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED
+ * @param[in] src_stride_x Stride of the first source tensor in X dimension (in bytes)
+ * @param[in] src_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the first source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the first source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the first source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] mean_ptr Pointer to the mean source tensor. Supported data types: same as @p src_ptr
+ * @param[in] mean_stride_x Stride of the mean source tensor in X dimension (in bytes)
+ * @param[in] mean_step_x mean_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] mean_offset_first_element_in_bytes The offset of the first element in the mean source tensor
+ * @param[in] std_ptr Pointer to the std tensor. Supported data types: same as @p src_ptr
+ * @param[in] std_stride_x Stride of the std tensor in X dimension (in bytes)
+ * @param[in] std_step_x std_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] std_offset_first_element_in_bytes The offset of the first element in the std tensor
+ */
+__kernel void normalize_planar_yuv_layer_q8_nhwc(TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst),
+ VECTOR_DECLARATION(mean),
+ VECTOR_DECLARATION(std))
+{
+ uint x_offs = max((int)(get_global_id(0) * VEC_SIZE * sizeof(DATA_TYPE) - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE * sizeof(DATA_TYPE)), 0);
+
+ __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x_offs + get_global_id(1) * src_stride_y + get_global_id(2) * src_stride_z;
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offs + get_global_id(1) * dst_stride_y + get_global_id(2) * dst_stride_z;
+ __global uchar *mean_addr = mean_ptr + mean_offset_first_element_in_bytes + x_offs;
+ __global uchar *std_addr = std_ptr + std_offset_first_element_in_bytes + x_offs;
+
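+ // Dequantize the inputs (real = (q - OFFSET) * SCALE), normalize in float, then requantize the result
+ // (q = round(real / SCALE) + OFFSET) before storing.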
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ curr_mean_flt = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)mean_addr), VEC_DATA_TYPE(float, VEC_SIZE));
+ curr_mean_flt = round(curr_mean_flt - OFFSET_FLT) * SCALE_FLT;
+
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ curr_std_flt = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)std_addr), VEC_DATA_TYPE(float, VEC_SIZE));
+ curr_std_flt = round(curr_std_flt - OFFSET_FLT) * SCALE_FLT;
+
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ data_flt = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src_addr), VEC_DATA_TYPE(float, VEC_SIZE));
+ data_flt = round(data_flt - OFFSET_FLT) * SCALE_FLT;
+
+ // Perform normalization
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ res_flt = (data_flt - curr_mean_flt) / curr_std_flt;
+
+ const TYPE res0 = CONVERT_SAT(round(res_flt / SCALE_FLT) + OFFSET_FLT, TYPE);
+ STORE_VECTOR_SELECT(res, DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
+}
+#endif // defined(DATA_TYPE) && defined(VEC_SIZE) && defined(OFFSET) && defined(SCALE)
\ No newline at end of file
diff --git a/src/core/CL/cl_kernels/nhwc/pooling_3d_layer.cl b/src/core/CL/cl_kernels/nhwc/pooling_3d_layer.cl
new file mode 100644
index 0000000000..4e5481d1db
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/pooling_3d_layer.cl
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+#include "tile_helpers.h" // Needed for GET_SPATIAL_IDX()
+
+#if defined(POOL_AVG) || defined(POOL_L2)
+#define POOL_OP(x, y) ((x) + (y))
+#else /* defined(POOL_AVG) || defined(POOL_L2) */
+#define POOL_OP(x, y) (fmax((x), (y)))
+#endif /* defined(POOL_AVG) || defined(POOL_L2) */
+
+#define SQRT_OP(x) sqrt((x))
+
+#if defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(SRC_DEPTH) && defined(DST_CHANNELS) && defined(DST_HEIGHT) && defined(DST_DEPTH) && defined(DST_BATCH_SIZE) && defined(ACC_DATA_TYPE)
+
+#if defined(POOL_SIZE_X) && defined(POOL_SIZE_Y) && defined(POOL_SIZE_Z)
+
+/** Performs 3d pooling layer of size equal to MxNxD. This OpenCL kernel can perform the following pooling types:
+ * -# max, -DPOOL_MAX must be passed at compile time
+ * -# average, -DPOOL_AVG must be passed at compile time. If padding has to be excluded, -DEXCLUDE_PADDING should be passed at compile time
+ * -# l2 normalisation, -DPOOL_L2 must be passed at compile time
+ *
+ * @note Datatype must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=half. Supported data types are F32/F16
+ * @note Accumulation data type must be passed at compile time using -DACC_DATA_TYPE e.g. -DACC_DATA_TYPE=float
+ * @note If -DFP_MIXED_PRECISION is passed at compile time, the kernel will use F32 for the partial result
+ * @note Pool size must be passed at compile time using -DPOOL_SIZE_X, -DPOOL_SIZE_Y, and -DPOOL_SIZE_Z. e.g. -DPOOL_SIZE_X=4, -DPOOL_SIZE_Y=4, -DPOOL_SIZE_Z=2
+ * @note Input tensor width, height and depth must be passed at compile time using -DSRC_WIDTH, -DSRC_HEIGHT, and -DSRC_DEPTH
+ * @note Output tensor height, channels, depth, and batch size must be passed at compile time using -DDST_HEIGHT, -DDST_CHANNELS, -DDST_DEPTH, and -DDST_BATCH_SIZE
+ * @note Pool strides must be passed at compile time using -DSTRIDE_X, -DSTRIDE_Y and -DSTRIDE_Z which are the steps of the window along the x, y and z directions
+ * @note Pool pads must be passed at compile time using -DPAD_X, -DPAD_Y, -DPAD_Z
+ * @note Vector size must be passed at compile time using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @note Leftover vector size must be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
+ * @note The initial value for the pooling operation must be passed at compile time using -DINITIAL_VALUE e.g. -DINITIAL_VALUE=0
+ *
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] input_step_w input_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] input_stride_v Stride of the source tensor in V dimension (in bytes)
+ * @param[in] input_step_v input_stride_v * number of elements along V processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] output_step_w output_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] output_stride_v Stride of the destination tensor in V dimension (in bytes)
+ * @param[in] output_step_v output_stride_v * number of elements along V processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void pooling_3d_layer_MxN_ndhwc(
+ TENSOR5D_DECLARATION(input),
+ TENSOR5D_DECLARATION(output))
+{
+ // Note: If C is not a multiple of VEC_SIZE, all work-items except the first are shifted back by (VEC_SIZE - VEC_SIZE_LEFTOVER) elements so that the leftover elements are computed by get_global_id(0) == 0
+ // Note: If C is less than VEC_SIZE, VEC_SIZE should be shrunk to the closest smaller VEC_SIZE. This operation is performed on the host side
+ int idx_out_c = GET_SPATIAL_IDX(0, VEC_SIZE, VEC_SIZE_LEFTOVER);
+ int idx_out_w = GET_SPATIAL_IDX(1, 1, 0);
+
+ // The depth size dimension and the batch size dimension are collapsed over the height dimension
+ int idx_out_h = GET_SPATIAL_IDX(2, 1, 0) % DST_HEIGHT;
+ int idx_out_d = (GET_SPATIAL_IDX(2, 1, 0) / DST_HEIGHT) % DST_DEPTH;
+ int idx_out_n = (GET_SPATIAL_IDX(2, 1, 0) / DST_HEIGHT) / DST_DEPTH;
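+ // Illustrative example (values chosen for illustration only): with DST_HEIGHT=4 and DST_DEPTH=2,
+ // a global id of 13 along dimension 2 maps to idx_out_h = 13 % 4 = 1, idx_out_d = (13 / 4) % 2 = 1,
+ // idx_out_n = (13 / 4) / 2 = 1.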
+
+ __global unsigned char *in_base_ptr = input_ptr + input_offset_first_element_in_bytes + idx_out_c * sizeof(DATA_TYPE) + idx_out_n * input_stride_v;
+
+ __global unsigned char *out_base_ptr = output_ptr + output_offset_first_element_in_bytes + idx_out_c * sizeof(DATA_TYPE) + idx_out_w * output_stride_y + idx_out_h * output_stride_z + idx_out_d *
+ output_stride_w + idx_out_n * output_stride_v;
+
+ VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)
+ res0 = INITIAL_VALUE;
+
+ int idx_in_w = idx_out_w * STRIDE_X - (int)PAD_X;
+ int idx_in_h = idx_out_h * STRIDE_Y - (int)PAD_Y;
+ int idx_in_d = idx_out_d * STRIDE_Z - (int)PAD_Z;
+
+ // The start of width to consider in calculation should exclude padding
+ int pool_x_s = max((int)0, -idx_in_w);
+ // Symmetric padding is assumed (left padding = right padding = PAD_X): the window end is either the pool width or whatever remains from the current position to (src width + right pad)
+ int pool_x_e = min((int)POOL_SIZE_X, (int)SRC_WIDTH + PAD_X - idx_in_w);
+ int pool_y_s = max((int)0, -idx_in_h);
+ int pool_y_e = min((int)POOL_SIZE_Y, (int)SRC_HEIGHT + PAD_Y - idx_in_h);
+ int pool_z_s = max((int)0, -idx_in_d);
+ int pool_z_e = min((int)POOL_SIZE_Z, (int)SRC_DEPTH + PAD_Z - idx_in_d);
+
+ // The filter size with all padding in all directions considered.
+ int filter_size = pool_z_e * pool_y_e * pool_x_e;
+
+ // The end of width to consider in calculation should exclude PAD_X
+ pool_x_e = min(pool_x_e, SRC_WIDTH - idx_in_w);
+ pool_y_e = min(pool_y_e, SRC_HEIGHT - idx_in_h);
+ pool_z_e = min(pool_z_e, SRC_DEPTH - idx_in_d);
+
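+ // With -DEXCLUDE_PADDING the average/L2 divisor counts only the in-bounds elements of the window;
+ // otherwise the padded window size computed above is used.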
+#if defined(EXCLUDE_PADDING)
+ filter_size = (pool_z_e - pool_z_s) * (pool_y_e - pool_y_s) * (pool_x_e - pool_x_s);
+#endif // defined(EXCLUDE_PADDING)
+
+#if POOL_SIZE_X == SRC_WIDTH && POOL_SIZE_Y == SRC_HEIGHT && POOL_SIZE_Z == SRC_DEPTH && PAD_X == 0 && PAD_Y == 0 && PAD_Z == 0
+ // Global pooling path
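+ // The pool covers the whole input and there is no padding, so the loop bounds are compile-time
+ // constants and no per-element bounds handling is needed.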
+ for(int z = 0; z < POOL_SIZE_Z; ++z)
+ {
+ int depth_offset_src = (z + idx_in_d) * input_stride_w;
+ for(int y = 0; y < POOL_SIZE_Y; ++y)
+ {
+ int height_offset_src = (y + idx_in_h) * input_stride_z;
+#pragma unroll 8
+ for(int x = 0; x < POOL_SIZE_X; ++x)
+ {
+ int width_offset_src = (x + idx_in_w) * input_stride_y;
+#else // POOL_SIZE_X == SRC_WIDTH && POOL_SIZE_Y == SRC_HEIGHT && POOL_SIZE_Z == SRC_DEPTH && PAD_X == 0 && PAD_Y == 0 && PAD_Z == 0
+ for(int z = pool_z_s; z < pool_z_e; ++z)
+ {
+ int depth_offset_src = (z + idx_in_d) * input_stride_w;
+ for(int y = pool_y_s; y < pool_y_e; ++y)
+ {
+ int height_offset_src = (y + idx_in_h) * input_stride_z;
+#pragma unroll 8
+ for(int x = pool_x_s; x < pool_x_e; ++x)
+ {
+ int width_offset_src = (x + idx_in_w) * input_stride_y;
+#endif // POOL_SIZE_X == SRC_WIDTH && POOL_SIZE_Y == SRC_HEIGHT && POOL_SIZE_Z == SRC_DEPTH && PAD_X == 0 && PAD_Y == 0 && PAD_Z == 0
+ VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)
+ data0;
+#if defined(FP_MIXED_PRECISION)
+ // In case of FP_MIXED_PRECISION, ACC_DATA_TYPE is != DATA_TYPE
+ data0 = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(in_base_ptr + width_offset_src + height_offset_src + depth_offset_src)),
+ VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE));
+#else // defined(FP_MIXED_PRECISION)
+ data0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(in_base_ptr + width_offset_src + height_offset_src + depth_offset_src));
+#endif // defined(FP_MIXED_PRECISION)
+
+#if defined(POOL_L2)
+ // Raise to power of 2 for L2 Pooling
+ data0 *= data0;
+#endif // defined(POOL_L2)
+ res0 = POOL_OP(res0, data0);
+ }
+ }
+ }
+
+#if defined(POOL_AVG) || defined(POOL_L2)
+ res0 /= (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))filter_size;
+#endif // defined(POOL_AVG) || defined(POOL_L2)
+
+#if defined(POOL_L2)
+ // Take square root of the result in L2 pooling
+ res0 = SQRT_OP(res0);
+#endif // defined(POOL_L2)
+
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ out_q0 = CONVERT(res0, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE));
+
+
+ // Store result
+#if defined(QUANTIZED)
+ STORE_VECTOR_SELECT(out_q, DATA_TYPE, out_base_ptr, VEC_SIZE, VEC_SIZE_LEFTOVER, (VEC_SIZE_LEFTOVER != 0) && get_global_id(0) == 0);
+#elif defined(FP_MIXED_PRECISION)
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ res_converted0 = CONVERT(res0, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE));
+ STORE_VECTOR_SELECT(res_converted, DATA_TYPE, out_base_ptr, VEC_SIZE, VEC_SIZE_LEFTOVER, (VEC_SIZE_LEFTOVER != 0) && get_global_id(0) == 0);
+#else // defined(FP_MIXED_PRECISION)
+ STORE_VECTOR_SELECT(res, DATA_TYPE, out_base_ptr, VEC_SIZE, VEC_SIZE_LEFTOVER, (VEC_SIZE_LEFTOVER != 0) && get_global_id(0) == 0);
+#endif // defined(FP_MIXED_PRECISION)
+}
+#endif // defined(POOL_SIZE_X) && defined(POOL_SIZE_Y) && defined(POOL_SIZE_Z)
+#endif // defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(SRC_DEPTH) && defined(DST_CHANNELS) && defined(DST_HEIGHT) && defined(DST_DEPTH) && defined(DST_BATCH_SIZE) && defined(ACC_DATA_TYPE)
diff --git a/src/core/CL/cl_kernels/nhwc/pooling_3d_layer_quantized.cl b/src/core/CL/cl_kernels/nhwc/pooling_3d_layer_quantized.cl
new file mode 100644
index 0000000000..abf0db9d07
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/pooling_3d_layer_quantized.cl
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+#include "tile_helpers.h" // Needed for GET_SPATIAL_IDX()
+
+#if defined(POOL_AVG)
+#define POOL_OP(x, y) ((x) + (y))
+#else /* defined(POOL_AVG) */
+#define POOL_OP(x, y) (max((x), (y)))
+#endif /* defined(POOL_AVG) */
+
+#define SQRT_OP(x) sqrt((x))
+
+#if defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(SRC_DEPTH) && defined(DST_CHANNELS) && defined(DST_HEIGHT) && defined(DST_DEPTH) && defined(DST_BATCH_SIZE) && defined(ACC_DATA_TYPE)
+
+#if defined(POOL_SIZE_X) && defined(POOL_SIZE_Y) && defined(POOL_SIZE_Z)
+
+#if defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT)
+#define VEC_FLOAT(VEC_SIZE) VEC_DATA_TYPE(float, VEC_SIZE)
+#define VEC_INT(VEC_SIZE) VEC_DATA_TYPE(int, VEC_SIZE)
+#define CONVERT_RTE(x, type) (convert_##type##_rte((x)))
+#define CONVERT_DOWN(x, type) CONVERT_RTE(x, type)
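+// REQUANTIZE converts a value from the input quantized domain to the output one:
+// real = (q_in - in_offset) * in_scale, then q_out = round-to-nearest-even(real / out_scale) + out_offset,
+// saturated to DATA_TYPE.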
+#define REQUANTIZE(VEC_SIZE, input, in_offset, out_offset, in_scale, out_scale, res) \
+ { \
+ const VEC_FLOAT(VEC_SIZE) in_f32 = (CONVERT(input, VEC_FLOAT(VEC_SIZE)) - (VEC_FLOAT(VEC_SIZE))((float)in_offset)) * (VEC_FLOAT(VEC_SIZE))((float)in_scale); \
+ const VEC_FLOAT(VEC_SIZE) out_f32 = in_f32 / ((VEC_FLOAT(VEC_SIZE))(float)out_scale) + ((VEC_FLOAT(VEC_SIZE))((float)out_offset)); \
+ res = CONVERT_SAT(CONVERT_DOWN(out_f32, VEC_INT(VEC_SIZE)), VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)); \
+ }
+#endif /* defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT) */
+
+#if defined(POOL_L2)
+#error "L2 pooling is not supported"
+#endif /* defined(POOL_L2) */
+
+/** Performs 3d pooling layer of size equal to MxNxD. This OpenCL kernel can perform the following pooling types:
+ * -# max, -DPOOL_MAX must be passed at compile time
+ * -# average, -DPOOL_AVG must be passed at compile time. If padding has to be excluded, -DEXCLUDE_PADDING should be passed at compile time
+ *
+ * @note Datatype must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=uchar. Supported data types are QASYMM8_SIGNED, QASYMM8
+ * @note Accumulation data type must be passed at compile time using -DACC_DATA_TYPE e.g. -DACC_DATA_TYPE=float
+ * @note If -DFP_MIXED_PRECISION is passed at compile time, the kernel will use F32 for the partial result
+ * @note Pool size must be passed at compile time using -DPOOL_SIZE_X, -DPOOL_SIZE_Y, and -DPOOL_SIZE_Z. e.g. -DPOOL_SIZE_X=4, -DPOOL_SIZE_Y=4, -DPOOL_SIZE_Z=2
+ * @note Input tensor width, height and depth must be passed at compile time using -DSRC_WIDTH, -DSRC_HEIGHT, and -DSRC_DEPTH
+ * @note Output tensor height, channels, depth, and batch size must be passed at compile time using -DDST_HEIGHT, -DDST_CHANNELS, -DDST_DEPTH, and -DDST_BATCH_SIZE
+ * @note Pool strides must be passed at compile time using -DSTRIDE_X, -DSTRIDE_Y and -DSTRIDE_Z which are the steps of the window along the x, y and z directions
+ * @note Pool pads must be passed at compile time using -DPAD_X, -DPAD_Y, -DPAD_Z
+ * @note Vector size must be passed at compile time using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @note Leftover vector size must be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
+ * @note The initial value for the pooling operation must be passed at compile time using -DINITIAL_VALUE e.g. -DINITIAL_VALUE=0
+ *
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: QASYMM8_SIGNED, QASYMM8
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] input_step_w input_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] input_stride_v Stride of the source tensor in V dimension (in bytes)
+ * @param[in] input_step_v input_stride_v * number of elements along V processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] output_step_w output_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] output_stride_v Stride of the destination tensor in V dimension (in bytes)
+ * @param[in] output_step_v output_stride_v * number of elements along V processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void pooling_3d_layer_MxN_ndhwc_quantized(
+ TENSOR5D_DECLARATION(input),
+ TENSOR5D_DECLARATION(output))
+{
+ // Note: If C is not a multiple of VEC_SIZE, all work-items except the first are shifted back by (VEC_SIZE - VEC_SIZE_LEFTOVER) elements so that the leftover elements are computed by get_global_id(0) == 0
+ // Note: If C is less than VEC_SIZE, VEC_SIZE should be shrunk to the closest smaller VEC_SIZE. This operation is performed on the host side
+ int idx_out_c = GET_SPATIAL_IDX(0, VEC_SIZE, VEC_SIZE_LEFTOVER);
+ int idx_out_w = GET_SPATIAL_IDX(1, 1, 0);
+
+ // The depth size dimension and the batch size dimension are collapsed over the height dimension
+ int idx_out_h = GET_SPATIAL_IDX(2, 1, 0) % DST_HEIGHT;
+ int idx_out_d = (GET_SPATIAL_IDX(2, 1, 0) / DST_HEIGHT) % DST_DEPTH;
+ int idx_out_n = (GET_SPATIAL_IDX(2, 1, 0) / DST_HEIGHT) / DST_DEPTH;
+
+ __global unsigned char *in_base_ptr = input_ptr + input_offset_first_element_in_bytes + idx_out_c * sizeof(DATA_TYPE) + idx_out_n * input_stride_v;
+
+ __global unsigned char *out_base_ptr = output_ptr + output_offset_first_element_in_bytes + idx_out_c * sizeof(DATA_TYPE) + idx_out_w * output_stride_y + idx_out_h * output_stride_z + idx_out_d *
+ output_stride_w + idx_out_n * output_stride_v;
+
+ VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)
+ res0 = INITIAL_VALUE;
+
+ int idx_in_w = idx_out_w * STRIDE_X - (int)PAD_X;
+ int idx_in_h = idx_out_h * STRIDE_Y - (int)PAD_Y;
+ int idx_in_d = idx_out_d * STRIDE_Z - (int)PAD_Z;
+
+ // The start of width to consider in calculation should exclude padding
+ int pool_x_s = max((int)0, -idx_in_w);
+ // Symmetric padding is assumed (left padding = right padding = PAD_X): the window end is either the pool width or whatever remains from the current position to (src width + right pad)
+ int pool_x_e = min((int)POOL_SIZE_X, (int)SRC_WIDTH + PAD_X - idx_in_w);
+ int pool_y_s = max((int)0, -idx_in_h);
+ int pool_y_e = min((int)POOL_SIZE_Y, (int)SRC_HEIGHT + PAD_Y - idx_in_h);
+ int pool_z_s = max((int)0, -idx_in_d);
+ int pool_z_e = min((int)POOL_SIZE_Z, (int)SRC_DEPTH + PAD_Z - idx_in_d);
+
+#if defined(POOL_AVG) && defined(EXCLUDE_PADDING)
+ int filter_size = 0;
+#elif defined(POOL_AVG) && !defined(EXCLUDE_PADDING) // defined(POOL_AVG) && defined(EXCLUDE_PADDING)
+ int filter_size = pool_z_e * pool_y_e * pool_x_e;
+#endif // defined(POOL_AVG) && !defined(EXCLUDE_PADDING)
+
+ // The end of width to consider in calculation should exclude PAD_X
+ pool_x_e = min(pool_x_e, SRC_WIDTH - idx_in_w);
+ pool_y_e = min(pool_y_e, SRC_HEIGHT - idx_in_h);
+ pool_z_e = min(pool_z_e, SRC_DEPTH - idx_in_d);
+
+ for(int z = pool_z_s; z < pool_z_e; ++z)
+ {
+ int depth_offset_src = (z + idx_in_d) * input_stride_w;
+ for(int y = pool_y_s; y < pool_y_e; ++y)
+ {
+ int height_offset_src = (y + idx_in_h) * input_stride_z;
+#pragma unroll 8
+ for(int x = pool_x_s; x < pool_x_e; ++x)
+ {
+ int width_offset_src = (x + idx_in_w) * input_stride_y;
+
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ data;
+ VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)
+ data0;
+
+ data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(in_base_ptr + width_offset_src + height_offset_src + depth_offset_src));
+ data0 = CONVERT(data, VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE));
+
+ res0 = POOL_OP(res0, data0);
+
+#if defined(POOL_AVG) && defined(EXCLUDE_PADDING)
+ filter_size++;
+#endif // defined(POOL_AVG) && defined(EXCLUDE_PADDING)
+ }
+ }
+ }
+
+#if defined(POOL_AVG)
+ res0 = (res0 + (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))(filter_size >> 1)) / filter_size;
+#endif // defined(POOL_AVG)
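+    // The (filter_size >> 1) term added before the integer division above rounds the average to the
+    // nearest integer rather than truncating it, e.g. (illustrative values) 7 / 2 becomes 4 instead of 3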
+
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ out_q0 = CONVERT(res0, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE));
+
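+    // When the requantization defines are provided, REQUANTIZE below maps the result from the input
+    // quantization parameters to the output ones, conceptually: dequantize with (OFFSET_IN1, SCALE_IN1),
+    // then quantize again with (OFFSET_OUT, SCALE_OUT), i.e. q_out = (q_in - OFFSET_IN1) * SCALE_IN1 / SCALE_OUT + OFFSET_OUT,
+    // rounded and saturated to DATA_TYPE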
+#if defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT)
+ REQUANTIZE(VEC_SIZE, out_q0, OFFSET_IN1, OFFSET_OUT, SCALE_IN1, SCALE_OUT, out_q0);
+#endif /* defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT) */
+
+ STORE_VECTOR_SELECT(out_q, DATA_TYPE, out_base_ptr, VEC_SIZE, VEC_SIZE_LEFTOVER, (VEC_SIZE_LEFTOVER != 0) && get_global_id(0) == 0);
+}
+#endif // defined(POOL_SIZE_X) && defined(POOL_SIZE_Y) && defined(POOL_SIZE_Z)
+#endif // defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(SRC_DEPTH) && defined(DST_CHANNELS) && defined(DST_HEIGHT) && defined(DST_DEPTH) && defined(DST_BATCH_SIZE) && defined(ACC_DATA_TYPE)
diff --git a/src/core/CL/cl_kernels/nhwc/pooling_layer.cl b/src/core/CL/cl_kernels/nhwc/pooling_layer.cl
new file mode 100644
index 0000000000..5b59ff5088
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/pooling_layer.cl
@@ -0,0 +1,364 @@
+/*
+ * Copyright (c) 2017-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+#include "repeat.h"
+#include "tile_helpers.h"
+
+#if defined(POOL_AVG) || defined(POOL_L2)
+#define POOL_OP(x, y) ((x) + (y))
+#else /* defined(POOL_AVG) || defined(POOL_L2) */
+#define POOL_OP(x, y) (fmax((x), (y)))
+#endif /* defined(POOL_AVG) || defined(POOL_L2) */
+
+#if defined(POOL_L2)
+#define POW2_OP(x, vec_size) ((x) * (x))
+#else /* defined(POOL_L2) */
+#define POW2_OP(x, vec_size) (x)
+#endif /* defined(POOL_L2) */
+
+#define DIV_OP(x, y) (x * (1.f / y))
+#define SQRT_OP(x) sqrt((x))
+
+#if defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(DST_CHANNELS) && defined(DST_HEIGHT) && defined(DST_BATCH_SIZE) && defined(ACC_DATA_TYPE)
+
+#if defined(POOL_SIZE_X) && defined(POOL_SIZE_Y)
+/** Performs pooling layer of size equal to MxN. This OpenCL kernel can perform the following pooling types:
+ * -# max, -DPOOL_MAX must be passed at compile time
+ * -# average, -DPOOL_AVG must be passed at compile time. If padding has to be excluded, -DEXCLUDE_PADDING should be passed at compile time
+ * -# l2 normalisation, -DPOOL_L2 must be passed at compile time
+ *
+ * @note Datatype must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=half. Supported data types are F32/F16
+ * @note Accumulation data type must be passed at compile time using -DACC_DATA_TYPE e.g. -DACC_DATA_TYPE=float
+ * @note If -DFP_MIXED_PRECISION is passed at compile time, the kernel will use F32 for the partial result
+ * @note Pool size must be passed at compile time using -DPOOL_SIZE_X and -DPOOL_SIZE_Y. e.g. -DPOOL_SIZE_X=4, -DPOOL_SIZE_Y=4
+ * @note Input tensor width and height must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT
+ * @note Output tensor height, channels and batch size must be passed at compile time using -DDST_HEIGHT, -DDST_CHANNELS and -DDST_BATCH_SIZE
+ * @note Pool strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y which are the steps of the window along the x and y directions
+ * @note Pool pads must be passed at compile time using -DPAD_X and -DPAD_Y
+ * @note Vector size must be passed at compile time using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @note Leftover vector size must be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
+ * @note The initial value for the pooling operation must be passed at compile time using -DINITIAL_VALUE e.g. -DINITIAL_VALUE=0
+ *
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] input_step_w input_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] output_step_w output_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void pooling_layer_MxN_nhwc(
+ TENSOR4D_DECLARATION(input),
+ TENSOR4D_DECLARATION(output))
+{
+    // Note: If C is not a multiple of VEC_SIZE, we shift back by VEC_SIZE_LEFTOVER elements to compute the leftover elements for get_global_id(0) == 0
+    // Note: If C is less than VEC_SIZE, VEC_SIZE should be shrunk to the closest smaller VEC_SIZE. This operation is performed on the host side
+ int idx_out_c = GET_SPATIAL_IDX(0, VEC_SIZE, VEC_SIZE_LEFTOVER);
+ int idx_out_w = GET_SPATIAL_IDX(1, 1, 0);
+#if DST_BATCH_SIZE != 1
+ // If batch size != 1, the batch size dimension is collapsed over the height dimension
+ int idx_out_h = GET_SPATIAL_IDX(2, 1, 0) % DST_HEIGHT;
+ int idx_out_n = GET_SPATIAL_IDX(2, 1, 0) / DST_HEIGHT;
+#else //DST_BATCH_SIZE != 1
+ int idx_out_h = GET_SPATIAL_IDX(2, 1, 0);
+ int idx_out_n = 0;
+#endif // DST_BATCH_SIZE != 1
+
+ __global unsigned char *in_base_ptr = input_ptr + input_offset_first_element_in_bytes + idx_out_c * sizeof(DATA_TYPE) + idx_out_n * input_stride_w;
+
+ __global unsigned char *out_base_ptr = output_ptr + output_offset_first_element_in_bytes + idx_out_c * sizeof(DATA_TYPE) + idx_out_w * output_stride_y + idx_out_h * output_stride_z + idx_out_n *
+ output_stride_w;
+
+ VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)
+ res0 = INITIAL_VALUE;
+
+ int idx_in_w = idx_out_w * STRIDE_X - PAD_X;
+ int idx_in_h = idx_out_h * STRIDE_Y - PAD_Y;
+
+ int pool_x_s = max((int)0, -idx_in_w);
+ int pool_x_e = min((int)POOL_SIZE_X, (int)SRC_WIDTH - idx_in_w);
+ int pool_y_s = max((int)0, -idx_in_h);
+ int pool_y_e = min((int)POOL_SIZE_Y, (int)SRC_HEIGHT - idx_in_h);
+
+#if defined(EXCLUDE_PADDING)
+ int filter_size = (pool_y_e - pool_y_s) * (pool_x_e - pool_x_s);
+#else // defined(EXCLUDE_PADDING)
+ int filter_size = POOL_SIZE_X * POOL_SIZE_Y;
+#endif // defined(EXCLUDE_PADDING)
+
+#if POOL_SIZE_X == SRC_WIDTH && POOL_SIZE_Y == SRC_HEIGHT && PAD_X == 0 && PAD_Y == 0
+ // Global pooling path
+ for(int y = 0; y < POOL_SIZE_Y; ++y)
+ {
+#pragma unroll 8
+ for(int x = 0; x < POOL_SIZE_X; ++x)
+ {
+#else // POOL_SIZE_X == SRC_WIDTH && POOL_SIZE_Y == SRC_HEIGHT && PAD_X == 0 && PAD_Y == 0
+ for(int y = pool_y_s; y < pool_y_e; ++y)
+ {
+#pragma unroll 8
+ for(int x = pool_x_s; x < pool_x_e; ++x)
+ {
+#endif // POOL_SIZE_X == SRC_WIDTH && POOL_SIZE_Y == SRC_HEIGHT && PAD_X == 0 && PAD_Y == 0
+ VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)
+ data0;
+#if defined(FP_MIXED_PRECISION)
+            // In case of FP_MIXED_PRECISION, ACC_DATA_TYPE differs from DATA_TYPE
+ data0 = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(in_base_ptr + (x + idx_in_w) * input_stride_y + (y + idx_in_h) * input_stride_z)), VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE));
+#else // defined(FP_MIXED_PRECISION)
+ data0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(in_base_ptr + (x + idx_in_w) * input_stride_y + (y + idx_in_h) * input_stride_z));
+#endif // defined(FP_MIXED_PRECISION)
+
+#if defined(POOL_L2)
+ // Raise to power of 2 for L2 Pooling
+ data0 *= data0;
+#endif // defined(POOL_L2)
+ res0 = POOL_OP(res0, data0);
+ }
+ }
+
+#if defined(POOL_AVG) || defined(POOL_L2)
+ res0 /= (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))filter_size;
+#endif // defined(POOL_AVG) || defined(POOL_L2)
+
+#if defined(POOL_L2)
+ // Take square root of the result in L2 pooling
+ res0 = SQRT_OP(res0);
+#endif // defined(POOL_L2)
+
+ // Store result
+#if defined(FP_MIXED_PRECISION)
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ res_converted0 = CONVERT(res0, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE));
+ STORE_VECTOR_SELECT(res_converted, DATA_TYPE, out_base_ptr, VEC_SIZE, VEC_SIZE_LEFTOVER, (VEC_SIZE_LEFTOVER != 0) && get_global_id(0) == 0);
+#else // defined(FP_MIXED_PRECISION)
+ STORE_VECTOR_SELECT(res, DATA_TYPE, out_base_ptr, VEC_SIZE, VEC_SIZE_LEFTOVER, (VEC_SIZE_LEFTOVER != 0) && get_global_id(0) == 0);
+#endif // defined(FP_MIXED_PRECISION)
+}
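+// As an illustration only (all values below are hypothetical), a host-side build-options string for
+// the kernel above could look like:
+//   "-DDATA_TYPE=half -DACC_DATA_TYPE=float -DFP_MIXED_PRECISION -DPOOL_AVG -DEXCLUDE_PADDING
+//    -DPOOL_SIZE_X=3 -DPOOL_SIZE_Y=3 -DSTRIDE_X=2 -DSTRIDE_Y=2 -DPAD_X=1 -DPAD_Y=1
+//    -DSRC_WIDTH=112 -DSRC_HEIGHT=112 -DDST_HEIGHT=56 -DDST_CHANNELS=64 -DDST_BATCH_SIZE=1
+//    -DVEC_SIZE=4 -DVEC_SIZE_LEFTOVER=0 -DINITIAL_VALUE=0"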
+#endif // defined(POOL_SIZE_X) && defined(POOL_SIZE_Y)
+
+#define SELECT_TYPE SELECT_VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)
+
+/** Performs pooling layer of size equal to 2. This OpenCL kernel can perform the following pooling types:
+ * -# max, -DPOOL_MAX must be passed at compile time
+ * -# max extracting the max index, -DPOOL_MAX and -DEXTRACT_MAX_INDEX must be passed at compile time
+ * -# average, -DPOOL_AVG must be passed at compile time. If padding has to be excluded, -DEXCLUDE_PADDING should be passed at compile time
+ * -# l2 normalisation, -DPOOL_L2 must be passed at compile time
+ *
+ * @note Datatype must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=half. Supported data types are F32/F16
+ * @note Accumulation data type must be passed at compile time using -DACC_DATA_TYPE e.g. -DACC_DATA_TYPE=float
+ * @note If -DFP_MIXED_PRECISION is passed at compile time, the kernel will use F32 for the partial result
+ * @note Input tensor width and height must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT
+ * @note Output tensor height, channels and batch size must be passed at compile time using -DDST_HEIGHT, -DDST_CHANNELS and -DDST_BATCH_SIZE
+ * @note Pool strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y which are the steps of the window along the x and y directions
+ * @note Pool pads must be passed at compile time using -DPAD_X and -DPAD_Y
+ * @note Vector size must be passed at compile time using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @note Leftover vector size must be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
+ * @note The initial value for the pooling operation must be passed at compile time using -DINITIAL_VALUE e.g. -DINITIAL_VALUE=0
+ *
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] input_step_w input_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] output_step_w output_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] indices_ptr (Optional) Pointer to the indices tensor. Supported data types: U32
+ * @param[in] indices_stride_x (Optional) Stride of the indices tensor in X dimension (in bytes)
+ * @param[in] indices_step_x (Optional) indices_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] indices_stride_y (Optional) Stride of the indices tensor in Y dimension (in bytes)
+ * @param[in] indices_step_y (Optional) indices_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] indices_stride_z (Optional) Stride of the indices tensor in Z dimension (in bytes)
+ * @param[in] indices_step_z (Optional) indices_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] indices_stride_w (Optional) Stride of the indices tensor in W dimension (in bytes)
+ * @param[in] indices_step_w (Optional) indices_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] indices_offset_first_element_in_bytes (Optional) The offset of the first element in the indices tensor
+ */
+__kernel void pooling_layer_2x2_nhwc(
+ TENSOR4D_DECLARATION(input),
+ TENSOR4D_DECLARATION(output)
+#if defined(EXTRACT_MAX_INDEX) && defined(POOL_MAX)
+ ,
+ TENSOR4D_DECLARATION(indices)
+#endif // defined(EXTRACT_MAX_INDEX) && defined(POOL_MAX)
+)
+{
+    // Note: If C is not a multiple of VEC_SIZE, we shift back by VEC_SIZE_LEFTOVER elements to compute the leftover elements for get_global_id(0) == 0
+    // Note: If C is less than VEC_SIZE, VEC_SIZE should be shrunk to the closest smaller VEC_SIZE. This operation is performed on the host side
+ int idx_out_c = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
+ int idx_out_w = get_global_id(1);
+#if DST_BATCH_SIZE != 1
+ // If batch size != 1, the batch size dimension is collapsed over the height dimension
+ int idx_out_h = get_global_id(2) % DST_HEIGHT;
+ int idx_out_n = get_global_id(2) / DST_HEIGHT;
+#else // DST_BATCH_SIZE != 1
+ int idx_out_h = get_global_id(2);
+ int idx_out_n = 0;
+#endif // DST_BATCH_SIZE != 1
+
+ int idx_in_w = idx_out_w * STRIDE_X - PAD_X;
+ int idx_in_h = idx_out_h * STRIDE_Y - PAD_Y;
+
+ __global unsigned char *in_base_ptr = input_ptr + input_offset_first_element_in_bytes + idx_out_c * sizeof(DATA_TYPE) + idx_out_n * input_stride_w;
+
+ __global unsigned char *out_base_ptr = output_ptr + output_offset_first_element_in_bytes + idx_out_c * sizeof(DATA_TYPE) + idx_out_w * output_stride_y + idx_out_h * output_stride_z + idx_out_n *
+ output_stride_w;
+
+ int pool_x_s = max((int)0, -idx_in_w);
+ int pool_x_e = min((int)2, (int)SRC_WIDTH - idx_in_w);
+ int pool_y_s = max((int)0, -idx_in_h);
+ int pool_y_e = min((int)2, (int)SRC_HEIGHT - idx_in_h);
+
+ int filter_size = (pool_x_e - pool_x_s) * (pool_y_e - pool_y_s);
+
+ int x0 = pool_x_s + idx_in_w;
+ int y0 = pool_y_s + idx_in_h;
+ int x1 = pool_x_e - 1 + idx_in_w;
+ int y1 = pool_y_e - 1 + idx_in_h;
+
+ REPEAT_VAR_INIT_TO_CONST(4, VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE), data, 0);
+
+#if defined(FP_MIXED_PRECISION)
+    // In case of FP_MIXED_PRECISION, ACC_DATA_TYPE differs from DATA_TYPE
+ data0 = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(in_base_ptr + x0 * input_stride_y + y0 * input_stride_z)), VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE));
+ data1 = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(in_base_ptr + x1 * input_stride_y + y0 * input_stride_z)), VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE));
+ data2 = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(in_base_ptr + x0 * input_stride_y + y1 * input_stride_z)), VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE));
+ data3 = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(in_base_ptr + x1 * input_stride_y + y1 * input_stride_z)), VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE));
+#else // defined(FP_MIXED_PRECISION)
+ data0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(in_base_ptr + x0 * input_stride_y + y0 * input_stride_z));
+ data1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(in_base_ptr + x1 * input_stride_y + y0 * input_stride_z));
+ data2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(in_base_ptr + x0 * input_stride_y + y1 * input_stride_z));
+ data3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(in_base_ptr + x1 * input_stride_y + y1 * input_stride_z));
+#endif // defined(FP_MIXED_PRECISION)
+
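+    // The four loads above use clamped coordinates, so when the 2x2 window is partially out of bounds
+    // some of them duplicate an in-bound value. For average/L2 pooling those duplicates are neutralised
+    // below by replacing them with INITIAL_VALUE; max pooling can skip this because a duplicated valid
+    // value cannot change the maximum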
+#if !defined(POOL_MAX)
+ if(filter_size != 4)
+ {
+ SELECT_TYPE cond_w_s = (SELECT_TYPE)idx_in_w < (SELECT_TYPE)0;
+ SELECT_TYPE cond_w_e = (SELECT_TYPE)idx_in_w >= (SELECT_TYPE)(SRC_WIDTH - 1);
+ SELECT_TYPE cond_h_s = (SELECT_TYPE)idx_in_h < (SELECT_TYPE)0;
+ SELECT_TYPE cond_h_e = (SELECT_TYPE)idx_in_h >= (SELECT_TYPE)(SRC_HEIGHT - 1);
+
+ // Make invalid the values loaded if the x or y coordinate was clamped (out-of-bound)
+ data0 = select(data0, (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))INITIAL_VALUE, (SELECT_TYPE)(cond_w_s | cond_h_s));
+ data1 = select(data1, (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))INITIAL_VALUE, (SELECT_TYPE)(cond_w_e | cond_h_s));
+ data2 = select(data2, (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))INITIAL_VALUE, (SELECT_TYPE)(cond_w_s | cond_h_e));
+ data3 = select(data3, (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))INITIAL_VALUE, (SELECT_TYPE)(cond_w_e | cond_h_e));
+ }
+#endif // !defined(POOL_MAX)
+
+#if defined(POOL_L2)
+ // Raise to power of 2 for L2 Pooling
+ data0 *= data0;
+ data1 *= data1;
+ data2 *= data2;
+ data3 *= data3;
+#endif /* defined(POOL_L2) */
+
+ VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)
+ res0 = data0;
+ res0 = POOL_OP(res0, data1);
+ res0 = POOL_OP(res0, data2);
+ res0 = POOL_OP(res0, data3);
+
+#if defined(POOL_AVG) || defined(POOL_L2)
+#if defined(EXCLUDE_PADDING)
+ res0 /= (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))filter_size;
+#else // !defined(EXCLUDE_PADDING)
+ res0 /= (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))4;
+#endif // defined(EXCLUDE_PADDING)
+#endif // defined(POOL_AVG) || defined(POOL_L2)
+
+#if defined(POOL_L2)
+ // Take square root of the result in L2 pooling
+ res0 = SQRT_OP(res0);
+#endif // defined(POOL_L2)
+
+ // Store result
+#if defined(FP_MIXED_PRECISION)
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ res_converted0 = CONVERT(res0, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE));
+ STORE_VECTOR_SELECT(res_converted, DATA_TYPE, out_base_ptr, VEC_SIZE, VEC_SIZE_LEFTOVER, (VEC_SIZE_LEFTOVER != 0) && get_global_id(0) == 0);
+#else // defined(FP_MIXED_PRECISION)
+ STORE_VECTOR_SELECT(res, DATA_TYPE, out_base_ptr, VEC_SIZE, VEC_SIZE_LEFTOVER, (VEC_SIZE_LEFTOVER != 0) && get_global_id(0) == 0);
+#endif // defined(FP_MIXED_PRECISION)
+
+#if defined(EXTRACT_MAX_INDEX) && defined(POOL_MAX)
+
+ // This part is used to return the index of the maximum value
+    // Note: DST_CHANNELS and DST_BATCH_SIZE can be used for both the input and the output tensor
+
+    // Note: the batch dimension does not contribute to the offset
+ VEC_DATA_TYPE(uint, VEC_SIZE)
+ base_index = (uint)idx_out_c;
+
+ base_index += VEC_OFFS(uint, VEC_SIZE);
+
+ VEC_DATA_TYPE(uint, VEC_SIZE)
+ index0 = base_index + (uint)x0 * DST_CHANNELS + (uint)y0 * (DST_CHANNELS * SRC_WIDTH);
+ VEC_DATA_TYPE(uint, VEC_SIZE)
+ index1 = base_index + (uint)x1 * DST_CHANNELS + (uint)y0 * (DST_CHANNELS * SRC_WIDTH);
+ VEC_DATA_TYPE(uint, VEC_SIZE)
+ index2 = base_index + (uint)x0 * DST_CHANNELS + (uint)y1 * (DST_CHANNELS * SRC_WIDTH);
+ VEC_DATA_TYPE(uint, VEC_SIZE)
+ index3 = base_index + (uint)x1 * DST_CHANNELS + (uint)y1 * (DST_CHANNELS * SRC_WIDTH);
+
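+    // Pairwise "tournament" between the four candidates: pick the index of the larger element in each
+    // row, then between the two row winners, so index0 ends up holding the flattened NHWC position
+    // (channel + x * DST_CHANNELS + y * DST_CHANNELS * SRC_WIDTH) of the window maximum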
+ index0 = select(index1, index0, CONVERT(isgreaterequal(data0, data1), VEC_DATA_TYPE(int, VEC_SIZE)));
+ index1 = select(index3, index2, CONVERT(isgreaterequal(data2, data3), VEC_DATA_TYPE(int, VEC_SIZE)));
+ index0 = select(index1, index0, CONVERT(isgreaterequal(max(data0, data1), max(data2, data3)), VEC_DATA_TYPE(int, VEC_SIZE)));
+
+ __global unsigned char *idx_base_ptr = indices_ptr + indices_offset_first_element_in_bytes + idx_out_c * sizeof(uint) + idx_out_w * indices_stride_y + idx_out_h * indices_stride_z + idx_out_n *
+ indices_stride_w;
+
+ // Store result
+ STORE_VECTOR_SELECT(index, uint, idx_base_ptr, VEC_SIZE, VEC_SIZE_LEFTOVER, ((VEC_SIZE_LEFTOVER != 0) && get_global_id(0) == 0));
+#endif // defined(EXTRACT_MAX_INDEX) && defined(POOL_MAX)
+}
+#endif // defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(DST_CHANNELS) && defined(DST_HEIGHT) && defined(DST_BATCH_SIZE) && defined(ACC_DATA_TYPE) \ No newline at end of file
diff --git a/src/core/CL/cl_kernels/pooling_layer_quantized.cl b/src/core/CL/cl_kernels/nhwc/pooling_layer_quantized.cl
index d8cef2b4e6..46268a4a88 100644
--- a/src/core/CL/cl_kernels/pooling_layer_quantized.cl
+++ b/src/core/CL/cl_kernels/nhwc/pooling_layer_quantized.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -51,108 +51,6 @@
#error "L2 pooling is not supported"
#endif /* defined(POOL_L2) */
-int calculate_avg_scale(const int pool_size_x, const int pool_size_y, const int upper_bound_w, const int upper_bound_h,
- const int pad_x, const int pad_y, const int stride_x, const int stride_y)
-{
- int start_x = get_global_id(0) * stride_x - pad_x;
- int start_y = get_global_id(1) * stride_y - pad_y;
- const int end_x = min(start_x + pool_size_x, upper_bound_w);
- const int end_y = min(start_y + pool_size_y, upper_bound_h);
-#if defined(EXCLUDE_PADDING)
- start_x = max(0, start_x);
- start_y = max(0, start_y);
-#endif /* defined(EXCLUDE_PADDING) */
- return ((end_y - start_y) * (end_x - start_x));
-}
-
-/** Performs a pooling function of pool size equal to N (NCHW)
- *
- * @note Pool sizes must be passed using -DPOOL_SIZE_X and -DPOOL_SIZE_Y e.g. -DPOOL_SIZE_X=13;
- * @note In case of average pooling the following information must be passed at compile time:
- * -DPOOL_AVG must be provided otherwise max pooling will be performed.
- * -DMAX_WIDTH and -DMAX_HEIGHT which are the maximum accessible indeces in x and y dimensions (width + pad)
- * -DSTRIDE_X and -DSTRIDE_Y which are the steps of the window along the x and y directions
- * -DPAD_X and -DPAD_Y which are the pooling paddings in x and y dimension
- * @note Input data type must be passed at compile time using -DDAT_TYPE=type, e.g. -DDATA_TYPE=uchar
- * @note The initial value for the pooling operation must be passed at compile time using -DINITIAL_VALUE e.g. -DINITIAL_VALUE=0
- *
- * @param[in] input_ptr Pointer to the source image. Supported data types: QASYMM8/QASYMM8_SIGNED
- * @param[in] input_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[out] output_ptr Pointer to the destination image. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
- */
-__kernel void pooling_layer_MxN_quantized_nchw(
- TENSOR3D_DECLARATION(input),
- TENSOR3D_DECLARATION(output))
-{
- // Get pixels pointer
- Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
-
- int8 vdata = INITIAL_VALUE;
- int sdata = INITIAL_VALUE;
-
- // Load data
- for(int y = 0; y < POOL_SIZE_Y; y++)
- {
- int x = 0;
- for(; x <= ((int)POOL_SIZE_X - 8); x += 8)
- {
- VEC_TYPE(8)
- data = vload8(0, (__global DATA_TYPE *)tensor3D_offset(&input, x, y, 0));
- int8 data0 = convert_int8(data);
- vdata = POOL_OP(vdata, data0);
- }
-
- // Leftover
- for(; x < (int)POOL_SIZE_X; ++x)
- {
- DATA_TYPE data = *((__global DATA_TYPE *)tensor3D_offset(&input, x, y, 0));
- int data0 = convert_int(data);
- sdata = POOL_OP(sdata, data0);
- }
- }
-
- // Reduce result
- int4 reduce4 = POOL_OP(vdata.s0123, vdata.s4567);
- int2 reduce2 = POOL_OP(reduce4.s01, reduce4.s23);
- int res = POOL_OP(reduce2.s0, reduce2.s1);
- res = POOL_OP(res, sdata);
-
-#if defined(POOL_AVG)
- res = round(DIV_OP(res, calculate_avg_scale(POOL_SIZE_X, POOL_SIZE_Y, MAX_WIDTH, MAX_HEIGHT, PAD_X, PAD_Y, STRIDE_X, STRIDE_Y)));
-#endif /* defined(POOL_AVG) */
-
- DATA_TYPE result_q8 = CONVERT(res, DATA_TYPE);
-
-#if defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT)
-
- const float result_f32 = convert_float(result_q8);
- const float input_offset = (float)OFFSET_IN1;
- const float input_scale = (float)SCALE_IN1;
- const float scale_out = (float)SCALE_OUT;
- const float offset_out = (float)OFFSET_OUT;
- const float in_f32 = (result_f32 - input_offset) * input_scale;
- const float out_f32 = in_f32 / scale_out + offset_out;
- result_q8 = CONVERT_SAT(convert_int_rte(out_f32), DATA_TYPE);
-
-#endif /* defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT) */
-
- *(__global DATA_TYPE *)output.ptr = result_q8;
-}
-
#if defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(DST_CHANNELS) && defined(DST_HEIGHT) && defined(DST_BATCH_SIZE) && defined(ACC_DATA_TYPE)
/** Performs pooling layer of size equal to MxN. This OpenCL kernel can perform the following pooling types:
* -# max, -DPOOL_MAX must be passed at compile time
diff --git a/src/core/CL/cl_kernels/nhwc/reorg_layer.cl b/src/core/CL/cl_kernels/nhwc/reorg_layer.cl
new file mode 100644
index 0000000000..a340b0b8a2
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/reorg_layer.cl
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(DATA_TYPE) && defined(SRC_DEPTH) && defined(STRIDE)
+
+#define CALCULATE_SRC_COORDINATES(xo, yo, zo, xi, yi, zi) \
+ ({ \
+ int offset = zo / (int)SRC_DEPTH; \
+ xi = xo * (int)STRIDE + offset % (int)STRIDE; \
+ yi = yo * (int)STRIDE + offset / (int)STRIDE; \
+ zi = zo % SRC_DEPTH; \
+ })
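+// Illustrative example (assumed values): with -DSRC_DEPTH=4 and -DSTRIDE=2, output channels 0-3 read
+// the source pixel (2*xo, 2*yo), channels 4-7 read (2*xo + 1, 2*yo), channels 8-11 read (2*xo, 2*yo + 1)
+// and channels 12-15 read (2*xo + 1, 2*yo + 1), with zi = zo % SRC_DEPTH in every case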
+
+/** Performs a reorganization layer of input tensor to the output tensor when the data layout is NHWC
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The depth of the input tensor must be passed at compile time using -DSRC_DEPTH: e.g. -DSRC_DEPTH=64
+ * @note The distance between 2 consecutive pixels along the x and y direction must be passed at compile time using -DSTRIDE: e.g. -DSTRIDE=2
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: All
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void reorg_layer_nhwc(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst))
+{
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(dst);
+
+ int xo = get_global_id(1);
+ int yo = get_global_id(2);
+ int zo = get_global_id(0);
+ int xi, yi, zi;
+
+ CALCULATE_SRC_COORDINATES(xo, yo, zo, xi, yi, zi);
+
+ int src_offset = zi * sizeof(DATA_TYPE) + xi * src_stride_y + yi * src_stride_z;
+
+ *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)(src_ptr + src_offset_first_element_in_bytes + src_offset));
+}
+#endif // defined(DATA_TYPE) && defined(SRC_DEPTH) && defined(STRIDE) \ No newline at end of file
diff --git a/src/core/CL/cl_kernels/nhwc/scale.cl b/src/core/CL/cl_kernels/nhwc/scale.cl
new file mode 100644
index 0000000000..e071b0f192
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/scale.cl
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2016-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+#include "tile_helpers.h"
+
+#if defined(SCALE_NEAREST_NEIGHBOUR)
+//! @cond Doxygen_Suppress
+/** Performs scale on a tensor by interpolating with the NEAREST NEIGHBOUR method. (NHWC)
+ *
+ * @note Sampling policy to be used is passed as -DSAMPLING_POLICY_(TYPE) e.g. -DSAMPLING_POLICY_TOP_LEFT
+ * @note The tensor type ("BUFFER" only is supported) of the source tensor must be passed at compile time using -DSRC_TENSOR_TYPE (e.g. -DSRC_TENSOR_TYPE=BUFFER)
+ * @note The tensor type ("BUFFER" only is supported) of the destination tensor must be passed at compile time using -DDST_TENSOR_TYPE (e.g. -DDST_TENSOR_TYPE=BUFFER)
+ * @note The data type of the source tensor must be passed at compile time using -DSRC_DATA_TYPE (e.g. -DSRC_DATA_TYPE=float)
+ * @note The data type of the destination tensor must be passed at compile time using -DDST_DATA_TYPE (e.g. -DDST_DATA_TYPE=float)
+ * @note The number of N0 output channels to process must be passed at compile time using -DN0 (e.g. -DN0=2)
+ * @note The border value must be passed at compile time using -DCONSTANT_VALUE (e.g. -DCONSTANT_VALUE=0)
+ * @note In case of F32/F16, -DIS_FLOATING_POINT must be passed at compile time
+ * @note If the source tensor has more than 3 dimensions, -DBATCHED_EXECUTION must be passed at compile time
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: U8/S16/F16/F32.
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_c The size of the channels dimension of the source tensor
+ * @param[in] src_w The size of the width dimension of the source tensor
+ * @param[in] src_h The size of the height dimension of the source tensor
+ * @param[in] src_n The size of the batches dimension of the source tensor
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: U8/S16/F16/F32.
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_c The size of the channels dimension of the destination tensor
+ * @param[in] dst_w The size of the width dimension of the destination tensor
+ * @param[in] dst_h The size of the height dimension of the destination tensor
+ * @param[in] dst_n The size of the batches dimension of the destination tensor
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] scale_x The scale value to apply on the source width
+ * @param[in] scale_y The scale value to apply on the source height
+ */
+//! @endcond
+__kernel void scale_nearest_neighbour_nhwc(
+ TENSOR4D_RO_T(src, SRC_TENSOR_TYPE),
+ TENSOR4D_WO_T(dst, DST_TENSOR_TYPE),
+ const float scale_x,
+ const float scale_y)
+{
+ const int cout = GET_SPATIAL_IDX(0, N0, PARTIAL_N0); // OFM
+ const int xo = GET_SPATIAL_IDX(1, 1, 0); // WIDTH
+#if defined(BATCHED_EXECUTION)
+ const int yo = GET_SPATIAL_IDX(2, 1, 0) % dst_h; // HEIGHT
+ const int bout = GET_SPATIAL_IDX(2, 1, 0) / dst_h; // BATCH SIZE IDX
+#else // defined(BATCHED_EXECUTION)
+ const int yo = GET_SPATIAL_IDX(2, 1, 0); // HEIGHT
+ const int bout = 0; // BATCH SIZE IDX
+#endif // defined(BATCHED_EXECUTION)
+
+#ifdef SAMPLING_POLICY_TOP_LEFT
+ float xi_f = (xo * scale_x);
+ float yi_f = (yo * scale_y);
+#elif SAMPLING_POLICY_CENTER
+ float xi_f = ((xo + 0.5f) * scale_x);
+ float yi_f = ((yo + 0.5f) * scale_y);
+#else // SAMPLING_POLICY
+#error("Unsupported sampling policy");
+#endif // SAMPLING_POLICY
+
+#ifdef ALIGN_CORNERS
+ xi_f = round(xi_f);
+ yi_f = round(yi_f);
+#endif // ALIGN_CORNERS
+
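+    // With SAMPLING_POLICY_TOP_LEFT the output coordinate is scaled directly, while SAMPLING_POLICY_CENTER
+    // adds 0.5f to sample pixel centres; the coordinate is then truncated (or rounded when ALIGN_CORNERS
+    // is set) and clamped so that positions falling outside the source replicate the nearest border pixel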
+ const int xi0 = clamp((int)xi_f, 0, (int)src_w - 1);
+ const int yi0 = clamp((int)yi_f, 0, (int)src_h - 1);
+
+ TILE(SRC_DATA_TYPE, 1, N0, in00);
+
+ T_LOAD_NHWC_WITH_DILATION(SRC_DATA_TYPE, 1, 1, N0, SRC_TENSOR_TYPE, src, bout, yi0, xi0, cout, src_w, src_h, 1, 1, false, in00);
+
+ TILE(uint, 1, 1, dst_indirect_y);
+
+ // Calculate the destination indirect Y
+ dst_indirect_y[0].v = xo + (yo * (int)(dst_w)) + bout * (int)(dst_w * dst_h);
+
+ bool x_cond = PARTIAL_N0 != 0 && get_global_id(0) == 0;
+
+ T_STORE_INDIRECT_WIDTH_SELECT(DST_DATA_TYPE, 1, N0, PARTIAL_N0, DST_TENSOR_TYPE, dst, cout, dst_stride_y, x_cond, in00, dst_indirect_y);
+}
+#endif /* SCALE_NEAREST_NEIGHBOUR */
+
+#if defined(SCALE_BILINEAR)
+//! @cond Doxygen_Suppress
+/** Performs scale on a tensor by interpolating with the BILINEAR method. (NHWC)
+ *
+ * @note If border mode replicate is used, it should be passed as -DBORDER_MODE_REPLICATE
+ * @note Sampling policy to be used is passed as -DSAMPLING_POLICY_(TYPE) e.g. -DSAMPLING_POLICY_TOP_LEFT
+ * @note The tensor type ("BUFFER" only is supported) of the source tensor must be passed at compile time using -DSRC_TENSOR_TYPE (e.g. -DSRC_TENSOR_TYPE=BUFFER)
+ * @note The tensor type ("BUFFER" only is supported) of the destination tensor must be passed at compile time using -DDST_TENSOR_TYPE (e.g. -DDST_TENSOR_TYPE=BUFFER)
+ * @note The data type of the source tensor must be passed at compile time using -DSRC_DATA_TYPE (e.g. -DSRC_DATA_TYPE=float)
+ * @note The data type of the destination tensor must be passed at compile time using -DDST_DATA_TYPE (e.g. -DDST_DATA_TYPE=float)
+ * @note The number of N0 output channels to process must be passed at compile time using -DN0 (e.g. -DN0=2)
+ * @note The border value must be passed at compile time using -DCONSTANT_VALUE (e.g. -DCONSTANT_VALUE=0)
+ * @note In case of F32/F16, -DIS_FLOATING_POINT must be passed at compile time
+ * @note If the source tensor has more than 3 dimensions, -DBATCHED_EXECUTION must be passed at compile time
+ *
+ * @note In case of QASYMM8, the following extra information must be passed at compile time:
+ * - The source offset e.g. -DOFFSET=4
+ * - The source scale e.g. -DSCALE=4
+ *
+ * @param[in] src_img (Not supported) Read only cl_image object for the source tensor. Included when SRC_TENSOR_TYPE=IMAGE
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: U8/S16/F16/F32.
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_c The size of the channels dimension of the source tensor
+ * @param[in] src_w The size of the width dimension of the source tensor
+ * @param[in] src_h The size of the height dimension of the source tensor
+ * @param[in] src_n The size of the batches dimension of the source tensor
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_img (Not supported) Write only cl_image object for the destination tensor. Included when DST_TENSOR_TYPE=IMAGE
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: U8/S16/F16/F32.
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_c The size of the channels dimension of the destination tensor
+ * @param[in] dst_w The size of the width dimension of the destination tensor
+ * @param[in] dst_h The size of the height dimension of the destination tensor
+ * @param[in] dst_n The size of the batches dimension of the destination tensor
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] scale_x The scale value to apply on the source width
+ * @param[in] scale_y The scale value to apply on the source height
+ */
+//! @endcond
+__kernel void scale_bilinear_nhwc(
+ TENSOR4D_RO_T(src, SRC_TENSOR_TYPE),
+ TENSOR4D_WO_T(dst, DST_TENSOR_TYPE),
+ const float scale_x,
+ const float scale_y)
+{
+ const int cout = GET_SPATIAL_IDX(0, N0, PARTIAL_N0); // OFM
+ const int xo = GET_SPATIAL_IDX(1, 1, 0); // WIDTH
+#if defined(BATCHED_EXECUTION)
+ const int yo = GET_SPATIAL_IDX(2, 1, 0) % dst_h; // HEIGHT
+ const int bout = GET_SPATIAL_IDX(2, 1, 0) / dst_h; // BATCH SIZE IDX
+#else // defined(BATCHED_EXECUTION)
+ const int yo = GET_SPATIAL_IDX(2, 1, 0); // HEIGHT
+ const int bout = 0; // BATCH SIZE IDX
+#endif // defined(BATCHED_EXECUTION)
+
+#ifdef SAMPLING_POLICY_TOP_LEFT
+ float xi_f = (xo * scale_x);
+ float yi_f = (yo * scale_y);
+#elif SAMPLING_POLICY_CENTER
+ float xi_f = ((xo + 0.5f) * scale_x - 0.5f);
+ float yi_f = ((yo + 0.5f) * scale_y - 0.5f);
+#else // SAMPLING_POLICY
+#error("Unsupported sampling policy");
+#endif // SAMPLING_POLICY
+
+ const int xi = (int)floor(xi_f);
+ const int yi = (int)floor(yi_f);
+
+ TILE(SRC_DATA_TYPE, 1, N0, in00);
+ TILE(SRC_DATA_TYPE, 1, N0, in01);
+ TILE(SRC_DATA_TYPE, 1, N0, in10);
+ TILE(SRC_DATA_TYPE, 1, N0, in11);
+
+ // Initialize the tiles to CONSTANT_VALUE
+ in00[0].v = CONSTANT_VALUE;
+ in01[0].v = CONSTANT_VALUE;
+ in10[0].v = CONSTANT_VALUE;
+ in11[0].v = CONSTANT_VALUE;
+
+#ifndef BORDER_MODE_REPLICATE
+ T_LOAD_NHWC_WITH_DILATION(SRC_DATA_TYPE, 1, 1, N0, SRC_TENSOR_TYPE, src, bout, yi, xi, cout, src_w, src_h, 1, 1, true, in00);
+ T_LOAD_NHWC_WITH_DILATION(SRC_DATA_TYPE, 1, 1, N0, SRC_TENSOR_TYPE, src, bout, yi, xi + 1, cout, src_w, src_h, 1, 1, true, in01);
+ T_LOAD_NHWC_WITH_DILATION(SRC_DATA_TYPE, 1, 1, N0, SRC_TENSOR_TYPE, src, bout, yi + 1, xi, cout, src_w, src_h, 1, 1, true, in10);
+ T_LOAD_NHWC_WITH_DILATION(SRC_DATA_TYPE, 1, 1, N0, SRC_TENSOR_TYPE, src, bout, yi + 1, xi + 1, cout, src_w, src_h, 1, 1, true, in11);
+#else // BORDER_MODE_REPLICATE
+ const int xi0 = clamp(xi, 0, (int)src_w - 1);
+ const int yi0 = clamp(yi, 0, (int)src_h - 1);
+ const int xi1 = clamp(xi + 1, 0, (int)src_w - 1);
+ const int yi1 = clamp(yi + 1, 0, (int)src_h - 1);
+
+ T_LOAD_NHWC_WITH_DILATION(SRC_DATA_TYPE, 1, 1, N0, SRC_TENSOR_TYPE, src, bout, yi0, xi0, cout, src_w, src_h, 1, 1, false, in00);
+ T_LOAD_NHWC_WITH_DILATION(SRC_DATA_TYPE, 1, 1, N0, SRC_TENSOR_TYPE, src, bout, yi0, xi1, cout, src_w, src_h, 1, 1, false, in01);
+ T_LOAD_NHWC_WITH_DILATION(SRC_DATA_TYPE, 1, 1, N0, SRC_TENSOR_TYPE, src, bout, yi1, xi0, cout, src_w, src_h, 1, 1, false, in10);
+ T_LOAD_NHWC_WITH_DILATION(SRC_DATA_TYPE, 1, 1, N0, SRC_TENSOR_TYPE, src, bout, yi1, xi1, cout, src_w, src_h, 1, 1, false, in11);
+#endif // BORDER_MODE_REPLICATE
+
+ TILE(DST_DATA_TYPE, 1, N0, out);
+
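+    // Bilinear weights: a and a1 are the fractional parts of the input x and y coordinates, b and b1 their
+    // complements, so the four contributions (b * b1, a * b1, b * a1, a * a1) always sum to 1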
+#if defined(IS_FLOATING_POINT)
+ const SRC_DATA_TYPE a = (SRC_DATA_TYPE)(xi_f - (float)xi);
+ const SRC_DATA_TYPE b = (SRC_DATA_TYPE)(1.f - a);
+ const SRC_DATA_TYPE a1 = (SRC_DATA_TYPE)(yi_f - (float)yi);
+ const SRC_DATA_TYPE b1 = (SRC_DATA_TYPE)(1.f - a1);
+
+ // Calculate the output
+ out[0].v = ((in00[0].v * b * b1) + (in01[0].v * a * b1) + (in10[0].v * b * a1) + (in11[0].v * a * a1));
+#else // defined(IS_FLOATING_POINT)
+
+ const float a = (xi_f - (float)xi);
+ const float b = (1.f - a);
+ const float a1 = (yi_f - (float)yi);
+ const float b1 = (1.f - a1);
+
+ out[0].v = CONVERT_SAT((CONVERT(in00[0].v, VEC_DATA_TYPE(float, N0)) * b * b1) +
+ (CONVERT(in01[0].v, VEC_DATA_TYPE(float, N0)) * a * b1) +
+ (CONVERT(in10[0].v, VEC_DATA_TYPE(float, N0)) * b * a1) +
+ (CONVERT(in11[0].v, VEC_DATA_TYPE(float, N0)) * a * a1),
+ VEC_DATA_TYPE(DST_DATA_TYPE, N0));
+#endif // defined(IS_FLOATING_POINT)
+
+ TILE(uint, 1, 1, dst_indirect_y);
+
+ // Calculate the destination indirect Y
+ dst_indirect_y[0].v = xo + (yo * (int)(dst_w)) + bout * (int)(dst_w * dst_h);
+
+ bool x_cond = PARTIAL_N0 != 0 && get_global_id(0) == 0;
+
+ T_STORE_INDIRECT_WIDTH_SELECT(DST_DATA_TYPE, 1, N0, PARTIAL_N0, DST_TENSOR_TYPE, dst, cout, dst_stride_y, x_cond, out, dst_indirect_y);
+}
+#endif /* SCALE_BILINEAR */
diff --git a/src/core/CL/cl_kernels/space_to_batch.cl b/src/core/CL/cl_kernels/nhwc/space_to_batch.cl
index cb11786ac4..695bd4c217 100644
--- a/src/core/CL/cl_kernels/space_to_batch.cl
+++ b/src/core/CL/cl_kernels/nhwc/space_to_batch.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,75 +24,6 @@
#include "helpers.h"
#if defined(BATCH_SIZE) && defined(DATA_TYPE) && defined(WIDTH_IN) && defined(HEIGHT_IN)
-/** Calculate the space to batch conversion.
- *
- * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
- * @note The block shape tensor rank must be passed at compile time using -DBLOCK_SHAPE_DIM. e.g. -DBLOCK_SHAPE_DIM=2
- *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: All
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source image
- * @param[in] paddings_ptr Pointer to the second source image. Supported data types: S32
- * @param[in] paddings_stride_x Stride of the paddinds tensor in X dimension (in bytes)
- * @param[in] paddings_step_x paddings_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] paddings_stride_y Stride of the paddinds tensor in Y dimension (in bytes)
- * @param[in] paddings_step_y paddings_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] paddingse_offset_first_element_in_bytes The offset of the first element in the second source image
- * @param[in] block_shape_ptr Pointer to the block shape tensor. Supported data types: S32
- * @param[in] block_shape_stride_x Stride of the block shape tensor in X dimension (in bytes)
- * @param[in] block_shape_step_x block_shape_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] block_shape_offset_first_element_in_bytes The offset of the first element in the block shapetensor
- * @param[in] batch_id The output tensor batch id
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
- */
-__kernel void space_to_batch_nchw(
- TENSOR4D_DECLARATION(input),
- IMAGE_DECLARATION(paddings),
- VECTOR_DECLARATION(block_shape),
- const int batch_id,
- TENSOR3D_DECLARATION(output))
-{
- Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, 0);
- Image pad = CONVERT_TO_IMAGE_STRUCT_NO_STEP(paddings);
- Vector block = CONVERT_TO_VECTOR_STRUCT_NO_STEP(block_shape);
- Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
-
- const int pad_left_x = *((__global int *)offset(&pad, 0, 0));
- const int pad_right_x = *((__global int *)offset(&pad, 1, 0));
- const int pad_left_y = *((__global int *)offset(&pad, 0, 1));
- const int pad_right_y = *((__global int *)offset(&pad, 1, 1));
-
- int block_x = *((__global int *)vector_offset(&block, 0));
- int block_y = *((__global int *)vector_offset(&block, 1));
-
- const int out_x = get_global_id(0);
- const int out_y = get_global_id(1);
- const int z = get_global_id(2);
-
- const int pos_x = out_x * block_x + ((batch_id / BATCH_IN) % block_x);
- const int pos_y = out_y * block_y + ((batch_id / BATCH_IN) / block_x);
-
- if(((pos_y >= pad_left_y) && (pos_y < pad_left_y + HEIGHT_IN) && (pos_x >= pad_left_x) && (pos_x < pad_left_x + WIDTH_IN)))
- {
- const int w = batch_id % BATCH_IN;
- const int in_x = pos_x - pad_left_x;
- const int in_y = pos_y - pad_left_y;
-
- *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)tensor4D_offset(&in, in_x, in_y, z, w));
- }
-}
/** Calculate the space to batch conversion. (NHWC)
*
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
@@ -133,7 +64,7 @@ __kernel void space_to_batch_nhwc(
const int batch_id,
TENSOR3D_DECLARATION(output))
{
- Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, 0);
+ Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input);
Image pad = CONVERT_TO_IMAGE_STRUCT_NO_STEP(paddings);
Vector block = CONVERT_TO_VECTOR_STRUCT_NO_STEP(block_shape);
Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
@@ -165,62 +96,6 @@ __kernel void space_to_batch_nhwc(
#endif // defined(BATCH_SIZE) && defined(DATA_TYPE) && defined(WIDTH_IN) && defined(HEIGHT_IN)
#if defined(BATCH_SIZE) && defined(DATA_TYPE) && defined(BLOCK_SHAPE_X) && defined(BLOCK_SHAPE_Y) && defined(PAD_LEFT_X) && defined(PAD_RIGHT_X) && defined(PAD_LEFT_Y) && defined(PAD_RIGHT_Y) && defined(WIDTH_IN) && defined(HEIGHT_IN)
-/** Calculate the space to batch conversion.
- *
- * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
- * @note The input tensor batch size must be passed at compile time using -DBATCH_SIZE. e.g. -DBATCH_SIZE=2
- * @note The block shape x must be passed at compile time using -DBLOCK_SHAPE_X. e.g. -DBLOCK_SHAPE_X=2
- * @note The block shape y must be passed at compile time using -DBLOCK_SHAPE_Y. e.g. -DBLOCK_SHAPE_Y=2
- * @note The starting pad value of x must be passed at compile time using -DPAD_LEFT_X. e.g. -DPAD_LEFT_X=2
- * @note The ending pad value of x must be passed at compile time using -DPAD_RIGHT_X. e.g. -DPAD_RIGHT_X=2
- * @note The starting pad value of y must be passed at compile time using -DPAD_LEFT_Y. e.g. -DPAD_LEFT_Y=2
- * @note The ending pad value of y must be passed at compile time using -DPAD_RIGHT_Y. e.g. -DPAD_RIGHT_X=2
- *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: All
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source image
- * @param[in] batch_id The output tensor batch id
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
- */
-__kernel void space_to_batch_static_nchw(
- TENSOR4D_DECLARATION(input),
- const int batch_id,
- TENSOR3D_DECLARATION(output))
-{
- Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, 0);
- Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
-
- int block_x = BLOCK_SHAPE_X;
- int block_y = BLOCK_SHAPE_Y;
-
- const int out_x = get_global_id(0);
- const int out_y = get_global_id(1);
- const int z = get_global_id(2);
-
- const int pos_x = out_x * block_x + ((batch_id / BATCH_IN) % block_x);
- const int pos_y = out_y * block_y + ((batch_id / BATCH_IN) / block_x);
-
- if(pos_y >= PAD_LEFT_Y && pos_y < PAD_LEFT_Y + HEIGHT_IN && pos_x >= PAD_LEFT_X && pos_x < PAD_LEFT_X + WIDTH_IN)
- {
- const int w = batch_id % BATCH_IN;
- const int in_x = pos_x - PAD_LEFT_X;
- const int in_y = pos_y - PAD_LEFT_Y;
-
- *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)tensor4D_offset(&in, in_x, in_y, z, w));
- }
-}
/** Calculate the space to batch conversion. (NHWC)
*
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
@@ -255,7 +130,7 @@ __kernel void space_to_batch_static_nhwc(
const int batch_id,
TENSOR3D_DECLARATION(output))
{
- Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, 0);
+ Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input);
Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
int block_x = BLOCK_SHAPE_X;
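
For reference, the sketch below shows how the compile-time -D options documented for these space-to-batch kernels are typically passed to clBuildProgram when compiling such a kernel on its own with the plain OpenCL host API. The helper function and the concrete option values are hypothetical and only illustrate the build-option mechanism; inside the library the equivalent options are assembled by the kernel's configure step.

#include <CL/cl.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper: build a program containing space_to_batch_static_nhwc
 * with the compile-time definitions its documentation requires. */
static cl_int build_space_to_batch_program(cl_program program, cl_device_id device)
{
    const char *options =
        "-DDATA_TYPE=float -DBATCH_SIZE=2 "
        "-DBLOCK_SHAPE_X=2 -DBLOCK_SHAPE_Y=2 "
        "-DPAD_LEFT_X=0 -DPAD_RIGHT_X=0 -DPAD_LEFT_Y=0 -DPAD_RIGHT_Y=0 "
        "-DWIDTH_IN=32 -DHEIGHT_IN=32";

    cl_int err = clBuildProgram(program, 1, &device, options, NULL, NULL);
    if (err != CL_SUCCESS)
    {
        /* Dump the build log: a missing -D option shows up here as an undefined symbol. */
        size_t log_size = 0;
        clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, 0, NULL, &log_size);
        char *log = (char *)malloc(log_size + 1);
        if (log != NULL)
        {
            clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, log_size, log, NULL);
            log[log_size] = '\0';
            fprintf(stderr, "Build failed:\n%s\n", log);
            free(log);
        }
    }
    return err;
}
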
diff --git a/src/core/CL/cl_kernels/nhwc/space_to_depth.cl b/src/core/CL/cl_kernels/nhwc/space_to_depth.cl
new file mode 100644
index 0000000000..10aac6d5fb
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/space_to_depth.cl
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2019-2021, 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(DATA_TYPE) && defined(BLOCK_SHAPE) && defined(CHANNEL_SIZE)
+/** Space to depth transformation. (NHWC)
+ *
+ * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
+ * @note The channel size must be passed at compile time using -DCHANNEL_SIZE. e.g. -DCHANNEL_SIZE=2
+ * @note The block shape must be passed at compile time using -DBLOCK_SHAPE. e.g. -DBLOCK_SHAPE=2
+ *
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: All
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
+ * @param[in] batch_id The input tensor batch id
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  output_stride_z                            Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void space_to_depth_nhwc(
+ TENSOR4D_DECLARATION(input),
+ const int batch_id,
+ TENSOR3D_DECLARATION(output))
+{
+ Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input);
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
+
+ const int r = (CHANNEL_SIZE / (BLOCK_SHAPE * BLOCK_SHAPE));
+ const int x = get_global_id(1);
+ const int y = get_global_id(2);
+ const int z = get_global_id(0) % r;
+
+ const int in_x = x * BLOCK_SHAPE + (get_global_id(0) / r) % BLOCK_SHAPE;
+ const int in_y = y * BLOCK_SHAPE + (get_global_id(0) / r) / BLOCK_SHAPE;
+
+ *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)tensor4D_offset(&in, z, in_x, in_y, batch_id));
+}
+#endif // defined(DATA_TYPE) && defined(BLOCK_SHAPE) && defined(CHANNEL_SIZE)
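
The space_to_depth_nhwc kernel above maps each output element back to its source element with a modulo/division decomposition of the output channel index. The plain-C reference model below sketches the same mapping; it is illustrative only, assumes dense NHWC float buffers, and uses in_c for the quantity the kernel derives as r = CHANNEL_SIZE / (BLOCK_SHAPE * BLOCK_SHAPE).

/* Illustrative reference model (not part of this patch) of the index mapping
 * used by space_to_depth_nhwc, assuming dense NHWC float buffers. */
static void space_to_depth_nhwc_ref(const float *in, float *out,
                                    int batch, int in_h, int in_w, int in_c,
                                    int block)
{
    const int out_h = in_h / block;
    const int out_w = in_w / block;
    const int out_c = in_c * block * block;

    for (int b = 0; b < batch; ++b)
        for (int yo = 0; yo < out_h; ++yo)
            for (int xo = 0; xo < out_w; ++xo)
                for (int co = 0; co < out_c; ++co)
                {
                    const int ci   = co % in_c;                /* z    = gid(0) % r in the kernel */
                    const int off  = co / in_c;                /* block-local offset              */
                    const int in_x = xo * block + off % block; /* in_x in the kernel              */
                    const int in_y = yo * block + off / block; /* in_y in the kernel              */

                    out[((b * out_h + yo) * out_w + xo) * out_c + co] =
                        in[((b * in_h + in_y) * in_w + in_x) * in_c + ci];
                }
}
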
diff --git a/src/core/CL/cl_kernels/nhwc/transposed_convolution.cl b/src/core/CL/cl_kernels/nhwc/transposed_convolution.cl
new file mode 100644
index 0000000000..1393537283
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/transposed_convolution.cl
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2022-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "helpers.h"
+#include "tile_helpers.h"
+
+//! @cond Doxygen_Suppress
+/** OpenCL kernel to compute the transposed convolution.
+ *
+ * @note Data layout supported: NHWC
+ * @note Data type supported: F32/F16/QASYMM8/QASYMM8_SIGNED
+ * @note The transposed convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
+ * @note The transposed convolution strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y (e.g. -DSTRIDE_X=2, -DSTRIDE_Y=2)
+ * @note The spatial dimensions of the weights must be passed at compile time using -DWEI_WIDTH and -DWEI_HEIGHT (e.g. -DWEI_WIDTH=9, -DWEI_HEIGHT=9)
+ * @note The spatial dimensions of the source tensor must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT (e.g. -DSRC_WIDTH=96, -DSRC_HEIGHT=64)
+ * @note The spatial dimensions of the destination tensor must be passed at compile time using -DDST_WIDTH and -DDST_HEIGHT (e.g. -DDST_WIDTH=96, -DDST_HEIGHT=64)
+ * @note The channels of the source tensor must be passed at compile time using -DSRC_CHANNELS (e.g. -DSRC_CHANNELS=64)
+ * @note The channels of the destination tensor must be passed at compile time using -DDST_CHANNELS (e.g. -DDST_CHANNELS=64)
+ * @note The tensor type (currently only "BUFFER" is supported) of the source tensor must be passed at compile time using -DSRC_TENSOR_TYPE (e.g. -DSRC_TENSOR_TYPE=BUFFER)
+ * @note The tensor type (currently only "BUFFER" is supported) of the weights tensor must be passed at compile time using -DWEI_TENSOR_TYPE (e.g. -DWEI_TENSOR_TYPE=BUFFER)
+ * @note The tensor type (currently only "BUFFER" is supported) of the destination tensor must be passed at compile time using -DDST_TENSOR_TYPE (e.g. -DDST_TENSOR_TYPE=BUFFER)
+ * @note The data type of the source tensor must be passed at compile time using -DSRC_DATA_TYPE (e.g. -DSRC_DATA_TYPE=float)
+ * @note The data type of the weights tensor must be passed at compile time using -DWEI_DATA_TYPE (e.g. -DWEI_DATA_TYPE=float)
+ * @note The data type of the destination tensor must be passed at compile time using -DDST_DATA_TYPE (e.g. -DDST_DATA_TYPE=float)
+ * @note The data type of the bias tensor must be passed at compile time using -DBIA_DATA_TYPE (e.g. -DBIA_DATA_TYPE=float)
+ * @note The data type of the accumulators must be passed at compile time using -DACC_DATA_TYPE (e.g. -DACC_DATA_TYPE=float)
+ * @note The number of M0 rows (width*height) to process must be passed at compile time using -DM0 (e.g. -DM0=2)
+ * @note The number of N0 output channels to process must be passed at compile time using -DN0 (e.g. -DN0=2)
+ * @note The number of K0 inner accumulations must be passed at compile time using -DK0 (e.g. -DK0=2)
+ * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_N0 (e.g. -DPARTIAL_N0=1)
+ * @note If bias exists, the compile time argument -DHAS_BIAS should be passed
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ * - M0 = 1
+ * - N0 = 1, 2, 3, 4, 8, 16
+ * - K0 = 1, 2, 3, 4, 8, 16
+ *
+ * @note In case of QASYMM8/QASYMM8_SIGNED, the following extra information must be passed at compile time:
+ * - -DIS_QUANTIZED
+ * - The destination quantization multiplier e.g. -DDST_MULTIPLIER=1234
+ * - The destination quantization shift e.g. -DDST_SHIFT=4
+ * - The destination offset e.g. -DDST_OFFSET=4
+ * - The source offset e.g. -DSRC_OFFSET=4
+ * - The weights offset e.g. -DWEI_OFFSET=4
+ * - The quantized zero value e.g. -DZERO_VALUE=4
+ *
+ * @param[in] src_img (Not supported) Read only cl_image object for the source tensor. Included when SRC_TENSOR_TYPE=IMAGE
+ * @param[in]  src_ptr                                    Pointer to the source tensor. Supported data type: F32/F16/QASYMM8/QASYMM8_SIGNED
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_c The size of the channels (IFM) dimension of the source tensor
+ * @param[in] src_w The size of the width dimension of the source tensor
+ * @param[in] src_h The size of the height dimension of the source tensor
+ * @param[in] src_n The size of the batches dimension of the source tensor
+ * @param[out] dst_img (Not supported) Write only cl_image object for the destination tensor. Included when DST_TENSOR_TYPE=IMAGE
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data type: same as @p src_ptr
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_c The size of the channels (OFM) dimension of the destination tensor
+ * @param[in] dst_w The size of the width dimension of the destination tensor
+ * @param[in] dst_h The size of the height dimension of the destination tensor
+ * @param[in] dst_n The size of the batches dimension of the destination tensor
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] wei_img (Not supported) Read only cl_image object for the weights tensor. Included when WEI_TENSOR_TYPE=IMAGE
+ * @param[in] wei_ptr Pointer to the weights tensor. Supported data type: same as @p src_ptr
+ * @param[in] wei_stride_y Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] wei_stride_z Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] wei_stride_w Stride of the weights tensor in W dimension (in bytes)
+ * @param[in] wei_c The size of the channels (IFM) dimension of the weights tensor
+ * @param[in] wei_w The size of the width dimension of the weights tensor
+ * @param[in] wei_h The size of the height dimension of the weights tensor
+ * @param[in] wei_n The size of the batches (OFM) dimension of the weights tensor
+ * @param[in]  wei_offset_first_element_in_bytes          The offset of the first element in the weights tensor
+ * @param[in]  bia_ptr                                    (Optional) Pointer to the bias tensor. Supported data type: same as @p src_ptr (if F32/F16)
+ * @param[in] bia_stride_x (Optional) Stride of the bias tensor in X dimension (in bytes)
+ * @param[in] bia_step_x (Optional) bia_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  bia_offset_first_element_in_bytes          (Optional) The offset of the first element in the bias tensor
+ */
+//! @endcond
+__kernel void transposed_convolution_nhwc(
+ TENSOR4D_RO_T(src, SRC_TENSOR_TYPE),
+ TENSOR4D_WO_T(dst, DST_TENSOR_TYPE),
+ TENSOR4D_RO_T(wei, WEI_TENSOR_TYPE)
+#if defined(HAS_BIAS)
+ ,
+ VECTOR_DECLARATION(bia)
+#endif // defined(HAS_BIAS)
+)
+{
+ // All the tensor dimensions are passed at compile time.
+ // In case of dynamic tensor support, the following dimensions should be passed as function argument.
+#define _IWEI_WIDTH WEI_WIDTH
+#define _IWEI_HEIGHT WEI_HEIGHT
+#define _ISRC_WIDTH SRC_WIDTH
+#define _ISRC_HEIGHT SRC_HEIGHT
+#define _ISRC_CHANNELS SRC_CHANNELS
+#define _IDST_WIDTH DST_WIDTH
+#define _IDST_HEIGHT DST_HEIGHT
+#define _IDST_CHANNELS DST_CHANNELS
+#define _IY_MULTIPLIER (_IWEI_WIDTH * _IWEI_HEIGHT)
+
+#if defined(IS_QUANTIZED)
+#define _IOUTPUT_TILE cq
+#else // defined(IS_QUANTIZED)
+#define _IOUTPUT_TILE c
+#endif // defined(IS_QUANTIZED)
+
+ const int cout = GET_SPATIAL_IDX(0, N0, PARTIAL_N0); // OFM
+ const int mout = GET_SPATIAL_IDX(1, M0, 0); // WIDTH x HEIGHT
+ const int bout = GET_SPATIAL_IDX(2, 1, 0); // BATCH SIZE IDX
+
+ // .v = access the whole vector (OpenCL vector)
+ // .s[x] = access the vector element at position x (scalar access)
+ TILE(int, 1, M0, xi);
+ TILE(int, 1, M0, yi);
+ TILE(int, 1, M0, xu);
+ TILE(int, 1, M0, yu);
+
+ // Convert the linear index to coordinate
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ xu[0].s[i] = ((mout + i) % _IDST_WIDTH) - PAD_LEFT;
+ yu[0].s[i] = ((mout + i) / _IDST_WIDTH) - PAD_TOP;
+ xi[0].s[i] = ceil(xu[0].s[i] / (float)STRIDE_X);
+ yi[0].s[i] = ceil(yu[0].s[i] / (float)STRIDE_Y);
+ })
+
+ // Initialize the accumulators
+ TILE(ACC_DATA_TYPE, M0, N0, c);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c[i].v = 0;
+ })
+
+ // Flipped indices
+ const int x_start = _IWEI_WIDTH - (xi[0].s[0] * STRIDE_X - xu[0].s[0]) - 1;
+ const int y_start = _IWEI_HEIGHT - (yi[0].s[0] * STRIDE_Y - yu[0].s[0]) - 1;
+
+ for(int yk = y_start, yi_step = 0; yk >= 0; yk -= STRIDE_Y, ++yi_step)
+ {
+ for(int xk = x_start, xi_step = 0; xk >= 0; xk -= STRIDE_X, ++xi_step)
+ {
+ const int weights_y = cout * _IY_MULTIPLIER + yk * _IWEI_WIDTH + xk;
+
+ TILE(int, 1, M0, my);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ int x_s = xi[0].s[i] + xi_step;
+ int y_s = yi[0].s[i] + yi_step;
+            my[0].s[i] = x_s + y_s * _ISRC_WIDTH;
+ my[0].s[i] = my[0].s[i] + bout * (int)(_ISRC_WIDTH * _ISRC_HEIGHT);
+ my[0].s[i] = select(-1, my[0].s[i], x_s >= 0);
+ my[0].s[i] = select(-1, my[0].s[i], x_s < _ISRC_WIDTH);
+ my[0].s[i] = select(-1, my[0].s[i], y_s >= 0);
+ my[0].s[i] = select(-1, my[0].s[i], y_s < _ISRC_HEIGHT);
+ })
+
+ int ck = 0;
+ for(; ck <= (_ISRC_CHANNELS - K0); ck += K0)
+ {
+ TILE(SRC_DATA_TYPE, M0, K0, a);
+ TILE(WEI_DATA_TYPE, N0, K0, b);
+
+ // Initialize tiles
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = ZERO_VALUE;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ b[i].v = ZERO_VALUE;
+ })
+
+ // Load tile from the src tensor
+ T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, K0, SRC_TENSOR_TYPE, src, ck, src_stride_y, my, a);
+
+ // Load tile from the weights tensor
+ T_LOAD(WEI_DATA_TYPE, N0, K0, WEI_TENSOR_TYPE, wei, ck, weights_y, _IY_MULTIPLIER, wei_stride_y, b);
+
+ // Compute the matrix multiplication between two tiles
+ T_MMUL(SRC_DATA_TYPE, WEI_DATA_TYPE, ACC_DATA_TYPE, M0, N0, K0, NT, T, a, b, c);
+
+#if defined(IS_QUANTIZED)
+ // Apply the offset correction (correction usually needed for asymmetric quantized computation)
+ // The computation is not performed if both SRC_OFFSET and WEI_OFFSET are zero
+ T_OFFSET_CORRECTION(ACC_DATA_TYPE, M0, N0, K0, SRC_OFFSET, WEI_OFFSET, a, b, c);
+#endif // defined(IS_QUANTIZED)
+ }
+
+ // This #if directive should be removed in case of dynamic tensor support
+#if defined(LEFTOVER_LOOP)
+ // Left-over accumulations
+ for(; ck < _ISRC_CHANNELS; ++ck)
+ {
+ TILE(SRC_DATA_TYPE, M0, 1, a);
+ TILE(WEI_DATA_TYPE, N0, 1, b);
+
+ // Initialize tiles
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = ZERO_VALUE;
+ })
+
+ // Load tile from the src tensor
+ // The T_LOAD for the left-over elements can only use BUFFER because we load one element per iteration
+ T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, 1, BUFFER, src, ck, src_stride_y, my, a);
+
+ // Load tile from the weights tensor
+ // The T_LOAD for the left-over elements can only use BUFFER because we load one element per iteration
+ T_LOAD(WEI_DATA_TYPE, N0, 1, BUFFER, wei, ck, weights_y, _IY_MULTIPLIER, wei_stride_y, b);
+
+ // Compute the matrix multiplication between two tiles
+ T_MMUL(SRC_DATA_TYPE, WEI_DATA_TYPE, ACC_DATA_TYPE, M0, N0, 1, NT, T, a, b, c);
+
+#if defined(IS_QUANTIZED)
+ // Apply the offset correction (correction usually needed for asymmetric quantized computation)
+ // The computation is not performed if both SRC_OFFSET and WEI_OFFSET are zero
+ T_OFFSET_CORRECTION(ACC_DATA_TYPE, M0, N0, 1, SRC_OFFSET, WEI_OFFSET, a, b, c);
+#endif // defined(IS_QUANTIZED)
+ }
+#endif // defined(LEFTOVER_LOOP)
+ }
+ }
+
+#if defined(IS_QUANTIZED)
+ const int total_pixels = floor((1 + y_start / (float)STRIDE_Y)) * floor(1 + x_start / (float)STRIDE_X);
+
+ T_ADD_CONSTANT(ACC_DATA_TYPE, M0, N0, c, (total_pixels * _ISRC_CHANNELS * SRC_OFFSET * WEI_OFFSET), c);
+#endif // defined(IS_QUANTIZED)
+
+#if defined(HAS_BIAS)
+ TILE(BIA_DATA_TYPE, 1, N0, bias0);
+
+ T_LOAD(BIA_DATA_TYPE, 1, N0, BUFFER, bia, cout, 0, 1, 0, bias0);
+
+ // c = c + bias[broadcasted]
+ T_ELTWISE_BROADCAST_ADD_X(ACC_DATA_TYPE, M0, N0, c, bias0, c);
+
+#endif // HAS_BIAS
+
+#if defined(IS_QUANTIZED)
+
+ TILE(DST_DATA_TYPE, M0, N0, cq);
+
+ // Quantize the tile
+ T_QUANTIZE8_ASYMMETRIC(ACC_DATA_TYPE, DST_DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, c, cq);
+#endif // defined(IS_QUANTIZED)
+
+ TILE(uint, M0, 1, dst_indirect_y);
+
+ // Calculate the destination indirect Y
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ dst_indirect_y[i].v = (uint)min(mout + i, (int)(_IDST_WIDTH * _IDST_HEIGHT) - 1);
+ dst_indirect_y[i].v += bout * (int)(_IDST_WIDTH * _IDST_HEIGHT);
+ })
+
+ bool x_cond = PARTIAL_N0 != 0 && get_global_id(0) == 0;
+
+ // Store the tile in reverse order so the invalid values are overwritten with the valid ones
+ T_STORE_INDIRECT_WIDTH_SELECT(DST_DATA_TYPE, M0, N0, PARTIAL_N0, DST_TENSOR_TYPE, dst, cout, dst_stride_y, x_cond, _IOUTPUT_TILE, dst_indirect_y);
+
+#undef _IWEI_WIDTH
+#undef _IWEI_HEIGHT
+#undef _ISRC_WIDTH
+#undef _ISRC_HEIGHT
+#undef _ISRC_CHANNELS
+#undef _IDST_WIDTH
+#undef _IDST_HEIGHT
+#undef _IDST_CHANNELS
+#undef _IY_MULTIPLIER
+}
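
The index arithmetic in transposed_convolution_nhwc (the ceil-divided input start, the flipped weight start and the stride-sized steps through the weights) implements an output-centric gather. The float-only C sketch below mirrors that arithmetic for a single output element; it is illustrative only and leaves out the M0/N0/K0 tiling, the bias addition and the quantization path. It assumes dense buffers with src in NHWC and weights laid out as [OFM][H][W][IFM], matching how weights_y addresses the weights in the kernel.

/* Integer ceil division that also handles negative numerators, mirroring
 * ceil(xu / (float)STRIDE) in the kernel. */
static int ceil_div(int a, int b)
{
    return (a >= 0) ? (a + b - 1) / b : -((-a) / b);
}

/* Illustrative reference (not part of this patch): value of one output element
 * of transposed_convolution_nhwc, float-only, no bias/quantization/tiling. */
static float transposed_conv_ref_point(const float *src, const float *wei,
                                       int src_w, int src_h, int src_c,
                                       int wei_w, int wei_h,
                                       int stride_x, int stride_y,
                                       int pad_left, int pad_top,
                                       int b, int xo, int yo, int co)
{
    const int xu  = xo - pad_left;           /* xu[0] in the kernel */
    const int yu  = yo - pad_top;            /* yu[0] in the kernel */
    const int xi0 = ceil_div(xu, stride_x);  /* xi[0] in the kernel */
    const int yi0 = ceil_div(yu, stride_y);  /* yi[0] in the kernel */

    /* Flipped weight start indices, as computed before the main loops. */
    const int x_start = wei_w - (xi0 * stride_x - xu) - 1;
    const int y_start = wei_h - (yi0 * stride_y - yu) - 1;

    float acc = 0.0f;
    for (int yk = y_start, yi = yi0; yk >= 0; yk -= stride_y, ++yi)
    {
        for (int xk = x_start, xi = xi0; xk >= 0; xk -= stride_x, ++xi)
        {
            if (xi < 0 || xi >= src_w || yi < 0 || yi >= src_h)
                continue; /* the kernel zero-fills these loads via the my indirection */

            for (int ci = 0; ci < src_c; ++ci)
            {
                const float s = src[((b * src_h + yi) * src_w + xi) * src_c + ci];
                const float w = wei[((co * wei_h + yk) * wei_w + xk) * src_c + ci];
                acc += s * w;
            }
        }
    }
    return acc;
}
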
diff --git a/src/core/CL/cl_kernels/upsample_layer.cl b/src/core/CL/cl_kernels/nhwc/upsample_layer.cl
index d0cc0f24b7..74b9674a88 100644
--- a/src/core/CL/cl_kernels/upsample_layer.cl
+++ b/src/core/CL/cl_kernels/nhwc/upsample_layer.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,61 +23,6 @@
*/
#include "helpers.h"
-/** This function applies upsample on an input image. (NCHW)
- *
- * @attention The following variables must be passed at compile time:
- * -# -DDATA_TYPE = Tensor data type. Supported data types: All
- * -# -DVEC_SIZE_IN = Input vector size
- * -# -DVEC_SIZE_OUT = Output vector size
- * -# -DLAST_ACCESSED_X_IN = The input element that is on the X border (threads trying to set this, might need to step back a bit)
- * -# -DLAST_ACCESSED_X_OUT = The output element that is on the X border (threads trying to set this, might need to step back a bit)
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: All
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[out] dst_ptr Pointer to the destination image. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination image
- */
-__kernel void upsample_layer_nchw(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst))
-{
- Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
- Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
-
-#if defined(VEC_SIZE_IN) && defined(VEC_SIZE_OUT) && defined(LAST_ACCESSED_X_IN) && defined(LAST_ACCESSED_X_OUT)
- // Check if access on width gets out of bounds
- // If it does shift access vector to access elements within bounds
- const int xi_in = (int)(get_global_id(0) * VEC_SIZE_IN);
- const int xi_out = (int)(get_global_id(0) * VEC_SIZE_OUT);
- src.ptr -= max(xi_in - (int)LAST_ACCESSED_X_IN, 0) * src_stride_x;
- dst.ptr -= max(xi_out - (int)LAST_ACCESSED_X_OUT, 0) * dst_stride_x;
-
- VEC_DATA_TYPE(DATA_TYPE, 8)
- data = vload8(0, (__global DATA_TYPE *)src.ptr);
-
- VEC_DATA_TYPE(DATA_TYPE, 16)
- data_out = (VEC_DATA_TYPE(DATA_TYPE, 16))(data.s0, data.s0, data.s1, data.s1, data.s2, data.s2, data.s3, data.s3, data.s4, data.s4, data.s5, data.s5, data.s6, data.s6, data.s7, data.s7);
-
- vstore16(data_out, 0, (__global DATA_TYPE *)dst.ptr);
- vstore16(data_out, 0, (__global DATA_TYPE *)tensor3D_offset(&dst, 0, 1, 0));
-#else // !defined(VEC_SIZE_IN) && defined(VEC_SIZE_OUT) && defined(LAST_ACCESSED_X_IN) && defined(LAST_ACCESSED_X_OUT)
- *((__global DATA_TYPE *)tensor3D_offset(&dst, 0, 0, 0)) = *((__global DATA_TYPE *)src.ptr);
- *((__global DATA_TYPE *)tensor3D_offset(&dst, 0, 1, 0)) = *((__global DATA_TYPE *)src.ptr);
-#endif // defined(VEC_SIZE_IN) && defined(VEC_SIZE_OUT) && defined(LAST_ACCESSED_X_IN) && defined(LAST_ACCESSED_X_OUT)
-}
-
/** This function applies upsample on an input image. (NHWC)
*
* @attention The following variables must be passed at compile time:
@@ -132,4 +77,4 @@ __kernel void upsample_layer_nhwc(
*((__global DATA_TYPE *)tensor3D_offset(&dst, 0, 0, 1)) = *((__global DATA_TYPE *)src.ptr);
*((__global DATA_TYPE *)tensor3D_offset(&dst, 0, 1, 1)) = *((__global DATA_TYPE *)src.ptr);
#endif // defined(VEC_SIZE_IN) && defined(VEC_SIZE_OUT) && defined(LAST_ACCESSED_X_IN) && defined(LAST_ACCESSED_X_OUT)
-}
+}
\ No newline at end of file
diff --git a/src/core/CL/cl_kernels/winograd_filter_transform.cl b/src/core/CL/cl_kernels/nhwc/winograd_filter_transform.cl
index 5c3bb8aa9b..45fbc1b641 100644
--- a/src/core/CL/cl_kernels/winograd_filter_transform.cl
+++ b/src/core/CL/cl_kernels/nhwc/winograd_filter_transform.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 Arm Limited.
+ * Copyright (c) 2018-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,8 +23,6 @@
*/
#include "helpers.h"
-#if defined(SRC_DIM_Z)
-
#define OUTPUT_ROW_2x2_7x7(out, tmp) \
({ \
out.s0 = -tmp.s0 / 36.f; \
@@ -37,291 +35,9 @@
out.s7 = tmp.s6; \
})
-/** This OpenCL kernel performs Winograd filter transform 3x3/3x1/1x3 when the data layout is NCHW and the output tile is 2x2/2x1/1x2
- *
- * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
- * @note If this kernel is used to perform Winograd filter transform 3x1, -DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note If this kernel is used to perform Winograd filter transform 1x3, -DWINOGRAD_FILTER_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_filter_transform_2x2_3x3_nchw(
- TENSOR4D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst))
-{
- Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DIM_Z);
-
- const __global uchar *src_addr = tensor4D_offset(&src, 0, 0, 0, 0);
-
- // Load the values from the input tensor
-#if defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL)
- VEC_DATA_TYPE(DATA_TYPE, 3)
- w0 = vload3(0, (__global DATA_TYPE *)(src_addr));
-#elif defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
- VEC_DATA_TYPE(DATA_TYPE, 3)
- w0 = (VEC_DATA_TYPE(DATA_TYPE, 3))(*((__global DATA_TYPE *)(src_addr + 0 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 1 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 2 * src_stride_y)));
-#else // defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
- VEC_DATA_TYPE(DATA_TYPE, 3)
- w0 = vload3(0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y));
- VEC_DATA_TYPE(DATA_TYPE, 3)
- w1 = vload3(0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y));
- VEC_DATA_TYPE(DATA_TYPE, 3)
- w2 = vload3(0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y));
-#endif // defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL)
-
- // Row 0
- VEC_DATA_TYPE(DATA_TYPE, 4)
- out0 = 0.0f;
- out0.s0 = (w0.s0);
- out0.s1 = (w0.s0 + w0.s1 + w0.s2) * 0.5f;
- out0.s2 = (w0.s0 + w0.s2 - w0.s1) * 0.5f;
- out0.s3 = (w0.s2);
-
-#if !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
- // Row 1
- VEC_DATA_TYPE(DATA_TYPE, 4)
- out1 = 0.0f;
- out1.s0 = (w0.s0 + w1.s0 + w2.s0) * 0.5f;
- out1.s1 = (w0.s0 + w1.s0 + w2.s0 + w0.s1 + w1.s1 + w2.s1 + w0.s2 + w1.s2 + w2.s2) * 0.25f;
- out1.s2 = (w0.s0 + w1.s0 + w2.s0 + w0.s2 + w1.s2 + w2.s2 - w0.s1 - w1.s1 - w2.s1) * 0.25f;
- out1.s3 = (w0.s2 + w1.s2 + w2.s2) * 0.5f;
-
- // Row 2
- VEC_DATA_TYPE(DATA_TYPE, 4)
- out2 = 0.0f;
- out2.s0 = (w0.s0 + w2.s0 - w1.s0) * 0.5f;
- out2.s1 = (w0.s0 + w2.s0 + w0.s1 + w2.s1 + w0.s2 + w2.s2 - w1.s0 - w1.s1 - w1.s2) * 0.25f;
- out2.s2 = (w0.s0 + w2.s0 + w1.s1 + w0.s2 + w2.s2 - w1.s0 - w0.s1 - w2.s1 - w1.s2) * 0.25f;
- out2.s3 = (w0.s2 + w2.s2 - w1.s2) * 0.5f;
-
- // Row 3
- VEC_DATA_TYPE(DATA_TYPE, 4)
- out3 = 0.0f;
- out3.s0 = (w2.s0);
- out3.s1 = (w2.s0 + w2.s1 + w2.s2) * 0.5f;
- out3.s2 = (w2.s0 + w2.s2 - w2.s1) * 0.5f;
- out3.s3 = (w2.s2);
-#endif // !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
-
- int z = get_global_id(2);
- int x0 = z / SRC_DIM_Z; // idx filter
- int y0 = z % SRC_DIM_Z; // idx channel
-
- // Get output address
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x0 * dst_stride_x + y0 * dst_stride_y;
-
- // Store the values across the channels
- // 16 channels for 3x3 kernels
- // 4 channels for 3x1 or 1x3 kernels
- *(__global DATA_TYPE *)(dst_addr + 0 * dst_stride_z) = out0.s0;
- *(__global DATA_TYPE *)(dst_addr + 1 * dst_stride_z) = out0.s1;
- *(__global DATA_TYPE *)(dst_addr + 2 * dst_stride_z) = out0.s2;
- *(__global DATA_TYPE *)(dst_addr + 3 * dst_stride_z) = out0.s3;
-
-#if !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
- *(__global DATA_TYPE *)(dst_addr + 4 * dst_stride_z) = out1.s0;
- *(__global DATA_TYPE *)(dst_addr + 5 * dst_stride_z) = out1.s1;
- *(__global DATA_TYPE *)(dst_addr + 6 * dst_stride_z) = out1.s2;
- *(__global DATA_TYPE *)(dst_addr + 7 * dst_stride_z) = out1.s3;
- *(__global DATA_TYPE *)(dst_addr + 8 * dst_stride_z) = out2.s0;
- *(__global DATA_TYPE *)(dst_addr + 9 * dst_stride_z) = out2.s1;
- *(__global DATA_TYPE *)(dst_addr + 10 * dst_stride_z) = out2.s2;
- *(__global DATA_TYPE *)(dst_addr + 11 * dst_stride_z) = out2.s3;
- *(__global DATA_TYPE *)(dst_addr + 12 * dst_stride_z) = out3.s0;
- *(__global DATA_TYPE *)(dst_addr + 13 * dst_stride_z) = out3.s1;
- *(__global DATA_TYPE *)(dst_addr + 14 * dst_stride_z) = out3.s2;
- *(__global DATA_TYPE *)(dst_addr + 15 * dst_stride_z) = out3.s3;
-#endif // !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
-}
-
-/** This OpenCL kernel performs Winograd filter transform 3x3/3x1/1x3 when the data layout is NCHW and the output tile is 4x4/4x1/1x4
- *
- * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
- * @note If this kernel is used to perform Winograd filter transform 3x1, -DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note If this kernel is used to perform Winograd filter transform 1x3, -DWINOGRAD_FILTER_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_filter_transform_4x4_3x3_nchw(
- TENSOR4D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst))
-{
- Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DIM_Z);
-
- const __global uchar *src_addr = tensor4D_offset(&src, 0, 0, 0, 0);
-
- // Load the values from the input tensor
-#if defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL)
- VEC_DATA_TYPE(DATA_TYPE, 3)
- w0 = vload3(0, (__global DATA_TYPE *)(src_addr));
-#elif defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
- VEC_DATA_TYPE(DATA_TYPE, 3)
- w0 = (VEC_DATA_TYPE(DATA_TYPE, 3))(*((__global DATA_TYPE *)(src_addr + 0 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 1 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 2 * src_stride_y)));
-#else // defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
- VEC_DATA_TYPE(DATA_TYPE, 3)
- w0 = vload3(0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y));
- VEC_DATA_TYPE(DATA_TYPE, 3)
- w1 = vload3(0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y));
- VEC_DATA_TYPE(DATA_TYPE, 3)
- w2 = vload3(0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y));
-#endif // defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL)
-
- // Row 0
- VEC_DATA_TYPE(DATA_TYPE, 8)
- out0 = 0.0f;
- out0.s0 = (w0.s0) / 16.f;
- out0.s1 = (-w0.s0 - w0.s1 - w0.s2) / 24.f;
- out0.s2 = (-w0.s0 + w0.s1 - w0.s2) / 24.f;
- out0.s3 = (w0.s0 + 2.f * w0.s1 + 4.f * w0.s2) / 96.f;
- out0.s4 = (w0.s0 - 2.f * w0.s1 + 4.f * w0.s2) / 96.f;
- out0.s5 = (w0.s2) / 4.f;
-
-#if !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
- // Row 1
- VEC_DATA_TYPE(DATA_TYPE, 8)
- out1 = 0.0f;
- out1.s0 = (-w0.s0 - w1.s0 - w2.s0) / 24.f;
- out1.s1 = (w0.s0 + w1.s0 + w2.s0 + w0.s1 + w1.s1 + w2.s1 + w0.s2 + w1.s2 + w2.s2) / 36.f;
- out1.s2 = (w0.s0 + w1.s0 + w2.s0 - w0.s1 - w1.s1 - w2.s1 + w0.s2 + w1.s2 + w2.s2) / 36.f;
- out1.s3 = (-w0.s0 - w1.s0 - w2.s0 + 2.f * (-w0.s1 - w1.s1 - w2.s1) + 4.f * (-w0.s2 - w1.s2 - w2.s2)) / 144.f;
- out1.s4 = (-w0.s0 - w1.s0 - w2.s0 + 2.f * (w0.s1 + w1.s1 + w2.s1) + 4.f * (-w0.s2 - w1.s2 - w2.s2)) / 144.f;
- out1.s5 = (-w0.s2 - w1.s2 - w2.s2) / 6.f;
-
- // Row 2
- VEC_DATA_TYPE(DATA_TYPE, 8)
- out2 = 0.0f;
- out2.s0 = (-w0.s0 + w1.s0 - w2.s0) / 24.f;
- out2.s1 = (w0.s0 - w1.s0 + w2.s0 + w0.s1 - w1.s1 + w2.s1 + w0.s2 - w1.s2 + w2.s2) / 36.f;
- out2.s2 = (w0.s0 - w1.s0 + w2.s0 - w0.s1 + w1.s1 - w2.s1 + w0.s2 - w1.s2 + w2.s2) / 36.f;
- out2.s3 = (-w0.s0 + w1.s0 - w2.s0 + 2.f * (-w0.s1 + w1.s1 - w2.s1) + 4.f * (-w0.s2 + w1.s2 - w2.s2)) / 144.f;
- out2.s4 = (-w0.s0 + w1.s0 - w2.s0 + 2.f * (w0.s1 - w1.s1 + w2.s1) + 4.f * (-w0.s2 + w1.s2 - w2.s2)) / 144.f;
- out2.s5 = (-w0.s2 + w1.s2 - w2.s2) / 6.f;
-
- // Row 3
- VEC_DATA_TYPE(DATA_TYPE, 8)
- out3 = 0.0f;
- out3.s0 = (w0.s0 + 2.f * w1.s0 + 4.f * w2.s0) / 96.f;
- out3.s1 = (-w0.s0 - 2.f * w1.s0 - 4.f * w2.s0 - w0.s1 - 2.f * w1.s1 - 4.f * w2.s1 - w0.s2 - 2.f * w1.s2 - 4.f * w2.s2) / 144.f;
- out3.s2 = (-w0.s0 - 2.f * w1.s0 - 4.f * w2.s0 + w0.s1 + 2.f * w1.s1 + 4.f * w2.s1 - w0.s2 - 2.f * w1.s2 - 4.f * w2.s2) / 144.f;
- out3.s3 = ((w0.s0 + 2.f * w1.s0 + 4.f * w2.s0) + 2.f * (w0.s1 + 2.f * w1.s1 + 4.f * w2.s1) + 4.f * (w0.s2 + 2.f * w1.s2 + 4.f * w2.s2)) / 576.f;
- out3.s4 = ((w0.s0 + 2.f * w1.s0 + 4.f * w2.s0) + 2.f * (-w0.s1 - 2.f * w1.s1 - 4.f * w2.s1) + 4.f * (w0.s2 + 2.f * w1.s2 + 4.f * w2.s2)) / 576.f;
- out3.s5 = (w0.s2 + 2.f * w1.s2 + 4.f * w2.s2) / 24.f;
-
- // Row 4
- VEC_DATA_TYPE(DATA_TYPE, 8)
- out4 = 0.0f;
- out4.s0 = (w0.s0 - 2.f * w1.s0 + 4.f * w2.s0) / 96.f;
- out4.s1 = (-w0.s0 + 2.f * w1.s0 - 4.f * w2.s0 - w0.s1 + 2.f * w1.s1 - 4.f * w2.s1 - w0.s2 + 2.f * w1.s2 - 4.f * w2.s2) / 144.f;
- out4.s2 = (-w0.s0 + 2.f * w1.s0 - 4.f * w2.s0 + w0.s1 - 2.f * w1.s1 + 4.f * w2.s1 - w0.s2 + 2.f * w1.s2 - 4.f * w2.s2) / 144.f;
- out4.s3 = ((w0.s0 - 2.f * w1.s0 + 4.f * w2.s0) + 2.f * (w0.s1 - 2.f * w1.s1 + 4.f * w2.s1) + 4.f * (w0.s2 - 2.f * w1.s2 + 4.f * w2.s2)) / 576.f;
- out4.s4 = ((w0.s0 - 2.f * w1.s0 + 4.f * w2.s0) + 2.f * (-w0.s1 + 2.f * w1.s1 - 4.f * w2.s1) + 4.f * (w0.s2 - 2.f * w1.s2 + 4.f * w2.s2)) / 576.f;
- out4.s5 = (w0.s2 - 2.f * w1.s2 + 4.f * w2.s2) / 24.f;
-
- // Row 5
- VEC_DATA_TYPE(DATA_TYPE, 8)
- out5 = 0.0f;
- out5.s0 = (w2.s0) / 4.f;
- out5.s1 = (-w2.s0 - w2.s1 - w2.s2) / 6.f;
- out5.s2 = (-w2.s0 + w2.s1 - w2.s2) / 6.f;
- out5.s3 = (w2.s0 + 2.f * w2.s1 + 4.f * w2.s2) / 24.f;
- out5.s4 = (w2.s0 - 2.f * w2.s1 + 4.f * w2.s2) / 24.f;
- out5.s5 = (w2.s2);
-#endif // !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
-
- int z = get_global_id(2);
- int x0 = z / SRC_DIM_Z; // idx filter
- int y0 = z % SRC_DIM_Z; // idx channel
-
- // Get output address
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x0 * dst_stride_x + y0 * dst_stride_y;
-
- // Store the values across the channels
- // 36 channels for 3x3 kernels
- // 6 channels for 3x1 or 1x3 kernels
- *(__global DATA_TYPE *)(dst_addr + 0 * dst_stride_z) = out0.s0;
- *(__global DATA_TYPE *)(dst_addr + 1 * dst_stride_z) = out0.s1;
- *(__global DATA_TYPE *)(dst_addr + 2 * dst_stride_z) = out0.s2;
- *(__global DATA_TYPE *)(dst_addr + 3 * dst_stride_z) = out0.s3;
- *(__global DATA_TYPE *)(dst_addr + 4 * dst_stride_z) = out0.s4;
- *(__global DATA_TYPE *)(dst_addr + 5 * dst_stride_z) = out0.s5;
-
-#if !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
- *(__global DATA_TYPE *)(dst_addr + 6 * dst_stride_z) = out1.s0;
- *(__global DATA_TYPE *)(dst_addr + 7 * dst_stride_z) = out1.s1;
- *(__global DATA_TYPE *)(dst_addr + 8 * dst_stride_z) = out1.s2;
- *(__global DATA_TYPE *)(dst_addr + 9 * dst_stride_z) = out1.s3;
- *(__global DATA_TYPE *)(dst_addr + 10 * dst_stride_z) = out1.s4;
- *(__global DATA_TYPE *)(dst_addr + 11 * dst_stride_z) = out1.s5;
- *(__global DATA_TYPE *)(dst_addr + 12 * dst_stride_z) = out2.s0;
- *(__global DATA_TYPE *)(dst_addr + 13 * dst_stride_z) = out2.s1;
- *(__global DATA_TYPE *)(dst_addr + 14 * dst_stride_z) = out2.s2;
- *(__global DATA_TYPE *)(dst_addr + 15 * dst_stride_z) = out2.s3;
- *(__global DATA_TYPE *)(dst_addr + 16 * dst_stride_z) = out2.s4;
- *(__global DATA_TYPE *)(dst_addr + 17 * dst_stride_z) = out2.s5;
- *(__global DATA_TYPE *)(dst_addr + 18 * dst_stride_z) = out3.s0;
- *(__global DATA_TYPE *)(dst_addr + 19 * dst_stride_z) = out3.s1;
- *(__global DATA_TYPE *)(dst_addr + 20 * dst_stride_z) = out3.s2;
- *(__global DATA_TYPE *)(dst_addr + 21 * dst_stride_z) = out3.s3;
- *(__global DATA_TYPE *)(dst_addr + 22 * dst_stride_z) = out3.s4;
- *(__global DATA_TYPE *)(dst_addr + 23 * dst_stride_z) = out3.s5;
- *(__global DATA_TYPE *)(dst_addr + 24 * dst_stride_z) = out4.s0;
- *(__global DATA_TYPE *)(dst_addr + 25 * dst_stride_z) = out4.s1;
- *(__global DATA_TYPE *)(dst_addr + 26 * dst_stride_z) = out4.s2;
- *(__global DATA_TYPE *)(dst_addr + 27 * dst_stride_z) = out4.s3;
- *(__global DATA_TYPE *)(dst_addr + 28 * dst_stride_z) = out4.s4;
- *(__global DATA_TYPE *)(dst_addr + 29 * dst_stride_z) = out4.s5;
- *(__global DATA_TYPE *)(dst_addr + 30 * dst_stride_z) = out5.s0;
- *(__global DATA_TYPE *)(dst_addr + 31 * dst_stride_z) = out5.s1;
- *(__global DATA_TYPE *)(dst_addr + 32 * dst_stride_z) = out5.s2;
- *(__global DATA_TYPE *)(dst_addr + 33 * dst_stride_z) = out5.s3;
- *(__global DATA_TYPE *)(dst_addr + 34 * dst_stride_z) = out5.s4;
- *(__global DATA_TYPE *)(dst_addr + 35 * dst_stride_z) = out5.s5;
-#endif // !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
-}
-
+#if defined(WINOGRAD_FILTER_TRANSFORM_4X4_3X3_NHWC) || defined(WINOGRAD_FILTER_TRANSFORM_4X1_3X1_NHWC) || defined(WINOGRAD_FILTER_TRANSFORM_1X4_1X3_NHWC)
/** This OpenCL kernel performs Winograd filter transform 3x3/3x1/1x3 when the data layout is NHWC and the output tile is 4x4/4x1/1x4
*
- * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
* @note If this kernel is used to perform Winograd filter transform 3x1, -DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL has to be passed at compile time
* @note If this kernel is used to perform Winograd filter transform 1x3, -DWINOGRAD_FILTER_TRANSFORM_VERTICAL has to be passed at compile time
* @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
@@ -344,10 +60,12 @@ __kernel void winograd_filter_transform_4x4_3x3_nchw(
* @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] SRC_DIM_Z The third (Z) dimension of the src tensor
*/
__kernel void winograd_filter_transform_4x4_3x3_nhwc(
TENSOR4D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst))
+ TENSOR3D_DECLARATION(dst),
+ const int SRC_DIM_Z)
{
Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DIM_Z);
@@ -476,312 +194,11 @@ __kernel void winograd_filter_transform_4x4_3x3_nhwc(
*(__global DATA_TYPE *)(dst_addr + 35 * dst_stride_z) = out55;
#endif // !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
}
+#endif // defined(WINOGRAD_FILTER_TRANSFORM_4X4_3X3_NHWC) || defined(WINOGRAD_FILTER_TRANSFORM_4X1_3X1_NHWC) || defined(WINOGRAD_FILTER_TRANSFORM_1X4_1X3_NHWC)
-/** This OpenCL kernel performs Winograd filter transform 5x5/5x1 or 1x5 when the data layout is NCHW and the output tile is 4x4/4x1 or 1x4
- *
- * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
- *
- * @note If this kernel is used to perform Winograd filter transform 5x1, -DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note If this kernel is used to perform Winograd filter transform 1x5, -DWINOGRAD_FILTER_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_filter_transform_4x4_5x5_nchw(
- TENSOR4D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst))
-{
- Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DIM_Z);
-
- const __global uchar *src_addr = tensor4D_offset(&src, 0, 0, 0, 0);
-
- // Load the values from the input tensor
-#if defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL)
- VEC_DATA_TYPE(DATA_TYPE, 4)
- w00 = vload4(0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y));
- DATA_TYPE w01 = *((__global DATA_TYPE *)(src_addr + 0 * src_stride_y) + 4);
-#elif defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
- VEC_DATA_TYPE(DATA_TYPE, 4)
- w00 = (VEC_DATA_TYPE(DATA_TYPE, 4))(*((__global DATA_TYPE *)(src_addr + 0 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 1 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 2 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 3 * src_stride_y)));
- DATA_TYPE w01 = *((__global DATA_TYPE *)(src_addr + 4 * src_stride_y));
-#else // defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
- VEC_DATA_TYPE(DATA_TYPE, 4)
- w00 = vload4(0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y));
- DATA_TYPE w01 = *((__global DATA_TYPE *)(src_addr + 0 * src_stride_y) + 4);
- VEC_DATA_TYPE(DATA_TYPE, 4)
- w10 = vload4(0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y));
- DATA_TYPE w11 = *((__global DATA_TYPE *)(src_addr + 1 * src_stride_y) + 4);
- VEC_DATA_TYPE(DATA_TYPE, 4)
- w20 = vload4(0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y));
- DATA_TYPE w21 = *((__global DATA_TYPE *)(src_addr + 2 * src_stride_y) + 4);
- VEC_DATA_TYPE(DATA_TYPE, 4)
- w30 = vload4(0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_y));
- DATA_TYPE w31 = *((__global DATA_TYPE *)(src_addr + 3 * src_stride_y) + 4);
- VEC_DATA_TYPE(DATA_TYPE, 4)
- w40 = vload4(0, (__global DATA_TYPE *)(src_addr + 4 * src_stride_y));
- DATA_TYPE w41 = *((__global DATA_TYPE *)(src_addr + 4 * src_stride_y) + 4);
-#endif // defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL)
-
- // Transform the input tile
-
- // Row 0
- VEC_DATA_TYPE(DATA_TYPE, 8)
- out0 = 0.0f;
- out0.s0 = w00.s0;
- out0.s1 = -2.f * (w00.s0 + w00.s1 + w00.s2 + w00.s3 + w01) / 9.f;
- out0.s2 = -2.f * (w00.s0 - w00.s1 + w00.s2 - w00.s3 + w01) / 9.f;
- out0.s3 = (w00.s0 + 2.f * w00.s1 + 4.f * w00.s2 + 8.f * w00.s3 + 16.f * w01) / 90.f;
- out0.s4 = (w00.s0 - 2.f * w00.s1 + 4.f * w00.s2 - 8.f * w00.s3 + 16.f * w01) / 90.f;
- out0.s5 = (16.f * w00.s0 + 8.f * w00.s1 + 4.f * w00.s2 + 2.f * w00.s3 + w01) / 180.f;
- out0.s6 = (16.f * w00.s0 - 8.f * w00.s1 + 4.f * w00.s2 - 2.f * w00.s3 + w01) / 180.f;
- out0.s7 = w01;
-
-#if !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
- // Row 1
- VEC_DATA_TYPE(DATA_TYPE, 8)
- out1 = 0.0f;
- out1.s0 = -2.f * (w00.s0 + w10.s0 + w20.s0 + w30.s0 + w40.s0) / 9.f;
- out1.s1 = 4.f * ((w00.s0 + w10.s0 + w20.s0 + w30.s0 + w40.s0) + (w00.s1 + w10.s1 + w20.s1 + w30.s1 + w40.s1) + (w00.s2 + w10.s2 + w20.s2 + w30.s2 + w40.s2) +
- (w00.s3 + w10.s3 + w20.s3 + w30.s3 + w40.s3) + (w01 + w11 + w21 + w31 + w41)) / 81.f;
- out1.s2 = 4.f * ((w00.s0 + w10.s0 + w20.s0 + w30.s0 + w40.s0) - (w00.s1 + w10.s1 + w20.s1 + w30.s1 + w40.s1) + (w00.s2 + w10.s2 + w20.s2 + w30.s2 + w40.s2) -
- (w00.s3 + w10.s3 + w20.s3 + w30.s3 + w40.s3) + (w01 + w11 + w21 + w31 + w41)) / 81.f;
- out1.s3 = -((w00.s0 + w10.s0 + w20.s0 + w30.s0 + w40.s0) + 2.f * (w00.s1 + w10.s1 + w20.s1 + w30.s1 + w40.s1) + 4.f * (w00.s2 + w10.s2 + w20.s2 + w30.s2 + w40.s2) + 8.f *
- (w00.s3 + w10.s3 + w20.s3 + w30.s3 + w40.s3) + 16.f * (w01 + w11 + w21 + w31 + w41)) / 405.f;
- out1.s4 = -((w00.s0 + w10.s0 + w20.s0 + w30.s0 + w40.s0) - 2.f * (w00.s1 + w10.s1 + w20.s1 + w30.s1 + w40.s1) + 4.f * (w00.s2 + w10.s2 + w20.s2 + w30.s2 + w40.s2) - 8.f *
- (w00.s3 + w10.s3 + w20.s3 + w30.s3 + w40.s3) + 16.f * (w01 + w11 + w21 + w31 + w41)) / 405.f;
- out1.s5 = -(16.f * (w00.s0 + w10.s0 + w20.s0 + w30.s0 + w40.s0) + 8.f * (w00.s1 + w10.s1 + w20.s1 + w30.s1 + w40.s1) + 4.f * (w00.s2 + w10.s2 + w20.s2 + w30.s2 + w40.s2) + 2.f *
- (w00.s3 + w10.s3 + w20.s3 + w30.s3 + w40.s3) + (w01 + w11 + w21 + w31 + w41)) / 810.f;
- out1.s6 = -(16.f * (w00.s0 + w10.s0 + w20.s0 + w30.s0 + w40.s0) - 8.f * (w00.s1 + w10.s1 + w20.s1 + w30.s1 + w40.s1) + 4.f * (w00.s2 + w10.s2 + w20.s2 + w30.s2 + w40.s2) - 2.f *
- (w00.s3 + w10.s3 + w20.s3 + w30.s3 + w40.s3) + (w01 + w11 + w21 + w31 + w41)) / 810.f;
- out1.s7 = -2.f * (w01 + w11 + w21 + w31 + w41) / 9.f;
-
- // Row 2
- VEC_DATA_TYPE(DATA_TYPE, 8)
- out2 = 0.0f;
- out2.s0 = -2.f * (w00.s0 - w10.s0 + w20.s0 - w30.s0 + w40.s0) / 9.f;
- out2.s1 = 4.f * ((w00.s0 - w10.s0 + w20.s0 - w30.s0 + w40.s0) + (w00.s1 - w10.s1 + w20.s1 - w30.s1 + w40.s1) + (w00.s2 - w10.s2 + w20.s2 - w30.s2 + w40.s2) +
- (w00.s3 - w10.s3 + w20.s3 - w30.s3 + w40.s3) + (w01 - w11 + w21 - w31 + w41)) / 81.f;
- out2.s2 = 4.f * ((w00.s0 - w10.s0 + w20.s0 - w30.s0 + w40.s0) - (w00.s1 - w10.s1 + w20.s1 - w30.s1 + w40.s1) + (w00.s2 - w10.s2 + w20.s2 - w30.s2 + w40.s2) -
- (w00.s3 - w10.s3 + w20.s3 - w30.s3 + w40.s3) + (w01 - w11 + w21 - w31 + w41)) / 81.f;
- out2.s3 = -((w00.s0 - w10.s0 + w20.s0 - w30.s0 + w40.s0) + 2.f * (w00.s1 - w10.s1 + w20.s1 - w30.s1 + w40.s1) + 4.f * (w00.s2 - w10.s2 + w20.s2 - w30.s2 + w40.s2) + 8.f *
- (w00.s3 - w10.s3 + w20.s3 - w30.s3 + w40.s3) + 16.f * (w01 - w11 + w21 - w31 + w41)) / 405.f;
- out2.s4 = -((w00.s0 - w10.s0 + w20.s0 - w30.s0 + w40.s0) - 2.f * (w00.s1 - w10.s1 + w20.s1 - w30.s1 + w40.s1) + 4.f * (w00.s2 - w10.s2 + w20.s2 - w30.s2 + w40.s2) - 8.f *
- (w00.s3 - w10.s3 + w20.s3 - w30.s3 + w40.s3) + 16.f * (w01 - w11 + w21 - w31 + w41)) / 405.f;
- out2.s5 = -(16.f * (w00.s0 - w10.s0 + w20.s0 - w30.s0 + w40.s0) + 8.f * (w00.s1 - w10.s1 + w20.s1 - w30.s1 + w40.s1) + 4.f * (w00.s2 - w10.s2 + w20.s2 - w30.s2 + w40.s2) + 2.f *
- (w00.s3 - w10.s3 + w20.s3 - w30.s3 + w40.s3) + (w01 - w11 + w21 - w31 + w41)) / 810.f;
- out2.s6 = -(16.f * (w00.s0 - w10.s0 + w20.s0 - w30.s0 + w40.s0) - 8.f * (w00.s1 - w10.s1 + w20.s1 - w30.s1 + w40.s1) + 4.f * (w00.s2 - w10.s2 + w20.s2 - w30.s2 + w40.s2) - 2.f *
- (w00.s3 - w10.s3 + w20.s3 - w30.s3 + w40.s3) + (w01 - w11 + w21 - w31 + w41)) / 810.f;
- out2.s7 = -2.f * (w01 - w11 + w21 - w31 + w41) / 9.f;
-
- // Row 3
- VEC_DATA_TYPE(DATA_TYPE, 8)
- out3 = 0.0f;
- out3.s0 = (w00.s0 + 2.f * w10.s0 + 4.f * w20.s0 + 8.f * w30.s0 + 16.f * w40.s0) / 90.f;
- out3.s1 = -((w00.s0 + 2.f * w10.s0 + 4.f * w20.s0 + 8.f * w30.s0 + 16.f * w40.s0) + (w00.s1 + 2.f * w10.s1 + 4.f * w20.s1 + 8.f * w30.s1 + 16.f * w40.s1) +
- (w00.s2 + 2.f * w10.s2 + 4.f * w20.s2 + 8.f * w30.s2 + 16.f * w40.s2) + (w00.s3 + 2.f * w10.s3 + 4.f * w20.s3 + 8.f * w30.s3 + 16.f * w40.s3) +
- (w01 + 2.f * w11 + 4.f * w21 + 8.f * w31 + 16.f * w41)) / 405.f;
- out3.s2 = -((w00.s0 + 2.f * w10.s0 + 4.f * w20.s0 + 8.f * w30.s0 + 16.f * w40.s0) - (w00.s1 + 2.f * w10.s1 + 4.f * w20.s1 + 8.f * w30.s1 + 16.f * w40.s1) +
- (w00.s2 + 2.f * w10.s2 + 4.f * w20.s2 + 8.f * w30.s2 + 16.f * w40.s2) - (w00.s3 + 2.f * w10.s3 + 4.f * w20.s3 + 8.f * w30.s3 + 16.f * w40.s3) +
- (w01 + 2.f * w11 + 4.f * w21 + 8.f * w31 + 16.f * w41)) / 405.f;
- out3.s3 = ((w00.s0 + 2.f * w10.s0 + 4.f * w20.s0 + 8.f * w30.s0 + 16.f * w40.s0) + 2.f * (w00.s1 + 2.f * w10.s1 + 4.f * w20.s1 + 8.f * w30.s1 + 16.f * w40.s1) + 4.f *
- (w00.s2 + 2.f * w10.s2 + 4.f * w20.s2 + 8.f * w30.s2 + 16.f * w40.s2) + 8.f * (w00.s3 + 2.f * w10.s3 + 4.f * w20.s3 + 8.f * w30.s3 + 16.f * w40.s3) + 16.f *
- (w01 + 2.f * w11 + 4.f * w21 + 8.f * w31 + 16.f * w41)) / 8100.f;
- out3.s4 = ((w00.s0 + 2.f * w10.s0 + 4.f * w20.s0 + 8.f * w30.s0 + 16.f * w40.s0) - 2.f * (w00.s1 + 2.f * w10.s1 + 4.f * w20.s1 + 8.f * w30.s1 + 16.f * w40.s1) + 4.f *
- (w00.s2 + 2.f * w10.s2 + 4.f * w20.s2 + 8.f * w30.s2 + 16.f * w40.s2) - 8.f * (w00.s3 + 2.f * w10.s3 + 4.f * w20.s3 + 8.f * w30.s3 + 16.f * w40.s3) + 16.f *
- (w01 + 2.f * w11 + 4.f * w21 + 8.f * w31 + 16.f * w41)) / 8100.f;
- out3.s5 = (16.f * (w00.s0 + 2.f * w10.s0 + 4.f * w20.s0 + 8.f * w30.s0 + 16.f * w40.s0) + 8.f * (w00.s1 + 2.f * w10.s1 + 4.f * w20.s1 + 8.f * w30.s1 + 16.f * w40.s1) + 4.f *
- (w00.s2 + 2.f * w10.s2 + 4.f * w20.s2 + 8.f * w30.s2 + 16.f * w40.s2) + 2.f * (w00.s3 + 2.f * w10.s3 + 4.f * w20.s3 + 8.f * w30.s3 + 16.f * w40.s3) +
- (w01 + 2.f * w11 + 4.f * w21 + 8.f * w31 + 16.f * w41)) / 16200.f;
- out3.s6 = (16.f * (w00.s0 + 2.f * w10.s0 + 4.f * w20.s0 + 8.f * w30.s0 + 16.f * w40.s0) - 8.f * (w00.s1 + 2.f * w10.s1 + 4.f * w20.s1 + 8.f * w30.s1 + 16.f * w40.s1) + 4.f *
- (w00.s2 + 2.f * w10.s2 + 4.f * w20.s2 + 8.f * w30.s2 + 16.f * w40.s2) - 2.f * (w00.s3 + 2.f * w10.s3 + 4.f * w20.s3 + 8.f * w30.s3 + 16.f * w40.s3) +
- (w01 + 2.f * w11 + 4.f * w21 + 8.f * w31 + 16.f * w41)) / 16200.f;
- out3.s7 = (w01 + 2.f * w11 + 4.f * w21 + 8.f * w31 + 16.f * w41) / 90.f;
-
- // Row 4
- VEC_DATA_TYPE(DATA_TYPE, 8)
- out4 = 0.0f;
- out4.s0 = (w00.s0 - 2.f * w10.s0 + 4.f * w20.s0 - 8.f * w30.s0 + 16.f * w40.s0) / 90.f;
- out4.s1 = -((w00.s0 - 2.f * w10.s0 + 4.f * w20.s0 - 8.f * w30.s0 + 16.f * w40.s0) + (w00.s1 - 2.f * w10.s1 + 4.f * w20.s1 - 8.f * w30.s1 + 16.f * w40.s1) +
- (w00.s2 - 2.f * w10.s2 + 4.f * w20.s2 - 8.f * w30.s2 + 16.f * w40.s2) + (w00.s3 - 2.f * w10.s3 + 4.f * w20.s3 - 8.f * w30.s3 + 16.f * w40.s3) +
- (w01 - 2.f * w11 + 4.f * w21 - 8.f * w31 + 16.f * w41)) / 405.f;
- out4.s2 = -((w00.s0 - 2.f * w10.s0 + 4.f * w20.s0 - 8.f * w30.s0 + 16.f * w40.s0) - (w00.s1 - 2.f * w10.s1 + 4.f * w20.s1 - 8.f * w30.s1 + 16.f * w40.s1) +
- (w00.s2 - 2.f * w10.s2 + 4.f * w20.s2 - 8.f * w30.s2 + 16.f * w40.s2) - (w00.s3 - 2.f * w10.s3 + 4.f * w20.s3 - 8.f * w30.s3 + 16.f * w40.s3) +
- (w01 - 2.f * w11 + 4.f * w21 - 8.f * w31 + 16.f * w41)) / 405.f;
- out4.s3 = ((w00.s0 - 2.f * w10.s0 + 4.f * w20.s0 - 8.f * w30.s0 + 16.f * w40.s0) + 2.f * (w00.s1 - 2.f * w10.s1 + 4.f * w20.s1 - 8.f * w30.s1 + 16.f * w40.s1) + 4.f *
- (w00.s2 - 2.f * w10.s2 + 4.f * w20.s2 - 8.f * w30.s2 + 16.f * w40.s2) + 8.f * (w00.s3 - 2.f * w10.s3 + 4.f * w20.s3 - 8.f * w30.s3 + 16.f * w40.s3) + 16.f *
- (w01 - 2.f * w11 + 4.f * w21 - 8.f * w31 + 16.f * w41)) / 8100.f;
- out4.s4 = ((w00.s0 - 2.f * w10.s0 + 4.f * w20.s0 - 8.f * w30.s0 + 16.f * w40.s0) - 2.f * (w00.s1 - 2.f * w10.s1 + 4.f * w20.s1 - 8.f * w30.s1 + 16.f * w40.s1) + 4.f *
- (w00.s2 - 2.f * w10.s2 + 4.f * w20.s2 - 8.f * w30.s2 + 16.f * w40.s2) - 8.f * (w00.s3 - 2.f * w10.s3 + 4.f * w20.s3 - 8.f * w30.s3 + 16.f * w40.s3) + 16.f *
- (w01 - 2.f * w11 + 4.f * w21 - 8.f * w31 + 16.f * w41)) / 8100.f;
- out4.s5 = (16.f * (w00.s0 - 2.f * w10.s0 + 4.f * w20.s0 - 8.f * w30.s0 + 16.f * w40.s0) + 8.f * (w00.s1 - 2.f * w10.s1 + 4.f * w20.s1 - 8.f * w30.s1 + 16.f * w40.s1) + 4.f *
- (w00.s2 - 2.f * w10.s2 + 4.f * w20.s2 - 8.f * w30.s2 + 16.f * w40.s2) + 2.f * (w00.s3 - 2.f * w10.s3 + 4.f * w20.s3 - 8.f * w30.s3 + 16.f * w40.s3) +
- (w01 - 2.f * w11 + 4.f * w21 - 8.f * w31 + 16.f * w41)) / 16200.f;
- out4.s6 = (16.f * (w00.s0 - 2.f * w10.s0 + 4.f * w20.s0 - 8.f * w30.s0 + 16.f * w40.s0) - 8.f * (w00.s1 - 2.f * w10.s1 + 4.f * w20.s1 - 8.f * w30.s1 + 16.f * w40.s1) + 4.f *
- (w00.s2 - 2.f * w10.s2 + 4.f * w20.s2 - 8.f * w30.s2 + 16.f * w40.s2) - 2.f * (w00.s3 - 2.f * w10.s3 + 4.f * w20.s3 - 8.f * w30.s3 + 16.f * w40.s3) +
- (w01 - 2.f * w11 + 4.f * w21 - 8.f * w31 + 16.f * w41)) / 16200.f;
- out4.s7 = (w01 - 2.f * w11 + 4.f * w21 - 8.f * w31 + 16.f * w41) / 90.f;
-
- // Row 5
- VEC_DATA_TYPE(DATA_TYPE, 8)
- out5 = 0.0f;
- out5.s0 = (16.f * w00.s0 + 8.f * w10.s0 + 4.f * w20.s0 + 2.f * w30.s0 + w40.s0) / 180.f;
- out5.s1 = -((16.f * w00.s0 + 8.f * w10.s0 + 4.f * w20.s0 + 2.f * w30.s0 + w40.s0) + (16.f * w00.s1 + 8.f * w10.s1 + 4.f * w20.s1 + 2.f * w30.s1 + w40.s1) +
- (16.f * w00.s2 + 8.f * w10.s2 + 4.f * w20.s2 + 2.f * w30.s2 + w40.s2) + (16.f * w00.s3 + 8.f * w10.s3 + 4.f * w20.s3 + 2.f * w30.s3 + w40.s3) +
- (16.f * w01 + 8.f * w11 + 4.f * w21 + 2.f * w31 + w41)) / 810.f;
- out5.s2 = -((16.f * w00.s0 + 8.f * w10.s0 + 4.f * w20.s0 + 2.f * w30.s0 + w40.s0) - (16.f * w00.s1 + 8.f * w10.s1 + 4.f * w20.s1 + 2.f * w30.s1 + w40.s1) +
- (16.f * w00.s2 + 8.f * w10.s2 + 4.f * w20.s2 + 2.f * w30.s2 + w40.s2) - (16.f * w00.s3 + 8.f * w10.s3 + 4.f * w20.s3 + 2.f * w30.s3 + w40.s3) +
- (16.f * w01 + 8.f * w11 + 4.f * w21 + 2.f * w31 + w41)) / 810.f;
- out5.s3 = ((16.f * w00.s0 + 8.f * w10.s0 + 4.f * w20.s0 + 2.f * w30.s0 + w40.s0) + 2.f * (16.f * w00.s1 + 8.f * w10.s1 + 4.f * w20.s1 + 2.f * w30.s1 + w40.s1) + 4.f *
- (16.f * w00.s2 + 8.f * w10.s2 + 4.f * w20.s2 + 2.f * w30.s2 + w40.s2) + 8.f * (16.f * w00.s3 + 8.f * w10.s3 + 4.f * w20.s3 + 2.f * w30.s3 + w40.s3) + 16.f *
- (16.f * w01 + 8.f * w11 + 4.f * w21 + 2.f * w31 + w41)) / 16200.f;
- out5.s4 = ((16.f * w00.s0 + 8.f * w10.s0 + 4.f * w20.s0 + 2.f * w30.s0 + w40.s0) - 2.f * (16.f * w00.s1 + 8.f * w10.s1 + 4.f * w20.s1 + 2.f * w30.s1 + w40.s1) + 4.f *
- (16.f * w00.s2 + 8.f * w10.s2 + 4.f * w20.s2 + 2.f * w30.s2 + w40.s2) - 8.f * (16.f * w00.s3 + 8.f * w10.s3 + 4.f * w20.s3 + 2.f * w30.s3 + w40.s3) + 16.f *
- (16.f * w01 + 8.f * w11 + 4.f * w21 + 2.f * w31 + w41)) / 16200.f;
- out5.s5 = (16.f * (16.f * w00.s0 + 8.f * w10.s0 + 4.f * w20.s0 + 2.f * w30.s0 + w40.s0) + 8.f * (16.f * w00.s1 + 8.f * w10.s1 + 4.f * w20.s1 + 2.f * w30.s1 + w40.s1) + 4.f *
- (16.f * w00.s2 + 8.f * w10.s2 + 4.f * w20.s2 + 2.f * w30.s2 + w40.s2) + 2.f * (16.f * w00.s3 + 8.f * w10.s3 + 4.f * w20.s3 + 2.f * w30.s3 + w40.s3) +
- (16.f * w01 + 8.f * w11 + 4.f * w21 + 2.f * w31 + w41)) / 32400.f;
- out5.s6 = (16.f * (16.f * w00.s0 + 8.f * w10.s0 + 4.f * w20.s0 + 2.f * w30.s0 + w40.s0) - 8.f * (16.f * w00.s1 + 8.f * w10.s1 + 4.f * w20.s1 + 2.f * w30.s1 + w40.s1) + 4.f *
- (16.f * w00.s2 + 8.f * w10.s2 + 4.f * w20.s2 + 2.f * w30.s2 + w40.s2) - 2.f * (16.f * w00.s3 + 8.f * w10.s3 + 4.f * w20.s3 + 2.f * w30.s3 + w40.s3) +
- (16.f * w01 + 8.f * w11 + 4.f * w21 + 2.f * w31 + w41)) / 32400.f;
- out5.s7 = (16.f * w01 + 8.f * w11 + 4.f * w21 + 2.f * w31 + w41) / 180.f;
-
- // Row 6
- VEC_DATA_TYPE(DATA_TYPE, 8)
- out6 = 0.0f;
- out6.s0 = (16.f * w00.s0 - 8.f * w10.s0 + 4.f * w20.s0 - 2.f * w30.s0 + w40.s0) / 180.f;
- out6.s1 = -((16.f * w00.s0 - 8.f * w10.s0 + 4.f * w20.s0 - 2.f * w30.s0 + w40.s0) + (16.f * w00.s1 - 8.f * w10.s1 + 4.f * w20.s1 - 2.f * w30.s1 + w40.s1) +
- (16.f * w00.s2 - 8.f * w10.s2 + 4.f * w20.s2 - 2.f * w30.s2 + w40.s2) + (16.f * w00.s3 - 8.f * w10.s3 + 4.f * w20.s3 - 2.f * w30.s3 + w40.s3) +
- (16.f * w01 - 8.f * w11 + 4.f * w21 - 2.f * w31 + w41)) / 810.f;
- out6.s2 = -((16.f * w00.s0 - 8.f * w10.s0 + 4.f * w20.s0 - 2.f * w30.s0 + w40.s0) - (16.f * w00.s1 - 8.f * w10.s1 + 4.f * w20.s1 - 2.f * w30.s1 + w40.s1) +
- (16.f * w00.s2 - 8.f * w10.s2 + 4.f * w20.s2 - 2.f * w30.s2 + w40.s2) - (16.f * w00.s3 - 8.f * w10.s3 + 4.f * w20.s3 - 2.f * w30.s3 + w40.s3) +
- (16.f * w01 - 8.f * w11 + 4.f * w21 - 2.f * w31 + w41)) / 810.f;
- out6.s3 = ((16.f * w00.s0 - 8.f * w10.s0 + 4.f * w20.s0 - 2.f * w30.s0 + w40.s0) + 2.f * (16.f * w00.s1 - 8.f * w10.s1 + 4.f * w20.s1 - 2.f * w30.s1 + w40.s1) + 4.f *
- (16.f * w00.s2 - 8.f * w10.s2 + 4.f * w20.s2 - 2.f * w30.s2 + w40.s2) + 8.f * (16.f * w00.s3 - 8.f * w10.s3 + 4.f * w20.s3 - 2.f * w30.s3 + w40.s3) + 16.f *
- (16.f * w01 - 8.f * w11 + 4.f * w21 - 2.f * w31 + w41)) / 16200.f;
- out6.s4 = ((16.f * w00.s0 - 8.f * w10.s0 + 4.f * w20.s0 - 2.f * w30.s0 + w40.s0) - 2.f * (16.f * w00.s1 - 8.f * w10.s1 + 4.f * w20.s1 - 2.f * w30.s1 + w40.s1) + 4.f *
- (16.f * w00.s2 - 8.f * w10.s2 + 4.f * w20.s2 - 2.f * w30.s2 + w40.s2) - 8.f * (16.f * w00.s3 - 8.f * w10.s3 + 4.f * w20.s3 - 2.f * w30.s3 + w40.s3) + 16.f *
- (16.f * w01 - 8.f * w11 + 4.f * w21 - 2.f * w31 + w41)) / 16200.f;
- out6.s5 = (16.f * (16.f * w00.s0 - 8.f * w10.s0 + 4.f * w20.s0 - 2.f * w30.s0 + w40.s0) + 8.f * (16.f * w00.s1 - 8.f * w10.s1 + 4.f * w20.s1 - 2.f * w30.s1 + w40.s1) + 4.f *
- (16.f * w00.s2 - 8.f * w10.s2 + 4.f * w20.s2 - 2.f * w30.s2 + w40.s2) + 2.f * (16.f * w00.s3 - 8.f * w10.s3 + 4.f * w20.s3 - 2.f * w30.s3 + w40.s3) +
- (16.f * w01 - 8.f * w11 + 4.f * w21 - 2.f * w31 + w41)) / 32400.f;
- out6.s6 = (16.f * (16.f * w00.s0 - 8.f * w10.s0 + 4.f * w20.s0 - 2.f * w30.s0 + w40.s0) - 8.f * (16.f * w00.s1 - 8.f * w10.s1 + 4.f * w20.s1 - 2.f * w30.s1 + w40.s1) + 4.f *
- (16.f * w00.s2 - 8.f * w10.s2 + 4.f * w20.s2 - 2.f * w30.s2 + w40.s2) - 2.f * (16.f * w00.s3 - 8.f * w10.s3 + 4.f * w20.s3 - 2.f * w30.s3 + w40.s3) +
- (16.f * w01 - 8.f * w11 + 4.f * w21 - 2.f * w31 + w41)) / 32400.f;
- out6.s7 = (16.f * w01 - 8.f * w11 + 4.f * w21 - 2.f * w31 + w41) / 180.f;
-
- // Row 7
- VEC_DATA_TYPE(DATA_TYPE, 8)
- out7 = 0.0f;
- out7.s0 = w40.s0;
- out7.s1 = -2.f * (w40.s0 + w40.s1 + w40.s2 + w40.s3 + w41) / 9.f;
- out7.s2 = -2.f * (w40.s0 - w40.s1 + w40.s2 - w40.s3 + w41) / 9.f;
- out7.s3 = (w40.s0 + 2.f * w40.s1 + 4.f * w40.s2 + 8.f * w40.s3 + 16.f * w41) / 90.f;
- out7.s4 = (w40.s0 - 2.f * w40.s1 + 4.f * w40.s2 - 8.f * w40.s3 + 16.f * w41) / 90.f;
- out7.s5 = (16.f * w40.s0 + 8.f * w40.s1 + 4.f * w40.s2 + 2.f * w40.s3 + w41) / 180.f;
- out7.s6 = (16.f * w40.s0 - 8.f * w40.s1 + 4.f * w40.s2 - 2.f * w40.s3 + w41) / 180.f;
- out7.s7 = w41;
-#endif // !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
-
- int z = get_global_id(2);
- int x0 = z / SRC_DIM_Z; // idx filter
- int y0 = z % SRC_DIM_Z; // idx channel
-
- // Get output address
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x0 * sizeof(DATA_TYPE) + y0 * dst_stride_y;
-
- // Store the values across the channels
- *(__global DATA_TYPE *)(dst_addr + 0 * dst_stride_z) = out0.s0;
- *(__global DATA_TYPE *)(dst_addr + 1 * dst_stride_z) = out0.s1;
- *(__global DATA_TYPE *)(dst_addr + 2 * dst_stride_z) = out0.s2;
- *(__global DATA_TYPE *)(dst_addr + 3 * dst_stride_z) = out0.s3;
- *(__global DATA_TYPE *)(dst_addr + 4 * dst_stride_z) = out0.s4;
- *(__global DATA_TYPE *)(dst_addr + 5 * dst_stride_z) = out0.s5;
- *(__global DATA_TYPE *)(dst_addr + 6 * dst_stride_z) = out0.s6;
- *(__global DATA_TYPE *)(dst_addr + 7 * dst_stride_z) = out0.s7;
-
-#if !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
- *(__global DATA_TYPE *)(dst_addr + 8 * dst_stride_z) = out1.s0;
- *(__global DATA_TYPE *)(dst_addr + 9 * dst_stride_z) = out1.s1;
- *(__global DATA_TYPE *)(dst_addr + 10 * dst_stride_z) = out1.s2;
- *(__global DATA_TYPE *)(dst_addr + 11 * dst_stride_z) = out1.s3;
- *(__global DATA_TYPE *)(dst_addr + 12 * dst_stride_z) = out1.s4;
- *(__global DATA_TYPE *)(dst_addr + 13 * dst_stride_z) = out1.s5;
- *(__global DATA_TYPE *)(dst_addr + 14 * dst_stride_z) = out1.s6;
- *(__global DATA_TYPE *)(dst_addr + 15 * dst_stride_z) = out1.s7;
- *(__global DATA_TYPE *)(dst_addr + 16 * dst_stride_z) = out2.s0;
- *(__global DATA_TYPE *)(dst_addr + 17 * dst_stride_z) = out2.s1;
- *(__global DATA_TYPE *)(dst_addr + 18 * dst_stride_z) = out2.s2;
- *(__global DATA_TYPE *)(dst_addr + 19 * dst_stride_z) = out2.s3;
- *(__global DATA_TYPE *)(dst_addr + 20 * dst_stride_z) = out2.s4;
- *(__global DATA_TYPE *)(dst_addr + 21 * dst_stride_z) = out2.s5;
- *(__global DATA_TYPE *)(dst_addr + 22 * dst_stride_z) = out2.s6;
- *(__global DATA_TYPE *)(dst_addr + 23 * dst_stride_z) = out2.s7;
- *(__global DATA_TYPE *)(dst_addr + 24 * dst_stride_z) = out3.s0;
- *(__global DATA_TYPE *)(dst_addr + 25 * dst_stride_z) = out3.s1;
- *(__global DATA_TYPE *)(dst_addr + 26 * dst_stride_z) = out3.s2;
- *(__global DATA_TYPE *)(dst_addr + 27 * dst_stride_z) = out3.s3;
- *(__global DATA_TYPE *)(dst_addr + 28 * dst_stride_z) = out3.s4;
- *(__global DATA_TYPE *)(dst_addr + 29 * dst_stride_z) = out3.s5;
- *(__global DATA_TYPE *)(dst_addr + 30 * dst_stride_z) = out3.s6;
- *(__global DATA_TYPE *)(dst_addr + 31 * dst_stride_z) = out3.s7;
- *(__global DATA_TYPE *)(dst_addr + 32 * dst_stride_z) = out4.s0;
- *(__global DATA_TYPE *)(dst_addr + 33 * dst_stride_z) = out4.s1;
- *(__global DATA_TYPE *)(dst_addr + 34 * dst_stride_z) = out4.s2;
- *(__global DATA_TYPE *)(dst_addr + 35 * dst_stride_z) = out4.s3;
- *(__global DATA_TYPE *)(dst_addr + 36 * dst_stride_z) = out4.s4;
- *(__global DATA_TYPE *)(dst_addr + 37 * dst_stride_z) = out4.s5;
- *(__global DATA_TYPE *)(dst_addr + 38 * dst_stride_z) = out4.s6;
- *(__global DATA_TYPE *)(dst_addr + 39 * dst_stride_z) = out4.s7;
- *(__global DATA_TYPE *)(dst_addr + 40 * dst_stride_z) = out5.s0;
- *(__global DATA_TYPE *)(dst_addr + 41 * dst_stride_z) = out5.s1;
- *(__global DATA_TYPE *)(dst_addr + 42 * dst_stride_z) = out5.s2;
- *(__global DATA_TYPE *)(dst_addr + 43 * dst_stride_z) = out5.s3;
- *(__global DATA_TYPE *)(dst_addr + 44 * dst_stride_z) = out5.s4;
- *(__global DATA_TYPE *)(dst_addr + 45 * dst_stride_z) = out5.s5;
- *(__global DATA_TYPE *)(dst_addr + 46 * dst_stride_z) = out5.s6;
- *(__global DATA_TYPE *)(dst_addr + 47 * dst_stride_z) = out5.s7;
- *(__global DATA_TYPE *)(dst_addr + 48 * dst_stride_z) = out6.s0;
- *(__global DATA_TYPE *)(dst_addr + 49 * dst_stride_z) = out6.s1;
- *(__global DATA_TYPE *)(dst_addr + 50 * dst_stride_z) = out6.s2;
- *(__global DATA_TYPE *)(dst_addr + 51 * dst_stride_z) = out6.s3;
- *(__global DATA_TYPE *)(dst_addr + 52 * dst_stride_z) = out6.s4;
- *(__global DATA_TYPE *)(dst_addr + 53 * dst_stride_z) = out6.s5;
- *(__global DATA_TYPE *)(dst_addr + 54 * dst_stride_z) = out6.s6;
- *(__global DATA_TYPE *)(dst_addr + 55 * dst_stride_z) = out6.s7;
- *(__global DATA_TYPE *)(dst_addr + 56 * dst_stride_z) = out7.s0;
- *(__global DATA_TYPE *)(dst_addr + 57 * dst_stride_z) = out7.s1;
- *(__global DATA_TYPE *)(dst_addr + 58 * dst_stride_z) = out7.s2;
- *(__global DATA_TYPE *)(dst_addr + 59 * dst_stride_z) = out7.s3;
- *(__global DATA_TYPE *)(dst_addr + 60 * dst_stride_z) = out7.s4;
- *(__global DATA_TYPE *)(dst_addr + 61 * dst_stride_z) = out7.s5;
- *(__global DATA_TYPE *)(dst_addr + 62 * dst_stride_z) = out7.s6;
- *(__global DATA_TYPE *)(dst_addr + 63 * dst_stride_z) = out7.s7;
-#endif // !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
-}
-
+#if defined(WINOGRAD_FILTER_TRANSFORM_4X4_5X5_NHWC) || defined(WINOGRAD_FILTER_TRANSFORM_4X1_5X1_NHWC) || defined(WINOGRAD_FILTER_TRANSFORM_1X4_1X5_NHWC)
/** This OpenCL kernel performs Winograd filter transform 5x5/5x1 or 1x5 when the data layout is NHWC and the output tile is 4x4/4x1 or 1x4
*
- * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
* @note If this kernel is used to perform Winograd filter transform 5x1, -DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL has to be passed at compile time
* @note If this kernel is used to perform Winograd filter transform 1x5, -DWINOGRAD_FILTER_TRANSFORM_VERTICAL has to be passed at compile time
* @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
@@ -804,10 +221,12 @@ __kernel void winograd_filter_transform_4x4_5x5_nchw(
* @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] SRC_DIM_Z The third (Z) dimension of the src tensor
*/
__kernel void winograd_filter_transform_4x4_5x5_nhwc(
TENSOR4D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst))
+ TENSOR3D_DECLARATION(dst),
+ const int SRC_DIM_Z)
{
Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DIM_Z);
@@ -1057,9 +476,12 @@ __kernel void winograd_filter_transform_4x4_5x5_nhwc(
*(__global DATA_TYPE *)(dst_addr + 63 * dst_stride_z) = out7.s7;
#endif // !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
}
+#endif // defined(WINOGRAD_FILTER_TRANSFORM_4X4_5X5_NHWC) || defined(WINOGRAD_FILTER_TRANSFORM_4X1_5X1_NHWC) || defined(WINOGRAD_FILTER_TRANSFORM_1X4_1X5_NHWC)
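For reference (host side, outside this patch): SRC_DIM_Z for the NHWC filter-transform kernels is no longer a -DSRC_DIM_Z build option but a trailing kernel argument, so the host now passes it at run time. A minimal sketch, assuming a cl_kernel for winograd_filter_transform_4x4_5x5_nhwc already exists and all tensor arguments declared by TENSOR4D_DECLARATION/TENSOR3D_DECLARATION have been set; the argument index is illustrative, since the number of preceding arguments depends on how those macros expand.

#include <CL/cl.h>

/* Sketch only: set the trailing SRC_DIM_Z argument introduced above.
 * first_free_idx is the index right after the src/dst tensor arguments;
 * its value depends on the TENSOR4D/TENSOR3D_DECLARATION expansion. */
static cl_int set_src_dim_z(cl_kernel kernel, cl_uint first_free_idx, cl_int src_dim_z)
{
    return clSetKernelArg(kernel, first_free_idx, sizeof(cl_int), &src_dim_z);
}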
+
+#if defined(WINOGRAD_FILTER_TRANSFORM_2X2_7X7_NHWC) || defined(WINOGRAD_FILTER_TRANSFORM_2X1_7X1_NHWC) || defined(WINOGRAD_FILTER_TRANSFORM_1X2_1X7_NHWC)
+
/** This OpenCL kernel performs Winograd filter transform 7x7/7x1 or 1x7 when the data layout is NHWC and the output tile is 2x2/2x1 or 1x2
*
- * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
* @note If this kernel is used to perform Winograd filter transform 7x1, -DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL has to be passed at compile time
* @note If this kernel is used to perform Winograd filter transform 1x7, -DWINOGRAD_FILTER_TRANSFORM_VERTICAL has to be passed at compile time
* @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
@@ -1082,10 +504,12 @@ __kernel void winograd_filter_transform_4x4_5x5_nhwc(
* @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] SRC_DIM_Z The third (Z) dimension of the src tensor
*/
__kernel void winograd_filter_transform_2x2_7x7_nhwc(
TENSOR4D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst))
+ TENSOR3D_DECLARATION(dst),
+ const int SRC_DIM_Z)
{
Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DIM_Z);
@@ -1357,159 +781,13 @@ __kernel void winograd_filter_transform_2x2_7x7_nhwc(
*(__global DATA_TYPE *)(dst_addr + 63 * dst_stride_z) = out7.s7;
#endif // !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
}
-#endif // defined(SRC_DIM_Z)
+#endif // defined(WINOGRAD_FILTER_TRANSFORM_2X2_7X7_NHWC) || defined(WINOGRAD_FILTER_TRANSFORM_2X1_7X1_NHWC) || defined(WINOGRAD_FILTER_TRANSFORM_1X2_1X7_NHWC)
#if defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL)
-/** This OpenCL kernel performs Winograd filter transform 3x1 when the data layout is NCHW and the output tile is 2x1
- *
- * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
- * @note -DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL has to be passed at compile time to perform Winograd Filter Transform
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_filter_transform_2x1_3x1_nchw(
- TENSOR4D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst))
-{
- winograd_filter_transform_2x2_3x3_nchw(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_stride_w,
- src_step_w,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_offset_first_element_in_bytes);
-}
-
-/** This OpenCL kernel performs Winograd filter transform 3x1 when the data layout is NCHW and the output tile is 4x1
- *
- * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
- * @note -DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL has to be passed at compile time to perform Winograd Filter Transform
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_filter_transform_4x1_3x1_nchw(
- TENSOR4D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst))
-{
- winograd_filter_transform_4x4_3x3_nchw(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_stride_w,
- src_step_w,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_offset_first_element_in_bytes);
-}
-
-/** This OpenCL kernel performs Winograd filter transform 5x1 when the data layout is NCHW and the output tile is 4x1
- *
- * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
- * @note -DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL has to be passed at compile time to perform Winograd Filter Transform
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_filter_transform_4x1_5x1_nchw(
- TENSOR4D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst))
-{
- winograd_filter_transform_4x4_5x5_nchw(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_stride_w,
- src_step_w,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_offset_first_element_in_bytes);
-}
+#if defined(WINOGRAD_FILTER_TRANSFORM_4X1_3X1_NHWC)
/** This OpenCL kernel performs Winograd filter transform 3x1 when the data layout is NHWC and the output tile is 4x1
*
- * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
* @note -DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL has to be passed at compile time to perform Winograd Filter Transform
* @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
*
@@ -1531,10 +809,12 @@ __kernel void winograd_filter_transform_4x1_5x1_nchw(
* @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] SRC_DIM_Z The third (Z) dimension of the src tensor
*/
__kernel void winograd_filter_transform_4x1_3x1_nhwc(
TENSOR4D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst))
+ TENSOR3D_DECLARATION(dst),
+ const int SRC_DIM_Z)
{
winograd_filter_transform_4x4_3x3_nhwc(src_ptr,
src_stride_x,
@@ -1553,12 +833,14 @@ __kernel void winograd_filter_transform_4x1_3x1_nhwc(
dst_step_y,
dst_stride_z,
dst_step_z,
- dst_offset_first_element_in_bytes);
+ dst_offset_first_element_in_bytes,
+ SRC_DIM_Z);
}
+#endif // defined(WINOGRAD_FILTER_TRANSFORM_4X1_3X1_NHWC)
+#if defined(WINOGRAD_FILTER_TRANSFORM_4X1_5X1_NHWC)
/** This OpenCL kernel performs Winograd filter transform 5x1 when the data layout is NHWC and the output tile is 4x1
*
- * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
* @note -DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL has to be passed at compile time to perform Winograd Filter Transform
* @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
*
@@ -1580,10 +862,12 @@ __kernel void winograd_filter_transform_4x1_3x1_nhwc(
* @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] SRC_DIM_Z The third (Z) dimension of the src tensor
*/
__kernel void winograd_filter_transform_4x1_5x1_nhwc(
TENSOR4D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst))
+ TENSOR3D_DECLARATION(dst),
+ const int SRC_DIM_Z)
{
winograd_filter_transform_4x4_5x5_nhwc(src_ptr,
src_stride_x,
@@ -1602,12 +886,14 @@ __kernel void winograd_filter_transform_4x1_5x1_nhwc(
dst_step_y,
dst_stride_z,
dst_step_z,
- dst_offset_first_element_in_bytes);
+ dst_offset_first_element_in_bytes,
+ SRC_DIM_Z);
}
+#endif // defined(WINOGRAD_FILTER_TRANSFORM_4X1_5X1_NHWC)
+#if defined(WINOGRAD_FILTER_TRANSFORM_2X1_7X1_NHWC)
/** This OpenCL kernel performs Winograd filter transform 7x1 when the data layout is NHWC and the output tile is 2x1
*
- * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
* @note -DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL has to be passed at compile time to perform Winograd Filter Transform
* @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float.
*
@@ -1629,10 +915,12 @@ __kernel void winograd_filter_transform_4x1_5x1_nhwc(
* @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] SRC_DIM_Z The third (Z) dimension of the src tensor
*/
__kernel void winograd_filter_transform_2x1_7x1_nhwc(
TENSOR4D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst))
+ TENSOR3D_DECLARATION(dst),
+ const int SRC_DIM_Z)
{
winograd_filter_transform_2x2_7x7_nhwc(src_ptr,
src_stride_x,
@@ -1651,161 +939,16 @@ __kernel void winograd_filter_transform_2x1_7x1_nhwc(
dst_step_y,
dst_stride_z,
dst_step_z,
- dst_offset_first_element_in_bytes);
+ dst_offset_first_element_in_bytes,
+ SRC_DIM_Z);
}
+#endif // defined(WINOGRAD_FILTER_TRANSFORM_2X1_7X1_NHWC)
#endif // defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL)
#if defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
-/** This OpenCL kernel performs Winograd filter transform 1x3 when the data layout is NCHW and the output tile is 1x2
- *
- * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
- * @note -DWINOGRAD_FILTER_TRANSFORM_VERTICAL has to be passed at compile time to perform Winograd Filter Transform
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_filter_transform_1x2_1x3_nchw(
- TENSOR4D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst))
-{
- winograd_filter_transform_2x2_3x3_nchw(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_stride_w,
- src_step_w,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_offset_first_element_in_bytes);
-}
-
-/** This OpenCL kernel performs Winograd filter transform 1x3 when the data layout is NCHW and the output tile is 1x4
- *
- * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
- * @note -DWINOGRAD_FILTER_TRANSFORM_VERTICAL has to be passed at compile time to perform Winograd Filter Transform
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_filter_transform_1x4_1x3_nchw(
- TENSOR4D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst))
-{
- winograd_filter_transform_4x4_3x3_nchw(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_stride_w,
- src_step_w,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_offset_first_element_in_bytes);
-}
-
-/** This OpenCL kernel performs Winograd filter transform 1x5 when the data layout is NCHW and the output tile is 1x4
- *
- * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
- * @note -DWINOGRAD_FILTER_TRANSFORM_VERTICAL has to be passed at compile time to perform Winograd Filter Transform
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_filter_transform_1x4_1x5_nchw(
- TENSOR4D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst))
-{
- winograd_filter_transform_4x4_5x5_nchw(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_stride_w,
- src_step_w,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_offset_first_element_in_bytes);
-}
-
+#if defined(WINOGRAD_FILTER_TRANSFORM_1X4_1X3_NHWC)
/** This OpenCL kernel performs Winograd filter transform 1x3 when the data layout is NHWC and the output tile is 1x4
*
- * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
* @note -DWINOGRAD_FILTER_TRANSFORM_VERTICAL has to be passed at compile time to perform Winograd Filter Transform
* @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
*
@@ -1827,10 +970,12 @@ __kernel void winograd_filter_transform_1x4_1x5_nchw(
* @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] SRC_DIM_Z The third (Z) dimension of the src tensor
*/
__kernel void winograd_filter_transform_1x4_1x3_nhwc(
TENSOR4D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst))
+ TENSOR3D_DECLARATION(dst),
+ const int SRC_DIM_Z)
{
winograd_filter_transform_4x4_3x3_nhwc(src_ptr,
src_stride_x,
@@ -1849,12 +994,14 @@ __kernel void winograd_filter_transform_1x4_1x3_nhwc(
dst_step_y,
dst_stride_z,
dst_step_z,
- dst_offset_first_element_in_bytes);
+ dst_offset_first_element_in_bytes,
+ SRC_DIM_Z);
}
+#endif // defined(WINOGRAD_FILTER_TRANSFORM_1X4_1X3_NHWC)
+#if defined(WINOGRAD_FILTER_TRANSFORM_1X4_1X5_NHWC)
/** This OpenCL kernel performs Winograd filter transform 1x5 when the data layout is NHWC and the output tile is 1x4
*
- * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
* @note -DWINOGRAD_FILTER_TRANSFORM_VERTICAL has to be passed at compile time to perform Winograd Filter Transform
* @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
*
@@ -1876,10 +1023,12 @@ __kernel void winograd_filter_transform_1x4_1x3_nhwc(
* @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] SRC_DIM_Z The third (Z) dimension of the src tensor
*/
__kernel void winograd_filter_transform_1x4_1x5_nhwc(
TENSOR4D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst))
+ TENSOR3D_DECLARATION(dst),
+ const int SRC_DIM_Z)
{
winograd_filter_transform_4x4_5x5_nhwc(src_ptr,
src_stride_x,
@@ -1898,12 +1047,14 @@ __kernel void winograd_filter_transform_1x4_1x5_nhwc(
dst_step_y,
dst_stride_z,
dst_step_z,
- dst_offset_first_element_in_bytes);
+ dst_offset_first_element_in_bytes,
+ SRC_DIM_Z);
}
+#endif // defined(WINOGRAD_FILTER_TRANSFORM_1X4_1X5_NHWC)
+#if defined(WINOGRAD_FILTER_TRANSFORM_1X2_1X7_NHWC)
/** This OpenCL kernel performs Winograd filter transform 1x7 when the data layout is NHWC and the output tile is 1x2
*
- * @note In order to correctly split the input tensor in batches, its dimension across the Z axis (channels for NCHW, height for NHWC) must be passed at compile time using -DSRC_DIM_Z: e.g. -DSRC_DIM_Z=64
* @note -DWINOGRAD_FILTER_TRANSFORM_VERTICAL has to be passed at compile time to perform Winograd Filter Transform
* @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float.
*
@@ -1925,10 +1076,12 @@ __kernel void winograd_filter_transform_1x4_1x5_nhwc(
* @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] SRC_DIM_Z The third (Z) dimension of the src tensor
*/
__kernel void winograd_filter_transform_1x2_1x7_nhwc(
TENSOR4D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst))
+ TENSOR3D_DECLARATION(dst),
+ const int SRC_DIM_Z)
{
winograd_filter_transform_2x2_7x7_nhwc(src_ptr,
src_stride_x,
@@ -1947,6 +1100,8 @@ __kernel void winograd_filter_transform_1x2_1x7_nhwc(
dst_step_y,
dst_stride_z,
dst_step_z,
- dst_offset_first_element_in_bytes);
+ dst_offset_first_element_in_bytes,
+ SRC_DIM_Z);
}
+#endif // defined(WINOGRAD_FILTER_TRANSFORM_1X2_1X7_NHWC)
#endif // defined(WINOGRAD_FILTER_TRANSFORM_VERTICAL)
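For reference (host side, outside this patch): each NHWC filter-transform wrapper above is now compiled only when its own guard (e.g. WINOGRAD_FILTER_TRANSFORM_4X1_3X1_NHWC) is defined, in addition to the existing -DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL / -DWINOGRAD_FILTER_TRANSFORM_VERTICAL and -DDATA_TYPE options. A minimal sketch of the corresponding build-options string for the 4x1/3x1 horizontal variant; the program/device handles and the half data type are assumptions.

#include <CL/cl.h>

/* Sketch only: select exactly one guarded wrapper kernel via build options. */
static cl_int build_filter_transform_4x1_3x1_nhwc(cl_program program, cl_device_id device)
{
    const char *opts = "-DDATA_TYPE=half "
                       "-DWINOGRAD_FILTER_TRANSFORM_4X1_3X1_NHWC "
                       "-DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL";
    return clBuildProgram(program, 1, &device, opts, NULL, NULL);
}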
diff --git a/src/core/CL/cl_kernels/nhwc/winograd_input_transform.cl b/src/core/CL/cl_kernels/nhwc/winograd_input_transform.cl
new file mode 100644
index 0000000000..7341336b92
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/winograd_input_transform.cl
@@ -0,0 +1,1050 @@
+/*
+ * Copyright (c) 2018-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+#include "tile_helpers.h"
+
+#define OUTPUT_ROW_4x4_5x5(out, tmp, comm_fact) \
+ ({ \
+ comm_fact.s0 = tmp.s2 - (DATA_TYPE)4.25f * tmp.s4 + tmp.s6; \
+ comm_fact.s1 = tmp.s1 - (DATA_TYPE)4.25f * tmp.s3 + tmp.s5; \
+ comm_fact.s2 = (DATA_TYPE)2.5f * tmp.s3; \
+ comm_fact.s3 = (DATA_TYPE)0.5f * tmp.s1 + (DATA_TYPE)2.f * tmp.s5 - comm_fact.s2; \
+ comm_fact.s4 = (DATA_TYPE)0.25f * tmp.s2 - (DATA_TYPE)1.25f * tmp.s4 + tmp.s6; \
+ comm_fact.s5 = (DATA_TYPE)4.f * tmp.s2 + tmp.s6 - (DATA_TYPE)5.f * tmp.s4; \
+ comm_fact.s6 = (DATA_TYPE)2.f * tmp.s1 + (DATA_TYPE)0.5f * tmp.s5 - comm_fact.s2; \
+ \
+ out.s0 = tmp.s0 - tmp.s6 + (DATA_TYPE)5.25f * tmp.s4 - (DATA_TYPE)5.25f * tmp.s2; \
+ out.s1 = comm_fact.s0 + comm_fact.s1; \
+ out.s2 = comm_fact.s0 - comm_fact.s1; \
+ out.s3 = comm_fact.s3 + comm_fact.s4; \
+ out.s4 = comm_fact.s4 - comm_fact.s3; \
+ out.s5 = comm_fact.s5 + comm_fact.s6; \
+ out.s6 = comm_fact.s5 - comm_fact.s6; \
+ out.s7 = tmp.s7 - tmp.s1 + (DATA_TYPE)5.25f * tmp.s3 - (DATA_TYPE)5.25f * tmp.s5; \
+ })
+
+#define OUTPUT_ROW_2x2_7x7(out, tmp, comm_fact) \
+ ({ \
+ comm_fact.s0 = (DATA_TYPE)36.0f * tmp.s2 - (DATA_TYPE)13.0f * tmp.s4 + tmp.s6; \
+ comm_fact.s1 = (DATA_TYPE)36.0f * tmp.s1 - (DATA_TYPE)13.0f * tmp.s3 + (DATA_TYPE)1.0f * tmp.s5; \
+ comm_fact.s2 = (DATA_TYPE)9.0f * tmp.s2 - (DATA_TYPE)10.0f * tmp.s4 + tmp.s6; \
+ comm_fact.s3 = (DATA_TYPE)18.0f * tmp.s1 - (DATA_TYPE)20.0f * tmp.s3 + (DATA_TYPE)2.0f * tmp.s5; \
+ comm_fact.s4 = (DATA_TYPE)4.0f * tmp.s2 - (DATA_TYPE)5.0f * tmp.s4 + tmp.s6; \
+ comm_fact.s5 = (DATA_TYPE)12.0f * tmp.s1 - (DATA_TYPE)15.0f * tmp.s3 + (DATA_TYPE)3.0f * tmp.s5; \
+ out.s0 = -(DATA_TYPE)36.0f * tmp.s0 + (DATA_TYPE)49.0f * tmp.s2 + -(DATA_TYPE)14.0f * tmp.s4 + tmp.s6; \
+ out.s1 = comm_fact.s0 - comm_fact.s1; \
+ out.s2 = comm_fact.s0 + comm_fact.s1; \
+ out.s3 = comm_fact.s2 - comm_fact.s3; \
+ out.s4 = comm_fact.s2 + comm_fact.s3; \
+ out.s5 = comm_fact.s4 - comm_fact.s5; \
+ out.s6 = comm_fact.s4 + comm_fact.s5; \
+ out.s7 = -(DATA_TYPE)36.0f * tmp.s1 + (DATA_TYPE)0.0f * tmp.s2 + (DATA_TYPE)49.0f * tmp.s3 - (DATA_TYPE)14.0f * tmp.s5 + tmp.s7; \
+ })
+
+#if defined(PAD_LEFT) && defined(PAD_TOP) && defined(OUTPUT_TILE_W) && defined(OUTPUT_TILE_H)
+
+#if defined(NHWC)
+#if defined(WINOGRAD_INPUT_TRANSFORM_4X4_3X3_STEPZ1_NHWC) || defined(WINOGRAD_INPUT_TRANSFORM_4X1_3X1_STEPZ1_NHWC) || defined(WINOGRAD_INPUT_TRANSFORM_1X4_1X3_STEPZ1_NHWC)
+//! @cond Doxygen_Suppress
+/** This OpenCL kernel computes the input transform when the output tile is 4x4, 4x1 or 1x4, the filter size 3x3, 3x1 or 1x3 and the data layout is NHWC
+ *
+ * @note Data layout supported: NHWC
+ * @note Data type supported: F32/F16
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=half)
+ * @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
+ * @note The spatial dimensions of the source tensor must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT (e.g. -DSRC_WIDTH=96, -DSRC_HEIGHT=64)
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
+ * @note If this kernel is used to perform Winograd input transform 3x1, -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note If this kernel is used to perform Winograd input transform 1x3, -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] _ISRC_WIDTH The src tensor's width
+ * @param[in] _ISRC_HEIGHT The src tensor's height
+ * @param[in] _INUM_TILES_X The number of tiles in the X dimension
+ * @param[in] _INUM_TILES_Y The number of tiles in the Y dimension
+ */
+//! @endcond
+__kernel void winograd_input_transform_4x4_3x3_stepz1_nhwc(
+ TENSOR4D(src, BUFFER),
+ TENSOR4D(dst, BUFFER),
+ const int _ISRC_WIDTH,
+ const int _ISRC_HEIGHT,
+ const int _INUM_TILES_X,
+ const int _INUM_TILES_Y)
+{
+ const int cout = GET_SPATIAL_IDX(0, N0, 0); // OFM
+ const int mout = GET_SPATIAL_IDX(1, 1, 0); // NUM_TILES_X x NUM_TILES_Y
+#if defined(IS_BATCHED)
+ const int bout = GET_SPATIAL_IDX(2, 1, 0); // BATCH SIZE IDX
+#else // defined(IS_BATCHED)
+ const int bout = 0; // BATCH SIZE IDX
+#endif // defined(IS_BATCHED)
+
+ int x = (mout % _INUM_TILES_X) * OUTPUT_TILE_W;
+ int y = (mout / _INUM_TILES_X) * OUTPUT_TILE_H;
+ x -= PAD_LEFT;
+ y -= PAD_TOP;
+
+#if defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+
+ TILE(DATA_TYPE, 6, N0, in);
+ TILE(DATA_TYPE, 6, N0, out);
+
+ // Initialize the input tile
+ LOOP_UNROLLING(int, i, 0, 1, 6,
+ {
+ in[i].v = 0;
+ })
+
+#if defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
+ T_LOAD_NHWC(DATA_TYPE, 1, 6, N0, BUFFER, src, bout, y, x, cout, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, in);
+#else // defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
+ T_LOAD_NHWC(DATA_TYPE, 6, 1, N0, BUFFER, src, bout, y, x, cout, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, in);
+#endif // defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
+
+ TILE(DATA_TYPE, 6, N0, com);
+
+ LOOP_UNROLLING(int, i, 0, 1, 6,
+ {
+ in[i].v *= (DATA_TYPE)4.0f;
+ })
+
+ com[0].v = in[2].v - (DATA_TYPE)4.f * in[0].v;
+ com[1].v = in[3].v - (DATA_TYPE)4.f * in[1].v;
+ com[2].v = in[4].v - (DATA_TYPE)4.f * in[2].v;
+ com[3].v = in[5].v - (DATA_TYPE)4.f * in[3].v;
+ com[4].v = in[3].v - in[1].v;
+ com[4].v = com[4].v + com[4].v;
+ com[5].v = in[4].v - in[2].v;
+
+ out[0].v = com[2].v - com[0].v;
+ out[1].v = com[2].v + com[1].v;
+ out[2].v = com[2].v - com[1].v;
+ out[3].v = com[5].v + com[4].v;
+ out[4].v = com[5].v - com[4].v;
+ out[5].v = com[3].v - com[1].v;
+
+ TILE(uint, 6, 1, dst_indirect_y);
+
+ LOOP_UNROLLING(int, i, 0, 1, 6,
+ {
+ dst_indirect_y[i].v = mout + i *_INUM_TILES_X *_INUM_TILES_Y;
+ dst_indirect_y[i].v += bout *_INUM_TILES_X *_INUM_TILES_Y * 6;
+ })
+
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 6, N0, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);
+
+#else // defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+
+ TILE(DATA_TYPE, 36, N0, in);
+
+ // Initialize the input tile
+ LOOP_UNROLLING(int, i, 0, 1, 36,
+ {
+ in[i].v = 0;
+ })
+
+ // Load the tile from a NHWC tensor
+ T_LOAD_NHWC(DATA_TYPE, 6, 6, N0, BUFFER, src, bout, y, x, cout, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, in);
+
+ TILE(DATA_TYPE, 6, N0, com);
+ TILE(DATA_TYPE, 36, N0, tmp);
+
+ LOOP_UNROLLING(int, i, 0, 1, 6,
+ {
+ com[0].v = in[2 * 6 + i].v - (DATA_TYPE)4.0f * in[0 * 6 + i].v;
+ com[1].v = in[3 * 6 + i].v - (DATA_TYPE)4.0f * in[1 * 6 + i].v;
+ com[2].v = in[4 * 6 + i].v - (DATA_TYPE)4.0f * in[2 * 6 + i].v;
+ com[3].v = in[5 * 6 + i].v - (DATA_TYPE)4.0f * in[3 * 6 + i].v;
+ com[4].v = in[3 * 6 + i].v - in[1 * 6 + i].v;
+ com[4].v = com[4].v + com[4].v;
+ com[5].v = in[4 * 6 + i].v - in[2 * 6 + i].v;
+ tmp[i + 0 * 6].v = com[2].v - com[0].v;
+ tmp[i + 1 * 6].v = com[2].v + com[1].v;
+ tmp[i + 2 * 6].v = com[2].v - com[1].v;
+ tmp[i + 3 * 6].v = com[5].v + com[4].v;
+ tmp[i + 4 * 6].v = com[5].v - com[4].v;
+ tmp[i + 5 * 6].v = com[3].v - com[1].v;
+ })
+
+ TILE(DATA_TYPE, 36, N0, out);
+
+ LOOP_UNROLLING(int, i, 0, 1, 6,
+ {
+ com[0].v = tmp[i * 6 + 2].v - (DATA_TYPE)4.f *tmp[i * 6 + 0].v;
+ com[1].v = tmp[i * 6 + 3].v - (DATA_TYPE)4.f *tmp[i * 6 + 1].v;
+ com[2].v = tmp[i * 6 + 4].v - (DATA_TYPE)4.f *tmp[i * 6 + 2].v;
+ com[3].v = tmp[i * 6 + 5].v - (DATA_TYPE)4.f *tmp[i * 6 + 3].v;
+ com[4].v = tmp[i * 6 + 3].v - tmp[i * 6 + 1].v;
+ com[4].v = com[4].v + com[4].v;
+ com[5].v = tmp[i * 6 + 4].v - tmp[i * 6 + 2].v;
+ out[i * 6 + 0].v = com[2].v - com[0].v;
+ out[i * 6 + 1].v = com[2].v + com[1].v;
+ out[i * 6 + 2].v = com[2].v - com[1].v;
+ out[i * 6 + 3].v = com[5].v + com[4].v;
+ out[i * 6 + 4].v = com[5].v - com[4].v;
+ out[i * 6 + 5].v = com[3].v - com[1].v;
+ })
+
+ // Compute destination address
+ TILE(uint, 36, 1, dst_indirect_y);
+
+ LOOP_UNROLLING(int, i, 0, 1, 36,
+ {
+ dst_indirect_y[i].v = mout + i *_INUM_TILES_X *_INUM_TILES_Y;
+ dst_indirect_y[i].v += bout *_INUM_TILES_X *_INUM_TILES_Y * 36;
+ })
+
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 36, N0, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);
+#endif // defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+}
+#endif // defined(WINOGRAD_INPUT_TRANSFORM_4X4_3X3_STEPZ1_NHWC) || defined(WINOGRAD_INPUT_TRANSFORM_4X1_3X1_STEPZ1_NHWC) || defined(WINOGRAD_INPUT_TRANSFORM_1X4_1X3_STEPZ1_NHWC)
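For reference (host side, outside this patch): like the filter transform, the NHWC input-transform kernel takes its geometry as trailing run-time arguments (_ISRC_WIDTH, _ISRC_HEIGHT, _INUM_TILES_X, _INUM_TILES_Y), while the padding, output tile size, data type and per-kernel guard remain compile-time -D options. A minimal sketch of setting the four trailing integers; first_free_idx is illustrative, as the preceding argument count depends on the TENSOR4D macro expansion.

#include <CL/cl.h>

/* Sketch only: pass the four trailing geometry arguments of
 * winograd_input_transform_4x4_3x3_stepz1_nhwc. */
static cl_int set_input_transform_geometry(cl_kernel kernel, cl_uint first_free_idx,
                                           cl_int src_w, cl_int src_h,
                                           cl_int num_tiles_x, cl_int num_tiles_y)
{
    const cl_int vals[4] = { src_w, src_h, num_tiles_x, num_tiles_y };
    for (cl_uint i = 0; i < 4; ++i)
    {
        cl_int err = clSetKernelArg(kernel, first_free_idx + i, sizeof(cl_int), &vals[i]);
        if (err != CL_SUCCESS)
        {
            return err;
        }
    }
    return CL_SUCCESS;
}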
+
+#if defined(WINOGRAD_INPUT_TRANSFORM_4X4_5X5_STEPZ1_NHWC) || defined(WINOGRAD_INPUT_TRANSFORM_4X1_5X1_STEPZ1_NHWC) || defined(WINOGRAD_INPUT_TRANSFORM_1X4_1X5_STEPZ1_NHWC)
+//! @cond Doxygen_Suppress
+/** This OpenCL kernel computes the input transform when the kernel size is 5x5/5x1 or 1x5 and the output tile is 4x4/4x1 or 1x4 when the data layout is NHWC
+ *
+ * @note Data layout supported: NHWC
+ * @note Data type supported: F32/F16
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=half)
+ * @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
+ * @note The spatial dimensions of the source tensor must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT (e.g. -DSRC_WIDTH=96, -DSRC_HEIGHT=64)
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
+ * @note If this kernel is used to perform Winograd input transform 5x1, -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note If this kernel is used to perform Winograd input transform 1x5, -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] _ISRC_WIDTH The src tensor's width
+ * @param[in] _ISRC_HEIGHT The src tensor's height
+ * @param[in] _INUM_TILES_X The number of tiles in the X dimension
+ * @param[in] _INUM_TILES_Y The number of tiles in the Y dimension
+ */
+//! @endcond
+__kernel void winograd_input_transform_4x4_5x5_stepz1_nhwc(
+ TENSOR4D(src, BUFFER),
+ TENSOR4D(dst, BUFFER),
+ const int _ISRC_WIDTH,
+ const int _ISRC_HEIGHT,
+ const int _INUM_TILES_X,
+ const int _INUM_TILES_Y)
+{
+ const int cout = GET_SPATIAL_IDX(0, 1, 0); // OFM
+ const int mout = GET_SPATIAL_IDX(1, 1, 0); // NUM_TILES_X x NUM_TILES_Y
+#if defined(IS_BATCHED)
+ const int bout = GET_SPATIAL_IDX(2, 1, 0); // BATCH SIZE IDX
+#else // defined(IS_BATCHED)
+ const int bout = 0; // BATCH SIZE IDX
+#endif // defined(IS_BATCHED)
+
+ int x = (mout % _INUM_TILES_X) * OUTPUT_TILE_W;
+ int y = (mout / _INUM_TILES_X) * OUTPUT_TILE_H;
+ x -= PAD_LEFT;
+ y -= PAD_TOP;
+
+#if defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+
+ TILE(DATA_TYPE, 8, 1, in);
+ TILE(DATA_TYPE, 8, 1, out);
+
+ // Initialize the input tile
+ LOOP_UNROLLING(int, i, 0, 1, 8,
+ {
+ in[i].v = 0;
+ })
+
+#if defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
+ T_LOAD_NHWC(DATA_TYPE, 1, 8, N0, BUFFER, src, bout, y, x, cout, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, in);
+#else // defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
+ T_LOAD_NHWC(DATA_TYPE, 8, 1, N0, BUFFER, src, bout, y, x, cout, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, in);
+#endif // defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
+
+ TILE(DATA_TYPE, 1, 8, com);
+
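+ // The expressions below apply the 8-point Winograd input transform along one dimension;
+ // com[] holds the sub-expressions shared between pairs of output elements.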
+ com[0].s[0] = in[2].v - (DATA_TYPE)4.25f * in[4].v + in[6].v;
+ com[0].s[1] = in[1].v - (DATA_TYPE)4.25f * in[3].v + in[5].v;
+ com[0].s[2] = (DATA_TYPE)0.5f * in[1].v - (DATA_TYPE)2.5f * in[3].v + (DATA_TYPE)2.0f * in[5].v;
+ com[0].s[3] = (DATA_TYPE)0.25f * in[2].v - (DATA_TYPE)1.25f * in[4].v + in[6].v;
+ com[0].s[4] = (DATA_TYPE)4.0f * in[2].v - (DATA_TYPE)5.0f * in[4].v + in[6].v;
+ com[0].s[5] = (DATA_TYPE)2.0f * in[1].v - (DATA_TYPE)2.5f * in[3].v + (DATA_TYPE)0.5f * in[5].v;
+ out[0].s[0] = in[0].v - (DATA_TYPE)5.25f * in[2].v + (DATA_TYPE)5.25f * in[4].v - in[6].v;
+ out[1].s[0] = com[0].s[0] + com[0].s[1];
+ out[2].s[0] = com[0].s[0] - com[0].s[1];
+ out[3].s[0] = com[0].s[3] + com[0].s[2];
+ out[4].s[0] = com[0].s[3] - com[0].s[2];
+ out[5].s[0] = com[0].s[4] + com[0].s[5];
+ out[6].s[0] = com[0].s[4] - com[0].s[5];
+ out[7].s[0] = -in[1].v + (DATA_TYPE)5.25f * in[3].v - (DATA_TYPE)5.25f * in[5].v + in[7].v;
+
+ TILE(uint, 8, 1, dst_indirect_y);
+
+ LOOP_UNROLLING(int, i, 0, 1, 8,
+ {
+ dst_indirect_y[i].v = mout + i *_INUM_TILES_X *_INUM_TILES_Y;
+ dst_indirect_y[i].v += bout *_INUM_TILES_X *_INUM_TILES_Y * 8;
+ })
+
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 8, 1, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);
+
+#else // defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+
+ TILE(DATA_TYPE, 64, 1, in);
+ TILE(DATA_TYPE, 64, 1, out);
+
+ // Initialize the input tile
+ LOOP_UNROLLING(int, i, 0, 1, 64,
+ {
+ in[i].v = 0;
+ })
+
+ // Load the tile from a NHWC tensor
+ T_LOAD_NHWC(DATA_TYPE, 8, 8, 1, BUFFER, src, bout, y, x, cout, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, in);
+
+ TILE(DATA_TYPE, 8, 8, com);
+
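+ // The 2D transform is separable: the first pass below transforms the 8x8 tile along the tile height
+ // (partial results in com[]/tmp[]), the second pass transforms the intermediate rows along the tile width.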
+ LOOP_UNROLLING(int, i, 0, 1, 8,
+ {
+ com[0].s[i] = in[2 * 8 + i].s[0] - (DATA_TYPE)4.25f * in[4 * 8 + i].s[0] + in[6 * 8 + i].s[0]; // x
+ com[1].s[i] = in[1 * 8 + i].s[0] - (DATA_TYPE)4.25f * in[3 * 8 + i].s[0] + in[5 * 8 + i].s[0]; // x
+ com[2].s[i] = (DATA_TYPE)0.25f * in[2 * 8 + i].s[0] - (DATA_TYPE)1.25f * in[4 * 8 + i].s[0] + in[6 * 8 + i].s[0]; // x
+ com[3].s[i] = (DATA_TYPE)0.5f * in[1 * 8 + i].s[0] - (DATA_TYPE)2.5f * in[3 * 8 + i].s[0] + (DATA_TYPE)2.0f * in[5 * 8 + i].s[0]; // x
+ com[4].s[i] = (DATA_TYPE)4.0f * in[2 * 8 + i].s[0] - (DATA_TYPE)5.0f * in[4 * 8 + i].s[0] + in[6 * 8 + i].s[0];
+ com[5].s[i] = (DATA_TYPE)2.0f * in[1 * 8 + i].s[0] - (DATA_TYPE)2.5f * in[3 * 8 + i].s[0] + (DATA_TYPE)0.5f * in[5 * 8 + i].s[0];
+ com[6].s[i] = in[0 * 8 + i].s[0] - (DATA_TYPE)5.25f * in[2 * 8 + i].s[0] + (DATA_TYPE)5.25f * in[4 * 8 + i].s[0] - in[6 * 8 + i].s[0];
+ com[7].s[i] = -in[1 * 8 + i].s[0] + (DATA_TYPE)5.25f * in[3 * 8 + i].s[0] - (DATA_TYPE)5.25f * in[5 * 8 + i].s[0] + in[7 * 8 + i].s[0];
+ })
+
+ TILE(DATA_TYPE, 8, 8, tmp);
+ tmp[0].v = com[6].v;
+ tmp[1].v = com[0].v + com[1].v;
+ tmp[2].v = com[0].v - com[1].v;
+ tmp[3].v = com[2].v + com[3].v;
+ tmp[4].v = com[2].v - com[3].v;
+ tmp[5].v = com[4].v + com[5].v;
+ tmp[6].v = com[4].v - com[5].v;
+ tmp[7].v = com[7].v;
+
+ LOOP_UNROLLING(int, i, 0, 1, 8,
+ {
+ com[0].s[0] = tmp[i].s[2] - (DATA_TYPE)4.25f * tmp[i].s[4] + tmp[i].s[6];
+ com[0].s[1] = tmp[i].s[1] - (DATA_TYPE)4.25f * tmp[i].s[3] + tmp[i].s[5];
+ com[0].s[2] = (DATA_TYPE)0.5f * tmp[i].s[1] - (DATA_TYPE)2.5f * tmp[i].s[3] + (DATA_TYPE)2.0f * tmp[i].s[5];
+ com[0].s[3] = (DATA_TYPE)0.25f * tmp[i].s[2] - (DATA_TYPE)1.25f * tmp[i].s[4] + tmp[i].s[6];
+ com[0].s[4] = (DATA_TYPE)4.0f * tmp[i].s[2] - (DATA_TYPE)5.0f * tmp[i].s[4] + tmp[i].s[6];
+ com[0].s[5] = (DATA_TYPE)2.0f * tmp[i].s[1] - (DATA_TYPE)2.5f * tmp[i].s[3] + (DATA_TYPE)0.5f * tmp[i].s[5];
+ out[i * 8 + 0].s[0] = tmp[i].s[0] - (DATA_TYPE)5.25f * tmp[i].s[2] + (DATA_TYPE)5.25f * tmp[i].s[4] - tmp[i].s[6];
+ out[i * 8 + 1].s[0] = com[0].s[0] + com[0].s[1];
+ out[i * 8 + 2].s[0] = com[0].s[0] - com[0].s[1];
+ out[i * 8 + 3].s[0] = com[0].s[3] + com[0].s[2];
+ out[i * 8 + 4].s[0] = com[0].s[3] - com[0].s[2];
+ out[i * 8 + 5].s[0] = com[0].s[4] + com[0].s[5];
+ out[i * 8 + 6].s[0] = com[0].s[4] - com[0].s[5];
+ out[i * 8 + 7].s[0] = -tmp[i].s[1] + (DATA_TYPE)5.25f * tmp[i].s[3] - (DATA_TYPE)5.25f * tmp[i].s[5] + tmp[i].s[7];
+ })
+
+ TILE(uint, 64, 1, dst_indirect_y);
+
+ LOOP_UNROLLING(int, i, 0, 1, 64,
+ {
+ dst_indirect_y[i].v = mout + i *_INUM_TILES_X *_INUM_TILES_Y;
+ dst_indirect_y[i].v += bout *_INUM_TILES_X *_INUM_TILES_Y * 64;
+ })
+
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 64, 1, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);
+
+#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+}
+#endif // defined(WINOGRAD_INPUT_TRANSFORM_4X4_5X5_STEPZ1_NHWC) || defined(WINOGRAD_INPUT_TRANSFORM_4X1_5X1_STEPZ1_NHWC) || defined(WINOGRAD_INPUT_TRANSFORM_1X4_1X5_STEPZ1_NHWC)
+
+#if defined(WINOGRAD_INPUT_TRANSFORM_2X2_7X7_STEPZ1_NHWC) || defined(WINOGRAD_INPUT_TRANSFORM_2X1_7X1_STEPZ1_NHWC) || defined(WINOGRAD_INPUT_TRANSFORM_1X2_1X7_STEPZ1_NHWC)
+//! @cond Doxygen_Suppress
+/** This OpenCL kernel computes the input transform when the kernel size is 7x7/7x1/1x7 and the output tile is 2x2/2x1/1x2 when the data layout is NHWC
+ *
+ * @note Data layout supported: NHWC
+ * @note Data type supported: F32/F16
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=half)
+ * @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
+ * @note The spatial dimensions of the source tensor must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT (e.g. -DSRC_WIDTH=96, -DSRC_HEIGHT=64)
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
+ * @note If this kernel is used to perform Winograd input transform 7x1, -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note If this kernel is used to perform Winograd input transform 1x7, -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] _ISRC_WIDTH The src tensor's width
+ * @param[in] _ISRC_HEIGHT The src tensor's height
+ * @param[in] _INUM_TILES_X The number of tiles in the X dimension
+ * @param[in] _INUM_TILES_Y The number of tiles in the Y dimension
+ */
+//! @endcond
+__kernel void winograd_input_transform_2x2_7x7_stepz1_nhwc(
+ TENSOR4D(src, BUFFER),
+ TENSOR4D(dst, BUFFER),
+ const int _ISRC_WIDTH,
+ const int _ISRC_HEIGHT,
+ const int _INUM_TILES_X,
+ const int _INUM_TILES_Y)
+{
+ const int cout = GET_SPATIAL_IDX(0, 1, 0); // OFM
+ const int mout = GET_SPATIAL_IDX(1, 1, 0); // NUM_TILES_X x NUM_TILES_Y
+#if defined(IS_BATCHED)
+ const int bout = GET_SPATIAL_IDX(2, 1, 0); // BATCH SIZE IDX
+#else // defined(IS_BATCHED)
+ const int bout = 0; // BATCH SIZE IDX
+#endif // defined(IS_BATCHED)
+
+ int x = (mout % _INUM_TILES_X) * OUTPUT_TILE_W;
+ int y = (mout / _INUM_TILES_X) * OUTPUT_TILE_H;
+ x -= PAD_LEFT;
+ y -= PAD_TOP;
+
+#if defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+
+ TILE(DATA_TYPE, 8, 1, in);
+ TILE(DATA_TYPE, 8, 1, out);
+
+ // Initialize the input tile
+ LOOP_UNROLLING(int, i, 0, 1, 8,
+ {
+ in[i].v = 0;
+ })
+
+#if defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
+ T_LOAD_NHWC(DATA_TYPE, 1, 8, 1, BUFFER, src, bout, y, x, cout, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, in);
+#else // defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
+ T_LOAD_NHWC(DATA_TYPE, 8, 1, 1, BUFFER, src, bout, y, x, cout, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, in);
+#endif // defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
+
+ LOOP_UNROLLING(int, i, 0, 1, 8,
+ {
+ in[i].v *= (DATA_TYPE) - 36.0f;
+ })
+
+ TILE(DATA_TYPE, 1, 8, com) = { { { 0 } } };
+
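+ // The expressions below apply the 8-point input transform for the 2x2/7x7 case to the pre-scaled
+ // inputs (see the -36 scaling above); com[] holds the sub-expressions shared between pairs of output elements.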
+ com[0].s[0] = (DATA_TYPE)36.0f * in[2].v - (DATA_TYPE)13.0f * in[4].v + in[6].v;
+ com[0].s[1] = (DATA_TYPE)36.0f * in[1].v - (DATA_TYPE)13.0f * in[3].v + (DATA_TYPE)1.0f * in[5].v;
+ com[0].s[2] = (DATA_TYPE)9.0f * in[2].v - (DATA_TYPE)10.0f * in[4].v + in[6].v;
+ com[0].s[3] = (DATA_TYPE)18.0f * in[1].v - (DATA_TYPE)20.0f * in[3].v + (DATA_TYPE)2.0f * in[5].v;
+ com[0].s[4] = (DATA_TYPE)4.0f * in[2].v - (DATA_TYPE)5.0f * in[4].v + in[6].v;
+ com[0].s[5] = (DATA_TYPE)12.0f * in[1].v - (DATA_TYPE)15.0f * in[3].v + (DATA_TYPE)3.0f * in[5].v;
+ out[0].s[0] = (DATA_TYPE)-36.0f * in[0].v + (DATA_TYPE)49.0f * in[2].v - (DATA_TYPE)14.0f * in[4].v + in[6].v;
+ out[1].s[0] = com[0].s[0] - com[0].s[1];
+ out[2].s[0] = com[0].s[0] + com[0].s[1];
+ out[3].s[0] = com[0].s[2] - com[0].s[3];
+ out[4].s[0] = com[0].s[2] + com[0].s[3];
+ out[5].s[0] = com[0].s[4] - com[0].s[5];
+ out[6].s[0] = com[0].s[4] + com[0].s[5];
+ out[7].s[0] = -(DATA_TYPE)36.0f * in[1].v + (DATA_TYPE)0.0f * in[2].v + (DATA_TYPE)49.0f * in[3].v - (DATA_TYPE)14.0f * in[5].v + in[7].v;
+
+ TILE(uint, 8, 1, dst_indirect_y);
+
+ LOOP_UNROLLING(int, i, 0, 1, 8,
+ {
+ dst_indirect_y[i].v = mout + i *_INUM_TILES_X *_INUM_TILES_Y;
+ dst_indirect_y[i].v += bout *_INUM_TILES_X *_INUM_TILES_Y * 8;
+ })
+
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 8, 1, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);
+
+#else // defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+
+ TILE(DATA_TYPE, 64, 1, in);
+ TILE(DATA_TYPE, 64, 1, out);
+
+ // Initialize the input tile
+ LOOP_UNROLLING(int, i, 0, 1, 64,
+ {
+ in[i].v = 0;
+ })
+
+ // Load the tile from a NHWC tensor
+ T_LOAD_NHWC(DATA_TYPE, 8, 8, 1, BUFFER, src, bout, y, x, cout, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, in);
+
+ TILE(DATA_TYPE, 8, 8, com);
+
+ LOOP_UNROLLING(int, i, 0, 1, 8,
+ {
+ com[0].s[i] = (DATA_TYPE)36.0f * in[2 * 8 + i].s[0] - (DATA_TYPE)13.0f * in[4 * 8 + i].s[0] + in[6 * 8 + i].s[0];
+ com[1].s[i] = (DATA_TYPE)36.0f * in[1 * 8 + i].s[0] - (DATA_TYPE)13.0f * in[3 * 8 + i].s[0] + in[5 * 8 + i].s[0];
+ com[2].s[i] = (DATA_TYPE)9.0f * in[2 * 8 + i].s[0] - (DATA_TYPE)10.0f * in[4 * 8 + i].s[0] + in[6 * 8 + i].s[0];
+ com[3].s[i] = (DATA_TYPE)18.0f * in[1 * 8 + i].s[0] - (DATA_TYPE)20.0f * in[3 * 8 + i].s[0] + (DATA_TYPE)2.0f * in[5 * 8 + i].s[0];
+ com[4].s[i] = (DATA_TYPE)4.0f * in[2 * 8 + i].s[0] - (DATA_TYPE)5.0f * in[4 * 8 + i].s[0] + in[6 * 8 + i].s[0];
+ com[5].s[i] = (DATA_TYPE)12.0f * in[1 * 8 + i].s[0] - (DATA_TYPE)15.0f * in[3 * 8 + i].s[0] + (DATA_TYPE)3.0f * in[5 * 8 + i].s[0];
+ com[6].s[i] = (DATA_TYPE)49.0f * in[2 * 8 + i].s[0] - (DATA_TYPE)36.0f * in[0 * 8 + i].s[0] + in[6 * 8 + i].s[0] - (DATA_TYPE)14.0f * in[4 * 8 + i].s[0];
+ com[7].s[i] = (DATA_TYPE)49.0f * in[3 * 8 + i].s[0] - (DATA_TYPE)36.0f * in[1 * 8 + i].s[0] + in[7 * 8 + i].s[0] - (DATA_TYPE)14.0f * in[5 * 8 + i].s[0];
+ })
+
+ TILE(DATA_TYPE, 8, 8, tmp);
+ tmp[0].v = com[6].v;
+ tmp[1].v = com[0].v - com[1].v;
+ tmp[2].v = com[0].v + com[1].v;
+ tmp[3].v = com[2].v - com[3].v;
+ tmp[4].v = com[2].v + com[3].v;
+ tmp[5].v = com[4].v - com[5].v;
+ tmp[6].v = com[4].v + com[5].v;
+ tmp[7].v = com[7].v;
+
+ LOOP_UNROLLING(int, i, 0, 1, 8,
+ {
+ com[0].s[0] = (DATA_TYPE)36.0f * tmp[i].s[2] - (DATA_TYPE)13.0f * tmp[i].s[4] + tmp[i].s[6];
+ com[0].s[1] = (DATA_TYPE)36.0f * tmp[i].s[1] - (DATA_TYPE)13.0f * tmp[i].s[3] + (DATA_TYPE)1.0f * tmp[i].s[5];
+ com[0].s[2] = (DATA_TYPE)9.0f * tmp[i].s[2] - (DATA_TYPE)10.0f * tmp[i].s[4] + tmp[i].s[6];
+ com[0].s[3] = (DATA_TYPE)18.0f * tmp[i].s[1] - (DATA_TYPE)20.0f * tmp[i].s[3] + (DATA_TYPE)2.0f * tmp[i].s[5];
+ com[0].s[4] = (DATA_TYPE)4.0f * tmp[i].s[2] - (DATA_TYPE)5.0f * tmp[i].s[4] + tmp[i].s[6];
+ com[0].s[5] = (DATA_TYPE)12.0f * tmp[i].s[1] - (DATA_TYPE)15.0f * tmp[i].s[3] + (DATA_TYPE)3.0f * tmp[i].s[5];
+ out[i * 8 + 0].s[0] = (DATA_TYPE)-36.0f * tmp[i].s[0] + (DATA_TYPE)49.0f * tmp[i].s[2] - (DATA_TYPE)14.0f * tmp[i].s[4] + tmp[i].s[6];
+ out[i * 8 + 1].s[0] = com[0].s[0] - com[0].s[1];
+ out[i * 8 + 2].s[0] = com[0].s[0] + com[0].s[1];
+ out[i * 8 + 3].s[0] = com[0].s[2] - com[0].s[3];
+ out[i * 8 + 4].s[0] = com[0].s[2] + com[0].s[3];
+ out[i * 8 + 5].s[0] = com[0].s[4] - com[0].s[5];
+ out[i * 8 + 6].s[0] = com[0].s[4] + com[0].s[5];
+ out[i * 8 + 7].s[0] = -(DATA_TYPE)36.0f * tmp[i].s[1] + (DATA_TYPE)0.0f * tmp[i].s[2] + (DATA_TYPE)49.0f * tmp[i].s[3] - (DATA_TYPE)14.0f * tmp[i].s[5] + tmp[i].s[7];
+ })
+
+ TILE(uint, 64, 1, dst_indirect_y);
+
+ LOOP_UNROLLING(int, i, 0, 1, 64,
+ {
+ dst_indirect_y[i].v = mout + i *_INUM_TILES_X *_INUM_TILES_Y;
+ dst_indirect_y[i].v += bout *_INUM_TILES_X *_INUM_TILES_Y * 64;
+ })
+
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 64, 1, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);
+
+#endif // defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
+}
+#endif // defined(WINOGRAD_INPUT_TRANSFORM_2X2_7X7_STEPZ1_NHWC) || defined(WINOGRAD_INPUT_TRANSFORM_2X1_7X1_STEPZ1_NHWC) || defined(WINOGRAD_INPUT_TRANSFORM_1X2_1X7_STEPZ1_NHWC)
+
+#if defined(WINOGRAD_INPUT_TRANSFORM_4X1_3X1_STEPZ1_NHWC)
+//! @cond Doxygen_Suppress
+/** This OpenCL kernel computes the input transform when the kernel size is 3x1 and the output tile is 4x1 for data layout NHWC
+ *
+ * @note Data layout supported: NHWC
+ * @note Data type supported: F32/F16
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=half)
+ * @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
+ * @note The spatial dimensions of the source tensor must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT (e.g. -DSRC_WIDTH=96, -DSRC_HEIGHT=64)
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
+ * @note If this kernel is used to perform Winograd input transform 3x1, -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note If this kernel is used to perform Winograd input transform 1x3, -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] _ISRC_WIDTH The src tensor's width
+ * @param[in] _ISRC_HEIGHT The src tensor's height
+ * @param[in] _INUM_TILES_X The number of tiles in the X dimension
+ * @param[in] _INUM_TILES_Y The number of tiles in the Y dimension
+ */
+//! @endcond
+__kernel void winograd_input_transform_4x1_3x1_stepz1_nhwc(
+ TENSOR4D(src, BUFFER),
+ TENSOR4D(dst, BUFFER),
+ const int _ISRC_WIDTH,
+ const int _ISRC_HEIGHT,
+ const int _INUM_TILES_X,
+ const int _INUM_TILES_Y)
+{
+ winograd_input_transform_4x4_3x3_stepz1_nhwc(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_stride_w,
+ dst_step_w,
+ dst_offset_first_element_in_bytes,
+ _ISRC_WIDTH,
+ _ISRC_HEIGHT,
+ _INUM_TILES_X,
+ _INUM_TILES_Y);
+}
+#endif // defined(WINOGRAD_INPUT_TRANSFORM_4X1_3X1_STEPZ1_NHWC)
+
+#if defined(WINOGRAD_INPUT_TRANSFORM_4X1_5X1_STEPZ1_NHWC)
+//! @cond Doxygen_Suppress
+/** This OpenCL kernel computes the input transform when the kernel size is 5x1 and the output tile is 4x1 for data layout NHWC
+ *
+ * @note Data layout supported: NHWC
+ * @note Data type supported: F32/F16
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=half)
+ * @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
+ * @note The spatial dimensions of the source tensor must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT (e.g. -DSRC_WIDTH=96, -DSRC_HEIGHT=64)
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
+ * @note If this kernel is used to perform Winograd input transform 5x1, -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note If this kernel is used to perform Winograd input transform 1x5, -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] _ISRC_WIDTH The src tensor's width
+ * @param[in] _ISRC_HEIGHT The src tensor's height
+ * @param[in] _INUM_TILES_X The number of tiles in the X dimension
+ * @param[in] _INUM_TILES_Y The number of tiles in the Y dimension
+ */
+//! @endcond
+__kernel void winograd_input_transform_4x1_5x1_stepz1_nhwc(
+ TENSOR4D(src, BUFFER),
+ TENSOR4D(dst, BUFFER),
+ const int _ISRC_WIDTH,
+ const int _ISRC_HEIGHT,
+ const int _INUM_TILES_X,
+ const int _INUM_TILES_Y)
+{
+ winograd_input_transform_4x4_5x5_stepz1_nhwc(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_stride_w,
+ dst_step_w,
+ dst_offset_first_element_in_bytes,
+ _ISRC_WIDTH,
+ _ISRC_HEIGHT,
+ _INUM_TILES_X,
+ _INUM_TILES_Y);
+}
+#endif // defined(WINOGRAD_INPUT_TRANSFORM_4X1_5X1_STEPZ1_NHWC)
+
+#if defined(WINOGRAD_INPUT_TRANSFORM_2X1_7X1_STEPZ1_NHWC)
+//! @cond Doxygen_Suppress
+/** This OpenCL kernel computes the input transform when the kernel size is 7x1 and the output tile is 2x1 for data layout NHWC
+ *
+ * @note Data layout supported: NHWC
+ * @note Data type supported: F32/F16
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=half)
+ * @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
+ * @note The spatial dimensions of the source tensor must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT (e.g. -DSRC_WIDTH=96, -DSRC_HEIGHT=64)
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
+ * @note If this kernel is used to perform Winograd input transform 7x1, -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note If this kernel is used to perform Winograd input transform 1x7, -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] _ISRC_WIDTH The src tensor's width
+ * @param[in] _ISRC_HEIGHT The src tensor's height
+ * @param[in] _INUM_TILES_X The number of tiles in the X dimension
+ * @param[in] _INUM_TILES_Y The number of tiles in the Y dimension
+ */
+//! @endcond
+__kernel void winograd_input_transform_2x1_7x1_stepz1_nhwc(
+ TENSOR4D(src, BUFFER),
+ TENSOR4D(dst, BUFFER),
+ const int _ISRC_WIDTH,
+ const int _ISRC_HEIGHT,
+ const int _INUM_TILES_X,
+ const int _INUM_TILES_Y)
+{
+ winograd_input_transform_2x2_7x7_stepz1_nhwc(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_stride_w,
+ dst_step_w,
+ dst_offset_first_element_in_bytes,
+ _ISRC_WIDTH,
+ _ISRC_HEIGHT,
+ _INUM_TILES_X,
+ _INUM_TILES_Y);
+}
+#endif // defined(WINOGRAD_INPUT_TRANSFORM_2X1_7X1_STEPZ1_NHWC)
+
+#if defined(WINOGRAD_INPUT_TRANSFORM_1X4_1X3_STEPZ1_NHWC)
+//! @cond Doxygen_Suppress
+/** This OpenCL kernel computes the input transform when the kernel size is 1x3 and the output tile is 1x4 for data layout NHWC
+ *
+ * @note Data layout supported: NHWC
+ * @note Data type supported: F32/F16
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=half)
+ * @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
+ * @note The spatial dimensions of the source tensor must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT (e.g. -DSRC_WIDTH=96, -DSRC_HEIGHT=64)
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
+ * @note If this kernel is used to perform Winograd input transform 3x1, -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note If this kernel is used to perform Winograd input transform 1x3, -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] _ISRC_WIDTH The src tensor's width
+ * @param[in] _ISRC_HEIGHT The src tensor's height
+ * @param[in] _INUM_TILES_X The number of tiles in the X dimension
+ * @param[in] _INUM_TILES_Y The number of tiles in the Y dimension
+ */
+//! @endcond
+__kernel void winograd_input_transform_1x4_1x3_stepz1_nhwc(
+ TENSOR4D(src, BUFFER),
+ TENSOR4D(dst, BUFFER),
+ const int _ISRC_WIDTH,
+ const int _ISRC_HEIGHT,
+ const int _INUM_TILES_X,
+ const int _INUM_TILES_Y)
+{
+ winograd_input_transform_4x4_3x3_stepz1_nhwc(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_stride_w,
+ dst_step_w,
+ dst_offset_first_element_in_bytes,
+ _ISRC_WIDTH,
+ _ISRC_HEIGHT,
+ _INUM_TILES_X,
+ _INUM_TILES_Y);
+}
+#endif // defined(WINOGRAD_INPUT_TRANSFORM_1X4_1X3_STEPZ1_NHWC)
+
+#if defined(WINOGRAD_INPUT_TRANSFORM_1X4_1X5_STEPZ1_NHWC)
+//! @cond Doxygen_Suppress
+/** This OpenCL kernel computes the input transform when the kernel size is 1x5 and the output tile is 1x4 for data layout NHWC
+ *
+ * @note Data layout supported: NHWC
+ * @note Data type supported: F32/F16
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=half)
+ * @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
+ * @note The spatial dimensions of the source tensor must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT (e.g. -DSRC_WIDTH=96, -DSRC_HEIGHT=64)
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
+ * @note If this kernel is used to perform Winograd input transform 5x1, -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note If this kernel is used to perform Winograd input transform 1x5, -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] _ISRC_WIDTH The src tensor's width
+ * @param[in] _ISRC_HEIGHT The src tensor's height
+ * @param[in] _INUM_TILES_X The number of tiles in the X dimension
+ * @param[in] _INUM_TILES_Y The number of tiles in the Y dimension
+ */
+//! @endcond
+__kernel void winograd_input_transform_1x4_1x5_stepz1_nhwc(
+ TENSOR4D(src, BUFFER),
+ TENSOR4D(dst, BUFFER),
+ const int _ISRC_WIDTH,
+ const int _ISRC_HEIGHT,
+ const int _INUM_TILES_X,
+ const int _INUM_TILES_Y)
+{
+ winograd_input_transform_4x4_5x5_stepz1_nhwc(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_stride_w,
+ dst_step_w,
+ dst_offset_first_element_in_bytes,
+ _ISRC_WIDTH,
+ _ISRC_HEIGHT,
+ _INUM_TILES_X,
+ _INUM_TILES_Y);
+}
+#endif // defined(WINOGRAD_INPUT_TRANSFORM_1X4_1X5_STEPZ1_NHWC)
+
+#if defined(WINOGRAD_INPUT_TRANSFORM_1X2_1X7_STEPZ1_NHWC)
+//! @cond Doxygen_Suppress
+/** This OpenCL kernel computes the input transform when the kernel size is 1x7 and the output tile is 1x2 for data layout NHWC
+ *
+ * @note Data layout supported: NHWC
+ * @note Data type supported: F32/F16
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=half)
+ * @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
+ * @note The spatial dimensions of the source tensor must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT (e.g. -DSRC_WIDTH=96, -DSRC_HEIGHT=64)
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
+ * @note If this kernel is used to perform Winograd input transform 7x1, -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note If this kernel is used to perform Winograd input transform 1x7, -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] _ISRC_WIDTH The src tensor's width
+ * @param[in] _ISRC_HEIGHT The src tensor's height
+ * @param[in] _INUM_TILES_X The number of tiles in the X dimension
+ * @param[in] _INUM_TILES_Y The number of tiles in the Y dimension
+ */
+//! @endcond
+__kernel void winograd_input_transform_1x2_1x7_stepz1_nhwc(
+ TENSOR4D(src, BUFFER),
+ TENSOR4D(dst, BUFFER),
+ const int _ISRC_WIDTH,
+ const int _ISRC_HEIGHT,
+ const int _INUM_TILES_X,
+ const int _INUM_TILES_Y)
+{
+ winograd_input_transform_2x2_7x7_stepz1_nhwc(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_stride_w,
+ dst_step_w,
+ dst_offset_first_element_in_bytes,
+ _ISRC_WIDTH,
+ _ISRC_HEIGHT,
+ _INUM_TILES_X,
+ _INUM_TILES_Y);
+}
+#endif // defined(WINOGRAD_INPUT_TRANSFORM_1X2_1X7_STEPZ1_NHWC)
+#endif // defined(NHWC)
+#endif // defined(NUM_TILES_X) && defined(PAD_LEFT) && defined(PAD_TOP) && defined(OUTPUT_TILE_W) && defined(OUTPUT_TILE_H)
diff --git a/src/core/CL/cl_kernels/nhwc/winograd_output_transform.cl b/src/core/CL/cl_kernels/nhwc/winograd_output_transform.cl
new file mode 100644
index 0000000000..9eb995fbb2
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/winograd_output_transform.cl
@@ -0,0 +1,1109 @@
+/*
+ * Copyright (c) 2018-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "activation_float_helpers.h"
+#include "helpers.h"
+#include "tile_helpers.h"
+
+#if defined(OUTPUT_TILE_W) && defined(OUTPUT_TILE_H)
+#if defined(VEC_SIZE) && VEC_SIZE == 2
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_2X2_7X7_NHWC) || defined(WINOGRAD_OUTPUT_TRANSFORM_2X1_7X1_NHWC) || defined(WINOGRAD_OUTPUT_TRANSFORM_1X2_1X7_NHWC)
+/** This OpenCL kernel performs Winograd output transform when the output tile is 2x2/2x1 or 1x2, the filter size 7x7/7x1 or 1x7 and the data layout is NHWC
+ *
+ * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=2
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=2
+ * @note If this kernel is used to perform Winograd output transform 7x1, -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note If this kernel is used to perform Winograd output transform 1x7, -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ * @note The number of output elements processed along the X direction must be passed at compile time using -DN0 e.g. -DN0=1
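+ * @note An illustrative (not exhaustive) set of build options for this variant could be:
+ *       -DWINOGRAD_OUTPUT_TRANSFORM_2X2_7X7_NHWC -DVEC_SIZE=2 -DNUM_TILES_X=16 -DOUTPUT_TILE_W=2 -DOUTPUT_TILE_H=2 -DDATA_TYPE=float -DN0=1
+ *       The exact set depends on the convolution configuration.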
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] dst_size Size of the destination tensor, minus the last padding
+ * @param[in] _ISRC_HEIGHT The source tensor's height
+ * @param[in] _IDST_WIDTH The destination tensor's width
+ * @param[in] _IDST_HEIGHT The destination tensor's height
+ */
+__kernel void winograd_output_transform_2x2_7x7_nhwc(
+ TENSOR4D(src, BUFFER),
+ TENSOR4D(dst, BUFFER),
+#if defined(HAS_BIAS)
+ VECTOR_DECLARATION(bias),
+#endif // defined(HAS_BIAS)
+ int dst_size,
+ const int _ISRC_HEIGHT,
+ const int _IDST_WIDTH,
+ const int _IDST_HEIGHT)
+{
+ const int cout = GET_SPATIAL_IDX(0, N0, 0); // OFM
+ const int mout = GET_SPATIAL_IDX(1, 1, 0); // WINOGRAD OUTPUT TILES
+#if defined(IS_BATCHED)
+ const int bout = GET_SPATIAL_IDX(2, 1, 0); // BATCH SIZE IDX
+#else // defined(IS_BATCHED)
+ const int bout = 0; // BATCH SIZE IDX
+#endif // defined(IS_BATCHED)
+
+ int x_out = (mout % NUM_TILES_X) * OUTPUT_TILE_W;
+ int y_out = (mout / NUM_TILES_X) * OUTPUT_TILE_H;
+
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+ TILE(DATA_TYPE, 8, N0, in);
+ TILE(DATA_TYPE, 2, N0, out);
+ TILE(uint, 8, 1, src_indirect_y);
+
+ // Calculate the indirect Y for the source tensor
+ LOOP_UNROLLING(int, i, 0, 1, 8,
+ {
+ src_indirect_y[i].v = mout + i *_ISRC_HEIGHT;
+ src_indirect_y[i].v += bout * (int)(_ISRC_HEIGHT * 8);
+ })
+
+ // Initialize the input tile
+ LOOP_UNROLLING(int, i, 0, 1, 8,
+ {
+ in[i].v = 0;
+ })
+
+ // Load the values across the 8 channels to compose the 8x1 tile
+ T_LOAD_INDIRECT(DATA_TYPE, 8, N0, BUFFER, src, cout, src_stride_y, src_indirect_y, in);
+
+ // Compute out00 and out01
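+ // The two expressions below correspond to the rows of the 2x8 output transform matrix:
+ // ( 1  1  1  1  1  1  1  0 )
+ // ( 0 -1  1 -2  2 -3  3  1 )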
+ out[0].v = in[0].v + in[1].v + in[2].v + in[3].v + in[4].v + in[5].v + in[6].v;
+ out[1].v = -in[1].v + in[2].v - (DATA_TYPE)2.f * in[3].v + (DATA_TYPE)2.0f * in[4].v - (DATA_TYPE)3.0f * in[5].v + (DATA_TYPE)3.0f * in[6].v + in[7].v;
+
+#if defined(HAS_BIAS)
+ // Add bias
+ TILE(DATA_TYPE, 1, N0, b);
+
+ T_LOAD(DATA_TYPE, 1, N0, BUFFER, bias, cout, 0, 1, 0, b);
+
+ T_ELTWISE_BROADCAST_ADD_X(DATA_TYPE, 2, N0, out, b, out);
+#endif // defined(HAS_BIAS)
+
+ T_ACTIVATION(DATA_TYPE, 2, N0, ACTIVATION_TYPE, A_VAL, B_VAL, out, out);
+
+ TILE(uint, 2, 1, dst_indirect_y);
+
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+ LOOP_UNROLLING(int, yk, 0, 1, 2,
+ {
+ int y_c = min(y_out + yk, ((int)_IDST_HEIGHT - 1));
+ dst_indirect_y[yk].v = x_out + y_c * (int)(_IDST_WIDTH);
+ })
+#else // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+ LOOP_UNROLLING(int, xk, 0, 1, 2,
+ {
+ int x_c = min(x_out + xk, ((int)_IDST_WIDTH - 1));
+ dst_indirect_y[xk].v = x_c + y_out * (int)(_IDST_WIDTH);
+ })
+#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+
+ // Store the tile in reverse order so the invalid values are overwritten with the valid ones
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 2, N0, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);
+
+#else // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+
+ TILE(DATA_TYPE, 64, N0, in);
+ TILE(DATA_TYPE, 4, N0, out);
+ TILE(DATA_TYPE, 16, N0, tmp);
+ TILE(uint, 64, 1, src_indirect_y);
+
+ // Calculate the indirect Y for the source tensor
+ LOOP_UNROLLING(int, i, 0, 1, 64,
+ {
+ src_indirect_y[i].v = mout + i *_ISRC_HEIGHT;
+ src_indirect_y[i].v += bout * (int)(_ISRC_HEIGHT * 64);
+ })
+
+ // Initialize the input tile
+ LOOP_UNROLLING(int, i, 0, 1, 64,
+ {
+ in[i].v = 0;
+ })
+
+ // Load the values across the 64 channels to compose the 8x8 tile
+ T_LOAD_INDIRECT(DATA_TYPE, 64, N0, BUFFER, src, cout, src_stride_y, src_indirect_y, in);
+
+ LOOP_UNROLLING(int, i, 0, 1, 8,
+ {
+ tmp[i * 2].v = in[0 + i].v + in[8 + i].v + in[16 + i].v + in[24 + i].v + in[32 + i].v + in[40 + i].v + in[48 + i].v;
+ tmp[i * 2 + 1].v = -in[8 + i].v + in[16 + i].v - (DATA_TYPE)2 * in[24 + i].v + (DATA_TYPE)2 * in[32 + i].v - (DATA_TYPE)3 * in[40 + i].v + (DATA_TYPE)3 * in[48 + i].v + in[56 + i].v;
+ })
+
+ // Compute the 2x2 output tile
+ LOOP_UNROLLING(int, i, 0, 1, 2,
+ {
+ out[i * 2].v = tmp[0 + i].v + tmp[2 + i].v + tmp[4 + i].v + tmp[6 + i].v + tmp[8 + i].v + tmp[10 + i].v + tmp[12 + i].v;
+ out[i * 2 + 1].v = -tmp[2 + i].v + tmp[4 + i].v - (DATA_TYPE)2 * tmp[6 + i].v + (DATA_TYPE)2 * tmp[8 + i].v - (DATA_TYPE)3 * tmp[10 + i].v + (DATA_TYPE)3 * tmp[12 + i].v + tmp[14 + i].v;
+ })
+
+#if defined(HAS_BIAS)
+ // Add bias
+ TILE(DATA_TYPE, 1, N0, b);
+
+ T_LOAD(DATA_TYPE, 1, N0, BUFFER, bias, cout, 0, 1, 0, b);
+
+ T_ELTWISE_BROADCAST_ADD_X(DATA_TYPE, 4, N0, out, b, out);
+#endif // defined(HAS_BIAS)
+
+ T_ACTIVATION(DATA_TYPE, 4, N0, ACTIVATION_TYPE, A_VAL, B_VAL, out, out);
+
+ TILE(uint, 4, 1, dst_indirect_y);
+
+ // Calculate the destination indirect Y
+ LOOP_UNROLLING(int, yk, 0, 1, 2,
+ {
+ LOOP_UNROLLING(int, xk, 0, 1, 2,
+ {
+ int x_c = min(x_out + xk, ((int)_IDST_WIDTH - 1));
+ int y_c = min(y_out + yk, ((int)_IDST_HEIGHT - 1));
+ dst_indirect_y[xk + yk * 2].v = x_c + y_c *_IDST_WIDTH;
+ dst_indirect_y[xk + yk * 2].v += bout * (int)(_IDST_WIDTH * _IDST_HEIGHT);
+ })
+ })
+
+ // Store the tile in reverse order so the invalid values are overwritten with the valid ones
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 4, N0, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);
+#endif // !defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+}
+#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_2X2_7X7_NHWC) || defined(WINOGRAD_OUTPUT_TRANSFORM_2X1_7X1_NHWC) || defined(WINOGRAD_OUTPUT_TRANSFORM_1X2_1X7_NHWC)
+#endif // defined(VEC_SIZE) && VEC_SIZE == 2
+
+#if defined(VEC_SIZE) && VEC_SIZE == 4
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_4X4_3X3_NHWC) || defined(WINOGRAD_OUTPUT_TRANSFORM_4X1_3X1_NHWC) || defined(WINOGRAD_OUTPUT_TRANSFORM_1X4_1X3_NHWC)
+/** This OpenCL kernel performs Winograd output transform when the output tile is 4x4, 4x1 or 1x4, the filter size 3x3, 3x1 or 1x3 and the data layout is NHWC
+ *
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
+ * @note If this kernel is used to perform Winograd output transform 3x1, -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note If this kernel is used to perform Winograd output transform 1x3, -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ * @note The number of output elements processed along the X direction must be passed at compile time using -DN0 e.g. -DN0=1
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] dst_size Size of the destination tensor, minus the last padding
+ * @param[in] SRC_HEIGHT The source tensor's height
+ * @param[in] DST_WIDTH The destination tensor's width
+ * @param[in] DST_HEIGHT The destination tensor's height
+ */
+__kernel void winograd_output_transform_4x4_3x3_nhwc(
+ TENSOR4D(src, BUFFER),
+ TENSOR4D(dst, BUFFER),
+#if defined(HAS_BIAS)
+ VECTOR_DECLARATION(bias),
+#endif // defined(HAS_BIAS)
+ int dst_size,
+ const int SRC_HEIGHT,
+ const int DST_WIDTH,
+ const int DST_HEIGHT)
+{
+ const int cout = GET_SPATIAL_IDX(0, N0, 0); // OFM
+ const int mout = GET_SPATIAL_IDX(1, 1, 0); // WINOGRAD OUTPUT TILES
+#if defined(IS_BATCHED)
+ const int bout = GET_SPATIAL_IDX(2, 1, 0); // BATCH SIZE IDX
+#else // defined(IS_BATCHED)
+ const int bout = 0; // BATCH SIZE IDX
+#endif // defined(IS_BATCHED)
+
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+
+ TILE(DATA_TYPE, 6, N0, in);
+ TILE(DATA_TYPE, 4, N0, out);
+ TILE(uint, 6, 1, src_indirect_y);
+
+ LOOP_UNROLLING(int, i, 0, 1, 6,
+ {
+ src_indirect_y[i].v = mout + i *SRC_HEIGHT;
+ src_indirect_y[i].v += bout * (int)(SRC_HEIGHT * 6);
+ })
+
+ // Initialize the input tile
+ LOOP_UNROLLING(int, i, 0, 1, 6,
+ {
+ in[i].v = 0;
+ })
+
+ // Load the values across the 6 channels to compose the 6x1 tile
+ T_LOAD_INDIRECT(DATA_TYPE, 6, N0, BUFFER, src, cout, src_stride_y, src_indirect_y, in);
+
+ // Compute out00, out01, out02 and out03
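+ // The four expressions below correspond to the rows of the 4x6 output transform matrix:
+ // ( 1  1  1  1  1  0 )
+ // ( 0  1 -1  2 -2  0 )
+ // ( 0  1  1  4  4  0 )
+ // ( 0  1 -1  8 -8  1 )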
+ out[0].v = in[0].v + in[1].v + in[2].v + in[3].v + in[4].v;
+ out[1].v = in[1].v - in[2].v + (DATA_TYPE)2.0f * in[3].v - (DATA_TYPE)2.0f * in[4].v;
+ out[2].v = in[1].v + in[2].v + (DATA_TYPE)4.0f * in[3].v + (DATA_TYPE)4.0f * in[4].v;
+ out[3].v = in[1].v - in[2].v + (DATA_TYPE)8.0f * in[3].v - (DATA_TYPE)8.0f * in[4].v + in[5].v;
+
+#if defined(HAS_BIAS)
+ TILE(DATA_TYPE, 1, N0, b);
+
+ T_LOAD(DATA_TYPE, 1, N0, BUFFER, bias, cout, 0, 1, 0, b);
+
+ // c = c + bias[broadcasted]
+ T_ELTWISE_BROADCAST_ADD_X(DATA_TYPE, 4, N0, out, b, out);
+#endif // HAS_BIAS
+
+ int x_out = (mout % NUM_TILES_X) * OUTPUT_TILE_W;
+ int y_out = (mout / NUM_TILES_X) * OUTPUT_TILE_H;
+
+ T_ACTIVATION(DATA_TYPE, 4, N0, ACTIVATION_TYPE, A_VAL, B_VAL, out, out);
+
+ TILE(uint, 4, 1, dst_indirect_y);
+
+ // Calculate the destination indirect Y
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+ LOOP_UNROLLING(int, yk, 0, 1, 4,
+ {
+ int y_c = min(y_out + yk, ((int)DST_HEIGHT - 1));
+ dst_indirect_y[yk].v = x_out + y_c *DST_WIDTH;
+ dst_indirect_y[yk].v += bout * (int)(DST_WIDTH * DST_HEIGHT);
+ })
+#else // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+ LOOP_UNROLLING(int, xk, 0, 1, 4,
+ {
+ int x_c = min(x_out + xk, ((int)DST_WIDTH - 1));
+ dst_indirect_y[xk].v = x_c + y_out *DST_WIDTH;
+ dst_indirect_y[xk].v += bout * (int)(DST_WIDTH * DST_HEIGHT);
+ })
+#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+
+ // Store the tile in reverse order so the invalid values are overwritten with the valid ones
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 4, N0, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);
+
+#else // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+
+ // Calculate the indirect Y for the source tensor
+ TILE(DATA_TYPE, 36, N0, in);
+ TILE(DATA_TYPE, 4, N0, tmp);
+ TILE(uint, 36, 1, src_indirect_y);
+
+ LOOP_UNROLLING(int, i, 0, 1, 36,
+ {
+ src_indirect_y[i].v = mout + i *SRC_HEIGHT;
+ src_indirect_y[i].v += bout * (int)(SRC_HEIGHT * 36);
+ })
+
+ // Initialize the input tile
+ LOOP_UNROLLING(int, i, 0, 1, 36,
+ {
+ in[i].v = 0;
+ })
+
+ // Load the values across the 36 channels to compose the 6x6 tile
+ T_LOAD_INDIRECT(DATA_TYPE, 36, N0, BUFFER, src, cout, src_stride_y, src_indirect_y, in);
+
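+ // First pass: apply the output transform along the tile height. The resulting 4x6 intermediate
+ // overwrites the first four rows of in[] (entries 0..23), reusing it as scratch storage.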
+ LOOP_UNROLLING(int, i, 0, 1, 6,
+ {
+ tmp[0].v = in[6 + i].v + in[12 + i].v;
+ tmp[1].v = in[6 + i].v - in[12 + i].v;
+ tmp[2].v = in[18 + i].v + in[24 + i].v;
+ tmp[3].v = in[18 + i].v - in[24 + i].v;
+ tmp[3].v = tmp[3].v + tmp[3].v;
+ in[i].v = in[i].v + tmp[0].v + tmp[2].v;
+ in[6 + i].v = tmp[3].v + tmp[1].v;
+ in[12 + i].v = fma(tmp[2].v, (VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[0].v);
+ in[18 + i].v = fma(tmp[3].v, (VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[1].v) + in[30 + i].v;
+ })
+
+ // Compute the output tile
+ TILE(DATA_TYPE, 16, N0, out);
+
+ LOOP_UNROLLING(int, i, 0, 1, 4,
+ {
+ tmp[0].v = in[6 * i + 1].v + in[6 * i + 2].v;
+ tmp[1].v = in[6 * i + 1].v - in[6 * i + 2].v;
+ tmp[2].v = in[6 * i + 3].v + in[6 * i + 4].v;
+ tmp[3].v = in[6 * i + 3].v - in[6 * i + 4].v;
+ tmp[3].v = tmp[3].v + tmp[3].v;
+ out[4 * i + 0].v = in[6 * i + 0].v + tmp[0].v + tmp[2].v;
+ out[4 * i + 1].v = tmp[3].v + tmp[1].v;
+ out[4 * i + 2].v = fma(tmp[2].v, (VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[0].v);
+ out[4 * i + 3].v = fma(tmp[3].v, (VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[1].v) + in[6 * i + 5].v;
+ })
+
+#if defined(HAS_BIAS)
+ TILE(DATA_TYPE, 1, N0, b);
+
+ T_LOAD(DATA_TYPE, 1, N0, BUFFER, bias, cout, 0, 1, 0, b);
+
+ // c = c + bias[broadcasted]
+ T_ELTWISE_BROADCAST_ADD_X(DATA_TYPE, 16, N0, out, b, out);
+#endif // HAS_BIAS
+
+ int x_out = (mout % NUM_TILES_X) * OUTPUT_TILE_W;
+ int y_out = (mout / NUM_TILES_X) * OUTPUT_TILE_H;
+
+ T_ACTIVATION(DATA_TYPE, 16, N0, ACTIVATION_TYPE, A_VAL, B_VAL, out, out);
+
+ TILE(uint, 16, 1, dst_indirect_y);
+
+ // Calculate the destination indirect Y
+ LOOP_UNROLLING(int, yk, 0, 1, 4,
+ {
+ LOOP_UNROLLING(int, xk, 0, 1, 4,
+ {
+ int x_c = min(x_out + xk, ((int)DST_WIDTH - 1));
+ int y_c = min(y_out + yk, ((int)DST_HEIGHT - 1));
+ dst_indirect_y[xk + yk * 4].v = x_c + y_c *DST_WIDTH;
+ dst_indirect_y[xk + yk * 4].v += bout * (int)(DST_WIDTH * DST_HEIGHT);
+ })
+ })
+
+ // Store the tile in reverse order so the invalid values are overwritten with the valid ones
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 16, N0, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);
+#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+}
+#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_4X4_3X3_NHWC) || defined(WINOGRAD_OUTPUT_TRANSFORM_4X1_3X1_NHWC) || defined(WINOGRAD_OUTPUT_TRANSFORM_1X4_1X3_NHWC)
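For reference, the out[0..3] arithmetic in the kernel above is the 1D Winograd F(4,3) output transform. Reading the coefficients straight off those four lines (the matrix below is transcribed from the code, not taken from a separate reference), the six input values collapse to four outputs as

    \[
    \mathbf{out} = A^T \mathbf{in}, \qquad
    A^T =
    \begin{pmatrix}
    1 & 1 &  1 & 1 &  1 & 0 \\
    0 & 1 & -1 & 2 & -2 & 0 \\
    0 & 1 &  1 & 4 &  4 & 0 \\
    0 & 1 & -1 & 8 & -8 & 1
    \end{pmatrix}
    \]

and the 2D 4x4/3x3 branch applies the same matrix on both sides, out = A^T * in * A: the first LOOP_UNROLLING over the six columns computes A^T * in, the second over the four resulting rows multiplies by A.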
+
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_4X4_5X5_NHWC) || defined(WINOGRAD_OUTPUT_TRANSFORM_4X1_5X1_NHWC) || defined(WINOGRAD_OUTPUT_TRANSFORM_1X4_1X5_NHWC)
+/** This OpenCL kernel performs Winograd output transform when the output tile is 4x4, 4x1 or 1x4, the filter size is 5x5, 5x1 or 1x5 and the data layout is NHWC
+ *
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
+ * @note If this kernel is used to perform Winograd output transform 5x1, -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note If this kernel is used to perform Winograd output transform 1x5, -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ * @note The number of output elements processed along the X direction must be passed at compile time using -DN0 e.g. -DN0=1
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  dst_stride_w                      Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] SRC_HEIGHT The source tensor's height
+ * @param[in] DST_WIDTH The destination tensor's width
+ * @param[in] DST_HEIGHT The destination tensor's height
+ */
+__kernel void winograd_output_transform_4x4_5x5_nhwc(
+ TENSOR4D(src, BUFFER),
+ TENSOR4D(dst, BUFFER),
+#if defined(HAS_BIAS)
+ VECTOR_DECLARATION(bias),
+#endif // defined(HAS_BIAS)
+ int dst_size,
+ const int SRC_HEIGHT,
+ const int DST_WIDTH,
+ const int DST_HEIGHT)
+{
+ const int cout = GET_SPATIAL_IDX(0, N0, 0); // OFM
+ const int mout = GET_SPATIAL_IDX(1, 1, 0); // WINOGRAD OUTPUT TILES
+#if defined(IS_BATCHED)
+ const int bout = GET_SPATIAL_IDX(2, 1, 0); // BATCH SIZE IDX
+#else // defined(IS_BATCHED)
+ const int bout = 0; // BATCH SIZE IDX
+#endif // defined(IS_BATCHED)
+
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+ TILE(DATA_TYPE, 8, N0, in);
+ TILE(DATA_TYPE, 4, N0, out);
+ TILE(DATA_TYPE, 4, N0, tmp);
+ TILE(uint, 8, 1, src_indirect_y);
+
+ LOOP_UNROLLING(int, i, 0, 1, 8,
+ {
+ src_indirect_y[i].v = mout + i *SRC_HEIGHT;
+ src_indirect_y[i].v += bout * (int)(SRC_HEIGHT * 8);
+ })
+
+ // Initialize the input tile
+ LOOP_UNROLLING(int, i, 0, 1, 8,
+ {
+ in[i].v = 0;
+ })
+
+ // "in" contains 1x8 or 8x1 tile here
+ T_LOAD_INDIRECT(DATA_TYPE, 8, N0, BUFFER, src, cout, src_stride_y, src_indirect_y, in);
+
+ // A^T * in, and in this degenerate case out consists of 1 column/row
+ tmp[0].v = in[1].v - in[2].v;
+ tmp[1].v = (DATA_TYPE)2.0f * (in[3].v - in[4].v);
+ tmp[2].v = (DATA_TYPE)2.0f * (in[5].v + in[6].v);
+ tmp[3].v = in[3].v + in[4].v;
+ out[0].v = in[0].v + in[1].v + in[2].v + tmp[3].v + (DATA_TYPE)4.0f * tmp[2].v;
+ out[1].v = tmp[0].v + tmp[1].v + (DATA_TYPE)4.0f * (in[5].v - in[6].v);
+ out[2].v = in[1].v + in[2].v + (DATA_TYPE)4.0f * tmp[3].v + tmp[2].v;
+ out[3].v = tmp[0].v + (DATA_TYPE)4.0f * tmp[1].v + in[5].v - in[6].v + in[7].v;
+
+#if defined(HAS_BIAS)
+ TILE(DATA_TYPE, 1, N0, b);
+
+ T_LOAD(DATA_TYPE, 1, N0, BUFFER, bias, cout, 0, 1, 0, b);
+
+ // c = c + bias[broadcasted]
+ T_ELTWISE_BROADCAST_ADD_X(DATA_TYPE, 4, N0, out, b, out);
+#endif // HAS_BIAS
+
+ int x_out = (mout % NUM_TILES_X) * OUTPUT_TILE_W;
+ int y_out = (mout / NUM_TILES_X) * OUTPUT_TILE_H;
+
+ T_ACTIVATION(DATA_TYPE, 4, N0, ACTIVATION_TYPE, A_VAL, B_VAL, out, out);
+
+ TILE(uint, 4, 1, dst_indirect_y);
+
+ // Calculate the destination indirect Y
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+ LOOP_UNROLLING(int, yk, 0, 1, 4,
+ {
+ int y_c = min(y_out + yk, ((int)DST_HEIGHT - 1));
+ dst_indirect_y[yk].v = x_out + y_c *DST_WIDTH;
+ dst_indirect_y[yk].v += bout * (int)(DST_WIDTH * DST_HEIGHT);
+ })
+#else // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+ LOOP_UNROLLING(int, xk, 0, 1, 4,
+ {
+ int x_c = min(x_out + xk, ((int)DST_WIDTH - 1));
+ dst_indirect_y[xk].v = x_c + y_out *DST_WIDTH;
+ dst_indirect_y[xk].v += bout * (int)(DST_WIDTH * DST_HEIGHT);
+ })
+#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+
+ // Store the tile in reverse order so the invalid values are overwritten with the valid ones
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 4, N0, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);
+
+#else // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+ // Calculate the indirect Y for the source tensor
+ TILE(DATA_TYPE, 64, N0, in);
+ TILE(DATA_TYPE, 6, N0, tmp);
+ TILE(uint, 64, 1, src_indirect_y);
+
+ LOOP_UNROLLING(int, i, 0, 1, 64,
+ {
+ src_indirect_y[i].v = mout + i *SRC_HEIGHT;
+ src_indirect_y[i].v += bout * (int)(SRC_HEIGHT * 64);
+ })
+
+ // Initialize the input tile
+ LOOP_UNROLLING(int, i, 0, 1, 64,
+ {
+ in[i].v = 0;
+ })
+
+ // "in" here is 8x8 tile
+ T_LOAD_INDIRECT(DATA_TYPE, 64, N0, BUFFER, src, cout, src_stride_y, src_indirect_y, in);
+
+ // A^T * in
+ LOOP_UNROLLING(int, i, 0, 1, 8,
+ {
+ tmp[0].v = in[8 + i].v + in[16 + i].v;
+ tmp[1].v = in[8 + i].v - in[16 + i].v;
+ tmp[2].v = in[24 + i].v + in[32 + i].v;
+ tmp[3].v = in[24 + i].v - in[32 + i].v;
+ tmp[3].v = tmp[3].v + tmp[3].v;
+ tmp[4].v = in[40 + i].v + in[48 + i].v;
+ tmp[4].v = tmp[4].v + tmp[4].v;
+ tmp[5].v = in[40 + i].v - in[48 + i].v;
+
+ // 4x8 matrix as a result
+ in[i].v = in[i].v + tmp[0].v + fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[4].v, tmp[2].v);
+ in[8 + i].v = tmp[1].v + fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[5].v, tmp[3].v);
+ in[16 + i].v = tmp[0].v + fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[2].v, tmp[4].v);
+ in[24 + i].v = tmp[1].v + fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[3].v, tmp[5].v) + in[56 + i].v;
+ })
+
+ // Compute the output tile
+ TILE(DATA_TYPE, 16, N0, out);
+
+ // in * A, with in = A^T * in as above
+ LOOP_UNROLLING(int, i, 0, 1, 4,
+ {
+ tmp[0].v = in[8 * i + 1].v + in[8 * i + 2].v;
+ tmp[1].v = in[8 * i + 1].v - in[8 * i + 2].v;
+ tmp[2].v = in[8 * i + 3].v + in[8 * i + 4].v;
+ tmp[3].v = in[8 * i + 3].v - in[8 * i + 4].v;
+ tmp[3].v = tmp[3].v + tmp[3].v;
+ tmp[4].v = in[8 * i + 5].v + in[8 * i + 6].v;
+ tmp[4].v = tmp[4].v + tmp[4].v;
+ tmp[5].v = in[8 * i + 5].v - in[8 * i + 6].v;
+
+ // 4x4 tile
+ out[4 * i].v = in[8 * i].v + tmp[0].v + fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[4].v, tmp[2].v);
+ out[4 * i + 1].v = tmp[1].v + fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[5].v, tmp[3].v);
+ out[4 * i + 2].v = fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[2].v, tmp[0].v) + tmp[4].v;
+ out[4 * i + 3].v = fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[3].v, tmp[1].v) + tmp[5].v + in[8 * i + 7].v;
+ })
+
+#if defined(HAS_BIAS)
+ TILE(DATA_TYPE, 1, N0, b);
+
+ T_LOAD(DATA_TYPE, 1, N0, BUFFER, bias, cout, 0, 1, 0, b);
+
+ // c = c + bias[broadcasted]
+ T_ELTWISE_BROADCAST_ADD_X(DATA_TYPE, 16, N0, out, b, out);
+#endif // HAS_BIAS
+
+ int x_out = (mout % NUM_TILES_X) * OUTPUT_TILE_W;
+ int y_out = (mout / NUM_TILES_X) * OUTPUT_TILE_H;
+
+ T_ACTIVATION(DATA_TYPE, 16, N0, ACTIVATION_TYPE, A_VAL, B_VAL, out, out);
+
+ TILE(uint, 16, 1, dst_indirect_y);
+
+ // Calculate the destination indirect Y
+ LOOP_UNROLLING(int, yk, 0, 1, 4,
+ {
+ LOOP_UNROLLING(int, xk, 0, 1, 4,
+ {
+ int x_c = min(x_out + xk, ((int)DST_WIDTH - 1));
+ int y_c = min(y_out + yk, ((int)DST_HEIGHT - 1));
+ dst_indirect_y[xk + yk * 4].v = x_c + y_c *DST_WIDTH;
+ dst_indirect_y[xk + yk * 4].v += bout * (int)(DST_WIDTH * DST_HEIGHT);
+ })
+ })
+
+ // Store the tile in reverse order so the invalid values are overwritten with the valid ones
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 16, N0, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);
+#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+}
+#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_4X4_5X5_NHWC) || defined(WINOGRAD_OUTPUT_TRANSFORM_4X1_5X1_NHWC) || defined(WINOGRAD_OUTPUT_TRANSFORM_1X4_1X5_NHWC)
+#endif // defined(VEC_SIZE) && VEC_SIZE == 4
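Both branches of the kernel above finish by clamping the destination coordinates (min(x_out + xk, DST_WIDTH - 1) and min(y_out + yk, DST_HEIGHT - 1)) so that tiles overlapping the right/bottom border keep writing inside the tensor, and the width-select store then lets the valid values win. A minimal standalone sketch of that index computation, in plain C and with made-up sizes, is:

    #include <stdio.h>

    int main(void)
    {
        /* Made-up geometry: 10x10 output, 4x4 output tiles, so NUM_TILES_X = 3 across the width. */
        const int DST_WIDTH = 10, DST_HEIGHT = 10, OUTPUT_TILE_W = 4, OUTPUT_TILE_H = 4, NUM_TILES_X = 3;
        const int mout = 2; /* third tile of the first tile row: starts at x = 8 and overlaps the border */
        const int x_out = (mout % NUM_TILES_X) * OUTPUT_TILE_W;
        const int y_out = (mout / NUM_TILES_X) * OUTPUT_TILE_H;

        for (int yk = 0; yk < 4; ++yk)
        {
            for (int xk = 0; xk < 4; ++xk)
            {
                /* Same clamping as dst_indirect_y above: out-of-range lanes collapse onto the last column/row. */
                const int x_c = (x_out + xk < DST_WIDTH) ? (x_out + xk) : (DST_WIDTH - 1);
                const int y_c = (y_out + yk < DST_HEIGHT) ? (y_out + yk) : (DST_HEIGHT - 1);
                printf("tile element (%d,%d) -> dst row %d\n", xk, yk, x_c + y_c * DST_WIDTH);
            }
        }
        return 0;
    }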
+
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL)
+#if defined(VEC_SIZE) && VEC_SIZE == 2
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_2X1_7X1_NHWC)
+/** This OpenCL kernel performs Winograd output transform when the output tile is 2x1, the filter size 7x1 and the data layout is NHWC
+ *
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=2
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=1
+ * @note The width of the output tensor must be passed at compile time using -DDST_WIDTH: e.g. -DDST_WIDTH=24
+ * @note The height of the output tensor must be passed at compile time using -DDST_HEIGHT: e.g. -DDST_HEIGHT=32
+ * @note -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  dst_stride_w                      Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] SRC_HEIGHT The source tensor's height
+ * @param[in] DST_WIDTH The destination tensor's width
+ * @param[in] DST_HEIGHT The destination tensor's height
+ */
+__kernel void winograd_output_transform_2x1_7x1_nhwc(
+ TENSOR4D_DECLARATION(src),
+ TENSOR4D_DECLARATION(dst),
+#if defined(HAS_BIAS)
+ VECTOR_DECLARATION(bias),
+#endif // defined(HAS_BIAS)
+ int dst_size,
+ const int SRC_HEIGHT,
+ const int DST_WIDTH,
+ const int DST_HEIGHT)
+{
+ winograd_output_transform_2x2_7x7_nhwc(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_stride_w,
+ dst_step_w,
+ dst_offset_first_element_in_bytes,
+#if defined(HAS_BIAS)
+ bias_ptr,
+ bias_stride_x,
+ bias_step_x,
+ bias_offset_first_element_in_bytes,
+#endif // defined(HAS_BIAS)
+ dst_size,
+ SRC_HEIGHT,
+ DST_WIDTH,
+ DST_HEIGHT);
+}
+#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_2X1_7X1_NHWC)
+#endif // defined(VEC_SIZE) && VEC_SIZE == 2
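winograd_output_transform_2x1_7x1_nhwc above is a thin wrapper: it forwards every argument to winograd_output_transform_2x2_7x7_nhwc and relies on -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL to select the 1D path inside it. A host side could derive the kernel name from the tile and filter geometry; the helper below is purely illustrative (the naming scheme is read off the kernels in this file, the function itself is hypothetical):

    #include <stdio.h>

    /* Hypothetical helper: format the NHWC output-transform kernel name from the
     * output tile size and the filter size, following the names used in this file. */
    static int winograd_output_kernel_name(char *buf, size_t len,
                                           int tile_w, int tile_h, int filt_w, int filt_h)
    {
        return snprintf(buf, len, "winograd_output_transform_%dx%d_%dx%d_nhwc",
                        tile_w, tile_h, filt_w, filt_h);
    }

    int main(void)
    {
        char name[64];
        winograd_output_kernel_name(name, sizeof(name), 2, 1, 7, 1);
        printf("%s\n", name); /* prints: winograd_output_transform_2x1_7x1_nhwc */
        return 0;
    }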
+
+#if defined(VEC_SIZE) && VEC_SIZE == 4
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_4X1_3X1_NHWC)
+/** This OpenCL kernel performs Winograd output transform when the output tile is 4x1, the filter size 3x1 and the data layout is NHWC
+ *
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=1
+ * @note The width of the output tensor must be passed at compile time using -DDST_WIDTH: e.g. -DDST_WIDTH=24
+ * @note The height of the output tensor must be passed at compile time using -DDST_HEIGHT: e.g. -DDST_HEIGHT=32
+ * @note -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  dst_stride_w                      Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] SRC_HEIGHT The source tensor's height
+ * @param[in] DST_WIDTH The destination tensor's width
+ * @param[in] DST_HEIGHT The destination tensor's height
+ */
+__kernel void winograd_output_transform_4x1_3x1_nhwc(
+ TENSOR4D_DECLARATION(src),
+ TENSOR4D_DECLARATION(dst),
+#if defined(HAS_BIAS)
+ VECTOR_DECLARATION(bias),
+#endif // defined(HAS_BIAS)
+ int dst_size,
+ const int SRC_HEIGHT,
+ const int DST_WIDTH,
+ const int DST_HEIGHT)
+{
+ winograd_output_transform_4x4_3x3_nhwc(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_stride_w,
+ dst_step_w,
+ dst_offset_first_element_in_bytes,
+#if defined(HAS_BIAS)
+ bias_ptr,
+ bias_stride_x,
+ bias_step_x,
+ bias_offset_first_element_in_bytes,
+#endif // defined(HAS_BIAS)
+ dst_size,
+ SRC_HEIGHT,
+ DST_WIDTH,
+ DST_HEIGHT);
+}
+#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_4X1_3X1_NHWC)
+
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_4X1_5X1_NHWC)
+/** This OpenCL kernel performs Winograd output transform when the output tile is 4x1, the filter size 5x1 and the data layout is NHWC
+ *
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=1
+ * @note The width of the output tensor must be passed at compile time using -DDST_WIDTH: e.g. -DDST_WIDTH=24
+ * @note The height of the output tensor must be passed at compile time using -DDST_HEIGHT: e.g. -DDST_HEIGHT=32
+ * @note -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  dst_stride_w                      Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] SRC_HEIGHT The source tensor's height
+ * @param[in] DST_WIDTH The destination tensor's width
+ * @param[in] DST_HEIGHT The destination tensor's height
+ */
+__kernel void winograd_output_transform_4x1_5x1_nhwc(
+ TENSOR4D_DECLARATION(src),
+ TENSOR4D_DECLARATION(dst),
+#if defined(HAS_BIAS)
+ VECTOR_DECLARATION(bias),
+#endif // defined(HAS_BIAS)
+ int dst_size,
+ const int SRC_HEIGHT,
+ const int DST_WIDTH,
+ const int DST_HEIGHT)
+{
+ winograd_output_transform_4x4_5x5_nhwc(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_stride_w,
+ dst_step_w,
+ dst_offset_first_element_in_bytes,
+#if defined(HAS_BIAS)
+ bias_ptr,
+ bias_stride_x,
+ bias_step_x,
+ bias_offset_first_element_in_bytes,
+#endif // defined(HAS_BIAS)
+ dst_size,
+ SRC_HEIGHT,
+ DST_WIDTH,
+ DST_HEIGHT);
+}
+#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_4X1_5X1_NHWC)
+#endif // defined(VEC_SIZE) && VEC_SIZE == 4
+#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL)
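Each of the horizontal variants above is compiled with a matching set of -D definitions: the guard macro that enables the kernel, WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL, VEC_SIZE, N0, OUTPUT_TILE_W/OUTPUT_TILE_H, NUM_TILES_X and DATA_TYPE, plus the optional HAS_BIAS, IS_BATCHED and activation macros referenced by the code. The snippet below only illustrates how such an option string could be put together; how the library actually assembles its build options is not part of this diff:

    #include <stdio.h>

    int main(void)
    {
        char opts[256];
        snprintf(opts, sizeof(opts),
                 "-DWINOGRAD_OUTPUT_TRANSFORM_4X1_3X1_NHWC "
                 "-DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL "
                 "-DVEC_SIZE=4 -DN0=%d -DOUTPUT_TILE_W=%d -DOUTPUT_TILE_H=%d "
                 "-DNUM_TILES_X=%d -DDATA_TYPE=%s",
                 2, 4, 1, 6, "half");
        /* -DHAS_BIAS, -DIS_BATCHED and -DACTIVATION_TYPE/-DA_VAL/-DB_VAL would be appended when needed. */
        printf("%s\n", opts); /* pass this string as the program build options */
        return 0;
    }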
+
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+#if defined(VEC_SIZE) && VEC_SIZE == 2
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_1X2_1X7_NHWC)
+/** This OpenCL kernel performs Winograd output transform when the output tile is 1x2, the filter size 1x7 and the data layout is NHWC
+ *
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=1
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=2
+ * @note The width of the output tensor must be passed at compile time using -DDST_WIDTH: e.g. -DDST_WIDTH=24
+ * @note The height of the output tensor must be passed at compile time using -DDST_HEIGHT: e.g. -DDST_HEIGHT=32
+ * @note -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  dst_stride_w                      Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] SRC_HEIGHT The source tensor's height
+ * @param[in] DST_WIDTH The destination tensor's width
+ * @param[in] DST_HEIGHT The destination tensor's height
+ */
+__kernel void winograd_output_transform_1x2_1x7_nhwc(
+ TENSOR4D_DECLARATION(src),
+ TENSOR4D_DECLARATION(dst),
+#if defined(HAS_BIAS)
+ VECTOR_DECLARATION(bias),
+#endif // defined(HAS_BIAS)
+ int dst_size,
+ const int SRC_HEIGHT,
+ const int DST_WIDTH,
+ const int DST_HEIGHT)
+{
+ winograd_output_transform_2x2_7x7_nhwc(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_stride_w,
+ dst_step_w,
+ dst_offset_first_element_in_bytes,
+#if defined(HAS_BIAS)
+ bias_ptr,
+ bias_stride_x,
+ bias_step_x,
+ bias_offset_first_element_in_bytes,
+#endif // defined(HAS_BIAS)
+ dst_size,
+ SRC_HEIGHT,
+ DST_WIDTH,
+ DST_HEIGHT);
+}
+#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_1X2_1X7_NHWC)
+#endif // defined(VEC_SIZE) && VEC_SIZE == 2
+
+#if defined(VEC_SIZE) && VEC_SIZE == 4
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_1X4_1X3_NHWC)
+/** This OpenCL kernel performs Winograd output transform when the output tile is 1x4, the filter size 1x3 and the data layout is NHWC
+ *
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=1
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
+ * @note The width of the output tensor must be passed at compile time using -DDST_WIDTH: e.g. -DDST_WIDTH=24
+ * @note The height of the output tensor must be passed at compile time using -DDST_HEIGHT: e.g. -DDST_HEIGHT=32
+ * @note -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  dst_stride_w                      Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] SRC_HEIGHT The source tensor's height
+ * @param[in] DST_WIDTH The destination tensor's width
+ * @param[in] DST_HEIGHT The destination tensor's height
+ */
+__kernel void winograd_output_transform_1x4_1x3_nhwc(
+ TENSOR4D_DECLARATION(src),
+ TENSOR4D_DECLARATION(dst),
+#if defined(HAS_BIAS)
+ VECTOR_DECLARATION(bias),
+#endif // defined(HAS_BIAS)
+ int dst_size,
+ const int SRC_HEIGHT,
+ const int DST_WIDTH,
+ const int DST_HEIGHT)
+{
+ winograd_output_transform_4x4_3x3_nhwc(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_stride_w,
+ dst_step_w,
+ dst_offset_first_element_in_bytes,
+#if defined(HAS_BIAS)
+ bias_ptr,
+ bias_stride_x,
+ bias_step_x,
+ bias_offset_first_element_in_bytes,
+#endif // defined(HAS_BIAS)
+ dst_size,
+ SRC_HEIGHT,
+ DST_WIDTH,
+ DST_HEIGHT);
+}
+#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_1X4_1X3_NHWC)
+
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_1X4_1X5_NHWC)
+/** This OpenCL kernel performs Winograd output transform when the output tile is 1x4, the filter size 1x5 and the data layout is NHWC
+ *
+ * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=1
+ * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
+ * @note The width of the output tensor must be passed at compile time using -DDST_WIDTH: e.g. -DDST_WIDTH=24
+ * @note The height of the output tensor must be passed at compile time using -DDST_HEIGHT: e.g. -DDST_HEIGHT=32
+ * @note -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  dst_stride_w                      Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] SRC_HEIGHT The source tensor's height
+ * @param[in] DST_WIDTH The destination tensor's width
+ * @param[in] DST_HEIGHT The destination tensor's height
+ */
+__kernel void winograd_output_transform_1x4_1x5_nhwc(
+ TENSOR4D_DECLARATION(src),
+ TENSOR4D_DECLARATION(dst),
+#if defined(HAS_BIAS)
+ VECTOR_DECLARATION(bias),
+#endif // defined(HAS_BIAS)
+ int dst_size,
+ const int SRC_HEIGHT,
+ const int DST_WIDTH,
+ const int DST_HEIGHT)
+{
+ winograd_output_transform_4x4_5x5_nhwc(src_ptr,
+ src_stride_x,
+ src_step_x,
+ src_stride_y,
+ src_step_y,
+ src_stride_z,
+ src_step_z,
+ src_stride_w,
+ src_step_w,
+ src_offset_first_element_in_bytes,
+ dst_ptr,
+ dst_stride_x,
+ dst_step_x,
+ dst_stride_y,
+ dst_step_y,
+ dst_stride_z,
+ dst_step_z,
+ dst_stride_w,
+ dst_step_w,
+ dst_offset_first_element_in_bytes,
+#if defined(HAS_BIAS)
+ bias_ptr,
+ bias_stride_x,
+ bias_step_x,
+ bias_offset_first_element_in_bytes,
+#endif // defined(HAS_BIAS)
+ dst_size,
+ SRC_HEIGHT,
+ DST_WIDTH,
+ DST_HEIGHT);
+}
+#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_1X4_1X5_NHWC)
+#endif // defined(VEC_SIZE) && VEC_SIZE == 4
+#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+#endif // defined(NUM_TILES_X) && defined(OUTPUT_TILE_W) && defined(OUTPUT_TILE_H)
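Across all of the output-transform kernels above, GET_SPATIAL_IDX(0, N0, 0), GET_SPATIAL_IDX(1, 1, 0) and GET_SPATIAL_IDX(2, 1, 0) map the three global work dimensions to output channels (in steps of N0), Winograd tiles and batches respectively; with a partial step of 0 the first index is in effect get_global_id(0) * N0 (the macro itself lives in the tile helpers, which are not part of this diff). A rough host-side sketch of a matching launch geometry, with invented sizes, is:

    #include <stdio.h>

    int main(void)
    {
        /* Invented sizes: 64 output channels processed 4 at a time, 6x8 output tiles, 2 batches. */
        const int ofm = 64, n0 = 4, num_tiles_x = 6, num_tiles_y = 8, batches = 2;
        size_t gws[3];
        gws[0] = (size_t)((ofm + n0 - 1) / n0);       /* dim 0 -> cout = GET_SPATIAL_IDX(0, N0, 0) */
        gws[1] = (size_t)(num_tiles_x * num_tiles_y); /* dim 1 -> mout = GET_SPATIAL_IDX(1, 1, 0)  */
        gws[2] = (size_t)batches;                     /* dim 2 -> bout, only with -DIS_BATCHED     */
        printf("global work size = {%zu, %zu, %zu}\n", gws[0], gws[1], gws[2]);
        return 0;
    }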
diff --git a/src/core/CL/cl_kernels/pooling_layer.cl b/src/core/CL/cl_kernels/pooling_layer.cl
deleted file mode 100644
index 00250a08a5..0000000000
--- a/src/core/CL/cl_kernels/pooling_layer.cl
+++ /dev/null
@@ -1,981 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers.h"
-#include "repeat.h"
-
-#if defined(POOL_AVG) || defined(POOL_L2)
-#define POOL_OP(x, y) ((x) + (y))
-#else /* defined(POOL_AVG) || defined(POOL_L2) */
-#define POOL_OP(x, y) (fmax((x), (y)))
-#endif /* defined(POOL_AVG) || defined(POOL_L2) */
-
-#if defined(POOL_L2)
-#define POW2_OP(x, vec_size) ((x) * (x))
-#else /* defined(POOL_L2) */
-#define POW2_OP(x, vec_size) (x)
-#endif /* defined(POOL_L2) */
-
-#define DIV_OP(x, y) (x * (1.f / y))
-#define SQRT_OP(x) sqrt((x))
-
-#if STRIDE_X == 1
-#define POOLING3x3(res, input, output) POOLING3x3_STRIDE1(res, input, output)
-#elif STRIDE_X == 2 /* STRIDE_X == 1 */
-#define POOLING3x3(res, input, output) POOLING3x3_STRIDE2(res, input, output)
-#elif STRIDE_X == 3 /* STRIDE_X not equals 1 or 2 */
-#define POOLING3x3(res, input, output) POOLING3x3_STRIDE3(res, input, output)
-#endif /* STRIDE_X == 3 */
-
-#if defined(FP_MIXED_PRECISION)
-#define CONVERT_TO_ACC_DATA_TYPE(x, n) CONVERT(x, VEC_DATA_TYPE(ACC_DATA_TYPE, n))
-#define VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(n, offset, ptr) \
- CONVERT_TO_ACC_DATA_TYPE(vload##n(offset, ptr), n)
-#else /* defined(FP_MIXED_PRECISION) */
-#define VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(n, offset, ptr) vload##n(offset, ptr)
-#endif /* defined(FP_MIXED_PRECISION) */
-
-#define POOLING3x3_STRIDE1(res, input, output) \
- ({ \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- data00 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(4, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0)); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 2) \
- data01 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(2, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0) + 4); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- data10 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(4, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0)); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 2) \
- data11 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(2, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0) + 4); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- data20 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(4, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 0)); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 2) \
- data21 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(2, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 0) + 4); \
- data00 = POW2_OP(data00, 4); \
- data01 = POW2_OP(data01, 2); \
- data10 = POW2_OP(data10, 4); \
- data11 = POW2_OP(data11, 2); \
- data20 = POW2_OP(data20, 4); \
- data21 = POW2_OP(data21, 2); \
- \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- values00 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 8))(data00.s01212323); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- values01 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(data01.s0, data00.s3, data01.s01); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- values10 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 8))(data10.s01212323); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- values11 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(data11.s0, data10.s3, data11.s01); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- values20 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 8))(data20.s01212323); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- values21 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(data21.s0, data20.s3, data21.s01); \
- \
- values00 = POOL_OP(values00, values10); \
- values01 = POOL_OP(values01, values11); \
- values00 = POOL_OP(values00, values20); \
- values01 = POOL_OP(values01, values21); \
- \
- res = POOL_OP((VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(values00.s036, values01.s1), (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(values00.s147, values01.s2)); \
- res = POOL_OP(res, (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(values00.s25, values01.s03)); \
- })
-
-#define POOLING3x3_STRIDE2(res, input, output) \
- ({ \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- data00 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(8, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0)); \
- ACC_DATA_TYPE data01 = (ACC_DATA_TYPE)(*((__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0) + 8)); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- data10 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(8, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0)); \
- ACC_DATA_TYPE data11 = (ACC_DATA_TYPE)(*((__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0) + 8)); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- data20 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(8, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 0)); \
- ACC_DATA_TYPE data21 = (ACC_DATA_TYPE)(*((__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 0) + 8)); \
- data00 = POW2_OP(data00, 8); \
- data01 = POW2_OP(data01, 1); \
- data10 = POW2_OP(data10, 8); \
- data11 = POW2_OP(data11, 1); \
- data20 = POW2_OP(data20, 8); \
- data21 = POW2_OP(data21, 1); \
- \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- values00 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 8))(data00.s01223445); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- values01 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(data00.s667, data01); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- values10 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 8))(data10.s01223445); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- values11 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(data10.s667, data11); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- values20 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 8))(data20.s01223445); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- values21 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(data20.s667, data21); \
- \
- values00 = POOL_OP(values00, values10); \
- values01 = POOL_OP(values01, values11); \
- values00 = POOL_OP(values00, values20); \
- values01 = POOL_OP(values01, values21); \
- \
- res = POOL_OP((VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(values00.s036, values01.s1), (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(values00.s147, values01.s2)); \
- res = POOL_OP(res, (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(values00.s25, values01.s03)); \
- })
-
-#define POOLING3x3_STRIDE3(res, input, output) \
- ({ \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- data00 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(8, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0)); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- data01 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(4, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0) + 8); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- data10 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(8, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0)); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- data11 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(4, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0) + 8); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- data20 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(8, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 0)); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- data21 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(4, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 0) + 8); \
- data00 = POW2_OP(data00, 8); \
- data01 = POW2_OP(data01, 4); \
- data10 = POW2_OP(data10, 8); \
- data11 = POW2_OP(data11, 4); \
- data20 = POW2_OP(data20, 8); \
- data21 = POW2_OP(data21, 4); \
- \
- data00 = POOL_OP(data00, data10); \
- data01 = POOL_OP(data01, data11); \
- data00 = POOL_OP(data00, data20); \
- data01 = POOL_OP(data01, data21); \
- \
- res = POOL_OP((VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(data00.s036, data01.s1), (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(data00.s147, data01.s2)); \
- res = POOL_OP(res, (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(data00.s25, data01.s03)); \
- })
-
-ACC_DATA_TYPE calculate_avg_scale(const int pool_size_x, const int pool_size_y, const int upper_bound_w, const int upper_bound_h,
- const int pad_x, const int pad_y, const int stride_x, const int stride_y)
-{
- int start_x = get_global_id(0) * stride_x - pad_x;
- int start_y = get_global_id(1) * stride_y - pad_y;
- const int end_x = min(start_x + pool_size_x, upper_bound_w);
- const int end_y = min(start_y + pool_size_y, upper_bound_h);
-#if defined(EXCLUDE_PADDING)
- start_x = max(0, start_x);
- start_y = max(0, start_y);
-#endif /* defined(EXCLUDE_PADDING) */
- return ((end_y - start_y) * (end_x - start_x));
-}
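calculate_avg_scale above returns the number of input elements actually covered by the pooling window, so that the POOL_AVG / POOL_L2 result can be divided by it; with EXCLUDE_PADDING the window is clipped to the tensor, otherwise padded positions still count. A standalone recomputation of one corner case (3x3 pool, stride 2, pad 1, 8x8 input, so MAX_WIDTH = MAX_HEIGHT = 9 as "width + pad" per the notes below) is:

    #include <stdio.h>

    /* Plain-C copy of the scale logic above, specialised to a square window. */
    static int avg_scale(int gx, int gy, int pool, int upper_w, int upper_h,
                         int pad, int stride, int exclude_padding)
    {
        int start_x = gx * stride - pad;
        int start_y = gy * stride - pad;
        const int end_x = (start_x + pool < upper_w) ? (start_x + pool) : upper_w;
        const int end_y = (start_y + pool < upper_h) ? (start_y + pool) : upper_h;
        if (exclude_padding)
        {
            start_x = (start_x > 0) ? start_x : 0;
            start_y = (start_y > 0) ? start_y : 0;
        }
        return (end_y - start_y) * (end_x - start_x);
    }

    int main(void)
    {
        /* Top-left output of a 3x3 / stride-2 / pad-1 pooling on an 8x8 input. */
        printf("scale incl. padding: %d\n", avg_scale(0, 0, 3, 9, 9, 1, 2, 0)); /* 9: the full 3x3 window  */
        printf("scale excl. padding: %d\n", avg_scale(0, 0, 3, 9, 9, 1, 2, 1)); /* 4: only the 2x2 overlap */
        return 0;
    }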
-
-/** Performs a pooling function of pool size equal to 2.
- *
- * @note Datatype must be passed using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types are F16/F32;
- * @note In case of average pooling the following information must be passed at compile time:
- * -DPOOL_AVG or -DPOOL_L2 must be provided otherwise max pooling will be performed.
- *       -DMAX_WIDTH and -DMAX_HEIGHT which are the maximum accessible indices in x and y dimensions (width + pad)
- * -DSTRIDE_X and -DSTRIDE_Y which are the steps of the window along the x and y directions
- * -DPAD_X and -DPAD_Y which are the pooling paddings in x and y dimension
- *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  output_stride_z                       Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void pooling_layer_2(
- TENSOR3D_DECLARATION(input),
- TENSOR3D_DECLARATION(output))
-{
- // Get pixels pointer
- Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
-
- // Load data
- VEC_DATA_TYPE(ACC_DATA_TYPE, 2)
- data0 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(2, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0));
- VEC_DATA_TYPE(ACC_DATA_TYPE, 2)
- data1 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(2, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0));
-
-#if defined(POOL_L2)
- // Raise to power of 2 for L2 Pooling
- data0 = POW2_OP(data0, 2);
- data1 = POW2_OP(data1, 2);
-#endif /* defined(POOL_L2) */
-
- // Perform calculations
- data0 = POOL_OP(data0, data1);
- ACC_DATA_TYPE res = POOL_OP(data0.s0, data0.s1);
-
-#if defined(POOL_AVG) || defined(POOL_L2)
- // Divide by pool region in case of average or l2 pooling
- res = DIV_OP(res, calculate_avg_scale(2, 2, MAX_WIDTH, MAX_HEIGHT, PAD_X, PAD_Y, STRIDE_X, STRIDE_Y));
-#endif /* defined(POOL_AVG) || defined(POOL_L2) */
-
-#if defined(POOL_L2)
- // Take square root of the result in L2 pooling
- res = SQRT_OP(res);
-#endif /* defined(POOL_L2) */
-
- // Store result
- *(__global DATA_TYPE *)output.ptr = (DATA_TYPE)res;
-}
-
-/** Performs a pooling function of pool size equal to 3
- *
- * @note Datatype must be passed using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types are F16/F32;
- * @note In case of average pooling the following information must be passed at compile time:
- * -DPOOL_AVG or -DPOOL_L2 must be provided otherwise max pooling will be performed.
- *       -DMAX_WIDTH and -DMAX_HEIGHT which are the maximum accessible indices in x and y dimensions (width + pad)
- * -DSTRIDE_X and -DSTRIDE_Y which are the steps of the window along the x and y directions
- * -DPAD_X and -DPAD_Y which are the pooling paddings in x and y dimension
- *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  output_stride_z                       Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void pooling_layer_3(
- TENSOR3D_DECLARATION(input),
- TENSOR3D_DECLARATION(output))
-{
- // Get pixels pointer
- Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
-
- // Load data
- VEC_DATA_TYPE(ACC_DATA_TYPE, 3)
- data0 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(3, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0));
- VEC_DATA_TYPE(ACC_DATA_TYPE, 3)
- data1 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(3, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0));
- VEC_DATA_TYPE(ACC_DATA_TYPE, 3)
- data2 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(3, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 0));
-
-#if defined(POOL_L2)
- // Raise to power of 2 for L2 Pooling
- data0 = POW2_OP(data0, 3);
- data1 = POW2_OP(data1, 3);
- data2 = POW2_OP(data2, 3);
-#endif /* defined(POOL_L2) */
-
- // Perform calculations
- data0 = POOL_OP(data0, data1);
- data0 = POOL_OP(data0, data2);
- ACC_DATA_TYPE res = POOL_OP(POOL_OP(data0.s0, data0.s1), data0.s2);
-
-#if defined(POOL_AVG) || defined(POOL_L2)
- // Divide by pool region in case of average pooling
- res = DIV_OP(res, calculate_avg_scale(3, 3, MAX_WIDTH, MAX_HEIGHT, PAD_X, PAD_Y, STRIDE_X, STRIDE_Y));
-#endif /* defined(POOL_AVG) || defined(POOL_L2) */
-
-#if defined(POOL_L2)
- // Take square root of the result in L2 pooling
- res = SQRT_OP(res);
-#endif /* defined(POOL_L2) */
-
- // Store result
- *(__global DATA_TYPE *)output.ptr = (DATA_TYPE)res;
-}
-
-#if defined(POOLING3x3)
-
-#define CONVERT_OP(data_type) convert_##data_type##4
-#define CONVERT_VECTOR4(data_type) CONVERT_OP(data_type)
-
-VEC_DATA_TYPE(ACC_DATA_TYPE, 4)
-calculate_avg_scale4(const int pool_size, const int upper_bound_w, const int upper_bound_h,
- const int pad_x, const int pad_y, const int stride_x, const int stride_y)
-{
- int4 start_x = ((int4)get_global_id(0) * 4 + (int4)(0, 1, 2, 3)) * (int4)stride_x - (int4)pad_x;
- int start_y = get_global_id(1) * stride_y - pad_y;
- const int4 end_x = min(start_x + (int4)pool_size, (int4)upper_bound_w);
- const int end_y = min(start_y + pool_size, upper_bound_h);
-#if defined(EXCLUDE_PADDING)
- start_x = max((int4)0, start_x);
- start_y = max(0, start_y);
-#endif /* defined(EXCLUDE_PADDING) */
- return (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(1.f) / CONVERT_VECTOR4(ACC_DATA_TYPE)(((int4)(end_y - start_y)) * (end_x - start_x));
-}
-
-/** Performs an optimized pooling function of pool size equal to 3 when the stride_x is less than or equal to 3
- *
- * @note Datatype must be passed using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types are F16/F32;
- * @note In case of average pooling the following information must be passed at compile time:
- * -DPOOL_AVG or -DPOOL_L2 must be provided otherwise max pooling will be performed.
- *       -DMAX_WIDTH and -DMAX_HEIGHT which are the maximum accessible indices in x and y dimensions (width + pad)
- * -DSTRIDE_X and -DSTRIDE_Y which are the steps of the window along the x and y directions
- * -DPAD_X and -DPAD_Y which are the pooling paddings in x and y dimension
- *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  output_stride_z                       Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void pooling_layer_optimized_3(
- TENSOR3D_DECLARATION(input),
- TENSOR3D_DECLARATION(output))
-{
- // Get pixels pointer
- Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
-
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4)
- res;
-
- // Perform pooling 3x3 for 4 output elements
- POOLING3x3(res, input, output);
-
-#if defined(POOL_AVG) || defined(POOL_L2)
- // Divide by pool region in case of average pooling
- res *= calculate_avg_scale4(3, MAX_WIDTH, MAX_HEIGHT, PAD_X, PAD_Y, STRIDE_X, STRIDE_Y);
-#endif /* defined(POOL_AVG) || defined(POOL_L2) */
-
-#if defined(POOL_L2)
- // Take square root of the result in L2 pooling
- res = SQRT_OP(res);
-#endif /* defined(POOL_L2) */
-
- vstore4(CONVERT(res, VEC_DATA_TYPE(DATA_TYPE, 4)), 0, (__global DATA_TYPE *)output.ptr);
-}
-#endif // defined(POOLING3x3)
-
-#if defined(POOL_SIZE_X) && defined(POOL_SIZE_Y)
-
-/** Performs a pooling function of pool size equal to N (NCHW)
- *
- * @note Datatype must be passed using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types are F16/F32;
- * @note Pool sizes must be passed using -DPOOL_SIZE_X and -DPOOL_SIZE_Y e.g. -DPOOL_SIZE_X=13;
- * @note In case of average pooling the following information must be passed at compile time:
- * -DPOOL_AVG must be provided otherwise max pooling will be performed.
- *       -DMAX_WIDTH and -DMAX_HEIGHT which are the maximum accessible indices in x and y dimensions (width + pad)
- * -DSTRIDE_X and -DSTRIDE_Y which are the steps of the window along the x and y directions
- * -DPAD_X and -DPAD_Y which are the pooling paddings in x and y dimension
- * @note The initial value for the pooling operation must be passed at compile time using -DINITIAL_VALUE e.g. -DINITIAL_VALUE=0
- *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void pooling_layer_MxN_nchw(
- TENSOR3D_DECLARATION(input),
- TENSOR3D_DECLARATION(output))
-{
- // Get pixels pointer
- Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
-
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8)
- vdata = INITIAL_VALUE;
- ACC_DATA_TYPE sdata = INITIAL_VALUE;
-
- // Load data
- for(int y = 0; y < POOL_SIZE_Y; y++)
- {
- int x = 0;
- for(; x <= ((int)POOL_SIZE_X - 8); x += 8)
- {
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8)
- data0 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(8, 0, (__global DATA_TYPE *)tensor3D_offset(&input, x, y, 0));
-#if defined(POOL_L2)
- // Raise to power of 2 for L2 Pooling
- data0 *= data0;
-#endif /* defined(POOL_L2) */
- vdata = POOL_OP(vdata, data0);
- }
-
- // Leftover
- for(; x < (int)POOL_SIZE_X; ++x)
- {
- ACC_DATA_TYPE data0 = (ACC_DATA_TYPE)(*((__global DATA_TYPE *)tensor3D_offset(&input, x, y, 0)));
-#if defined(POOL_L2)
- // Raise to power of 2 for L2 Pooling
- data0 *= data0;
-#endif /* defined(POOL_L2) */
- sdata = POOL_OP(sdata, data0);
- }
- }
-
- // Reduce result
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4)
- reduce4 = POOL_OP(vdata.s0123, vdata.s4567);
- VEC_DATA_TYPE(ACC_DATA_TYPE, 2)
- reduce2 = POOL_OP(reduce4.s01, reduce4.s23);
- ACC_DATA_TYPE res = POOL_OP(reduce2.s0, reduce2.s1);
- res = POOL_OP(res, sdata);
-
-#if defined(POOL_AVG) || defined(POOL_L2)
- // Divide by pool region in case of average pooling
- res = DIV_OP(res, calculate_avg_scale(POOL_SIZE_X, POOL_SIZE_Y, MAX_WIDTH, MAX_HEIGHT, PAD_X, PAD_Y, STRIDE_X, STRIDE_Y));
-#endif /* defined(POOL_AVG) || defined(POOL_L2) */
-
-#if defined(POOL_L2)
- // Take square root of the result in L2 pooling
- res = SQRT_OP(res);
-#endif /* defined(POOL_L2) */
-
- // Store result
- *(__global DATA_TYPE *)output.ptr = (DATA_TYPE)res;
-}
-#endif // defined(POOL_SIZE_X) && defined(POOL_SIZE_Y)
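For reference only (not part of this patch): the notes above list the compile-time defines the MxN NCHW kernel expects. A minimal host-side sketch of how such a build-options string could be assembled, assuming an illustrative 13x13 average pool with unit strides and no padding; all concrete values here are assumptions for the example.

/* Illustrative host-side sketch; parameter values are assumptions. */
#include <stdio.h>
#include <CL/cl.h>

static cl_int build_pooling_mxn_nchw(cl_program program, cl_device_id device)
{
    char options[512];
    snprintf(options, sizeof(options),
             "-DDATA_TYPE=float -DACC_DATA_TYPE=float -DINITIAL_VALUE=0 "
             "-DPOOL_AVG -DPOOL_SIZE_X=13 -DPOOL_SIZE_Y=13 "
             "-DSTRIDE_X=1 -DSTRIDE_Y=1 -DPAD_X=0 -DPAD_Y=0 "
             "-DMAX_WIDTH=13 -DMAX_HEIGHT=13");
    /* The kernel itself would then be obtained with clCreateKernel(program, "pooling_layer_MxN_nchw", ...). */
    return clBuildProgram(program, 1, &device, options, NULL, NULL);
}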
-
-#if defined(PAD_TENSOR_LEFT) && defined(PAD_TENSOR_RIGHT) && defined(PAD_TENSOR_TOP) && defined(PAD_TENSOR_BOTTOM)
-
-inline void offset_no_padding_nchw(const Tensor3D *input, uint *offset_top, uint *offset_bottom)
-{
- const int pad_horiz = PAD_TENSOR_LEFT + PAD_TENSOR_RIGHT;
- const int pad_vert = PAD_TENSOR_TOP + PAD_TENSOR_BOTTOM;
-
- const int x = get_global_id(0) * STRIDE_X;
- const int y = get_global_id(1) * STRIDE_Y;
- const int z = get_global_id(2);
-
- //x axis: width, y axis: height, z axis: component
- const uint padded_offset = input->offset_first_element_in_bytes
- + x * input->stride_x
- + y * input->stride_y
- + z * input->stride_z;
-
- const uint offset_base = padded_offset
- - y * pad_horiz * sizeof(DATA_TYPE) /* Horizontal padding for each row */
- - PAD_TENSOR_TOP * input->stride_y /* top padding */
- - z * MAX_HEIGHT * pad_horiz * sizeof(DATA_TYPE) - z * pad_vert * input->stride_y /* Z plane padding */
- - PAD_TENSOR_LEFT * sizeof(DATA_TYPE);
-
-#if defined(TENSOR_CHANNEL) && defined(TENSOR_WIDTH) && defined(TENSOR_HEIGHT)
- *offset_top = (uint)((offset_base / sizeof(DATA_TYPE)) % (TENSOR_CHANNEL * TENSOR_WIDTH * TENSOR_HEIGHT));
-#else /* defined(TENSOR_CHANNEL) && defined(TENSOR_WIDTH) && defined(TENSOR_HEIGHT) */
- *offset_top = (uint)(offset_base / sizeof(DATA_TYPE));
-#endif /* defined(TENSOR_CHANNEL) && defined(TENSOR_WIDTH) && defined(TENSOR_HEIGHT) */
-
- *offset_bottom = *offset_top + input->stride_y / sizeof(DATA_TYPE) - pad_horiz;
-
- return;
-}
-
-#endif //defined(PAD_TENSOR_LEFT) && defined(PAD_TENSOR_RIGHT) && defined(PAD_TENSOR_TOP) && defined(PAD_TENSOR_BOTTOM)
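The two offsets returned above are linear element indices into the tensor as if it had no padding; offset_bottom sits exactly one unpadded row below offset_top. A small worked check, with illustrative values assumed for the example:

/* Assuming DATA_TYPE=float, PAD_TENSOR_LEFT = PAD_TENSOR_RIGHT = 1 and a padded
 * row stride of 24 bytes, the unpadded row length is 24 / sizeof(float) - 2 = 4
 * elements, which is the step applied to get offset_bottom from offset_top. */
unsigned int unpadded_row_elems = 24u / sizeof(float) - 2u; /* = 4 */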
-
-/** Performs a MAX pooling of pool size equal to 2, and records the max value indices for NCHW.
- *
- * @note Datatype must be passed using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types are F32
- * @note Pool sizes must be passed using -DPOOL_SIZE_X and -DPOOL_SIZE_Y e.g. -DPOOL_SIZE_X=13;
- * @note Tensor width and height must be passed at compile time using -DMAX_WIDTH and -DMAX_HEIGHT
- * @note Pool strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y which are the steps of the window along the x and y directions
- * @note Tensor padding values must be passed at compile time using PAD_TENSOR_LEFT, PAD_TENSOR_RIGHT, PAD_TENSOR_TOP and PAD_TENSOR_BOTTOM
- *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: F32
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] indices_ptr Pointer to the indices tensor. Supported data types: U32
- * @param[in] indices_stride_x Stride of the indices tensor in X dimension (in bytes)
- * @param[in] indices_step_x indices_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] indices_stride_y Stride of the indices tensor in Y dimension (in bytes)
- * @param[in] indices_step_y indices_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] indices_stride_z Stride of the indices tensor in Z dimension (in bytes)
- * @param[in] indices_step_z indices_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] indices_offset_first_element_in_bytes The offset of the first element in the indices tensor
- */
-__kernel void pooling_layer_2_nchw_indices_fp32(
- TENSOR3D_DECLARATION(input),
- TENSOR3D_DECLARATION(output),
- TENSOR3D_DECLARATION(indices))
-{
- // Get pixels pointer
- Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
- Tensor3D indices = CONVERT_TO_TENSOR3D_STRUCT(indices);
-
- // Load data
- float2 data0 = VLOAD(2)(0, (__global float *)tensor3D_offset(&input, 0, 0, 0));
- float2 data1 = VLOAD(2)(0, (__global float *)tensor3D_offset(&input, 0, 1, 0));
-
- // Perform calculations
- float data0_max = POOL_OP(data0.s0, data0.s1);
- float data1_max = POOL_OP(data1.s0, data1.s1);
- float res = POOL_OP(data0_max, data1_max);
- // Store result
- *(__global float *)output.ptr = res;
-
-#if defined(PAD_TENSOR_LEFT) && defined(PAD_TENSOR_RIGHT) && defined(PAD_TENSOR_TOP) && defined(PAD_TENSOR_BOTTOM)
-
- uint offset_top = 0;
- uint offset_bottom = 0;
-
- offset_no_padding_nchw(&input, &offset_top, &offset_bottom);
-
- uint index0 = select(offset_top + 1, offset_top, isgreaterequal(data0.s0, data0.s1));
- uint index1 = select(offset_bottom + 1, offset_bottom, isgreaterequal(data1.s0, data1.s1));
- uint index = select(index1, index0, isgreaterequal(data0_max, data1_max));
-
- *(__global uint *)indices.ptr = index;
-
-#endif //defined(PAD_TENSOR_LEFT) && defined(PAD_TENSOR_RIGHT) && defined(PAD_TENSOR_TOP) && defined(PAD_TENSOR_BOTTOM)
-}
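The index extraction above relies on the scalar OpenCL C pattern select(a, b, isgreaterequal(x, y)), which picks b when x >= y and a otherwise. A plain C sketch of the equivalent logic, with hypothetical names used only for illustration:

/* Scalar equivalent of the select()/isgreaterequal() pick used above:
 * take the left element's offset when it is >= the right one, else the next offset. */
unsigned int pick_index(float left, float right, unsigned int offset_left)
{
    return (left >= right) ? offset_left : offset_left + 1u;
}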
-
-/** Performs a MAX pooling of pool size equal to 2, and records the max value indices for NCHW.
- *
- * @note Datatype must be passed using -DDATA_TYPE e.g. -DDATA_TYPE=half. Supported data types are F16
- * @note Pool sizes must be passed using -DPOOL_SIZE_X and -DPOOL_SIZE_Y e.g. -DPOOL_SIZE_X=13;
- * @note Tensor width and height must be passed at compile time using -DMAX_WIDTH and -DMAX_HEIGHT
- * @note Pool strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y which are the steps of the window along the x and y directions
- * @note Tensor padding values must be passed at compile time using PAD_TENSOR_LEFT, PAD_TENSOR_RIGHT, PAD_TENSOR_TOP and PAD_TENSOR_BOTTOM
- *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: F16
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] indices_ptr Pointer to the indices tensor. Supported data types: U32
- * @param[in] indices_stride_x Stride of the indices tensor in X dimension (in bytes)
- * @param[in] indices_step_x indices_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] indices_stride_y Stride of the indices tensor in Y dimension (in bytes)
- * @param[in] indices_step_y indices_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] indices_stride_z Stride of the indices tensor in Z dimension (in bytes)
- * @param[in] indices_step_z indices_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] indices_offset_first_element_in_bytes The offset of the first element in the indices tensor
- */
-__kernel void pooling_layer_2_nchw_indices_fp16(
- TENSOR3D_DECLARATION(input),
- TENSOR3D_DECLARATION(output),
- TENSOR3D_DECLARATION(indices))
-{
- // Get pixels pointer
- Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
- Tensor3D indices = CONVERT_TO_TENSOR3D_STRUCT(indices);
-
- // Load data
- half2 data0 = VLOAD(2)(0, (__global half *)tensor3D_offset(&input, 0, 0, 0));
- half2 data1 = VLOAD(2)(0, (__global half *)tensor3D_offset(&input, 0, 1, 0));
-
- // Perform calculations
- half data0_max = POOL_OP(data0.s0, data0.s1);
- half data1_max = POOL_OP(data1.s0, data1.s1);
- half res = POOL_OP(data0_max, data1_max);
- // Store result
- *(__global half *)output.ptr = res;
-
-#if defined(PAD_TENSOR_LEFT) && defined(PAD_TENSOR_RIGHT) && defined(PAD_TENSOR_TOP) && defined(PAD_TENSOR_BOTTOM)
-
- uint offset_top = 0;
- uint offset_bottom = 0;
-
- offset_no_padding_nchw(&input, &offset_top, &offset_bottom);
-
- uint index0 = select(offset_top + 1, offset_top, isgreaterequal(data0.s0, data0.s1));
- uint index1 = select(offset_bottom + 1, offset_bottom, isgreaterequal(data1.s0, data1.s1));
- uint index = select(index1, index0, isgreaterequal(data0_max, data1_max));
-
- *(__global uint *)indices.ptr = index;
-
-#endif //defined(PAD_TENSOR_LEFT) && defined(PAD_TENSOR_RIGHT) && defined(PAD_TENSOR_TOP) && defined(PAD_TENSOR_BOTTOM)
-}
-
-#if defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(DST_CHANNELS) && defined(DST_HEIGHT) && defined(DST_BATCH_SIZE) && defined(ACC_DATA_TYPE)
-
-#if defined(POOL_SIZE_X) && defined(POOL_SIZE_Y)
-/** Performs pooling layer of size equal to MxN. This OpenCL kernel can perform the following pooling types:
- * -# max, -DPOOL_MAX must be passed at compile time
- * -# average, -DPOOL_AVG must be passed at compile time. If padding has to be excluded, -DEXCLUDE_PADDING should be passed at compile time
- * -# l2 normalisation, -DPOOL_L2 must be passed at compile time
- *
- * @note Datatype must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=half. Supported data types are F32/F16
- * @note Accumulation data type must be passed at compile time using -DACC_DATA_TYPE e.g. -DACC_DATA_TYPE=float
- * @note If -DFP_MIXED_PRECISION is passed at compile time, the kernel will use F32 for the partial result
- * @note Pool size must be passed at compile time using -DPOOL_SIZE_X and -DPOOL_SIZE_Y. e.g. -DPOOL_SIZE_X=4, -DPOOL_SIZE_Y=4
- * @note Input tensor width and height must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT
- * @note Output tensor height, channels and batch size must be passed at compile time using -DDST_HEIGHT, -DDST_CHANNELS and -DDST_BATCH_SIZE
- * @note Pool strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y which are the steps of the window along the x and y directions
- * @note Pool pads must be passed at compile time using -DPAD_X and -DPAD_Y
- * @note Vector size must be passed at compile time using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
- * @note Leftover vector size must be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder of the input's first dimension divided by VEC_SIZE
- * @note The initial value for the pooling operation must be passed at compile time using -DINITIAL_VALUE e.g. -DINITIAL_VALUE=0
- *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] input_step_w input_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_stride_w Stride of the destination tensor in W dimension (in bytes)
- * @param[in] output_step_w output_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void pooling_layer_MxN_nhwc(
- TENSOR4D_DECLARATION(input),
- TENSOR4D_DECLARATION(output))
-{
- // Note: If C is not a multiple of VEC_SIZE, all work-items except the first are shifted back by (VEC_SIZE - VEC_SIZE_LEFTOVER) elements, so that get_global_id(0) == 0 computes the leftover elements
- // Note: If C is less than VEC_SIZE, VEC_SIZE should be shrunk to the closest smaller vector size. This operation is performed on the host side
- int offset_c = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0) * sizeof(DATA_TYPE);
- int idx_out_w = get_global_id(1);
-#if DST_BATCH_SIZE != 1
- // If batch size != 1, the batch size dimension is collapsed over the height dimension
- int idx_out_h = get_global_id(2) % DST_HEIGHT;
- int idx_out_n = get_global_id(2) / DST_HEIGHT;
-#else //DST_BATCH_SIZE != 1
- int idx_out_h = get_global_id(2);
- int idx_out_n = 0;
-#endif // DST_BATCH_SIZE != 1
-
- int idx_in_w = idx_out_w * STRIDE_X - PAD_X;
- int idx_in_h = idx_out_h * STRIDE_Y - PAD_Y;
-
- int pool_x_s = max((int)0, -idx_in_w);
- int pool_x_e = min((int)POOL_SIZE_X, (int)SRC_WIDTH - idx_in_w);
- int pool_y_s = max((int)0, -idx_in_h);
- int pool_y_e = min((int)POOL_SIZE_Y, (int)SRC_HEIGHT - idx_in_h);
-
- __global unsigned char *in_base_ptr = input_ptr + input_offset_first_element_in_bytes +
- offset_c +
- idx_out_n * input_stride_w;
-
- __global unsigned char *out_base_ptr = output_ptr + output_offset_first_element_in_bytes +
- offset_c +
- idx_out_w * output_stride_y +
- idx_out_h * output_stride_z +
- idx_out_n * output_stride_w;
-
-#if ((defined(POOL_AVG) || defined(POOL_L2)))
-#if defined(EXCLUDE_PADDING)
- int filter_size = 0;
-#else // defined(EXCLUDE_PADDING)
- int filter_size = POOL_SIZE_X * POOL_SIZE_Y;
-#endif // defined(EXCLUDE_PADDING)
-#endif // ((defined(POOL_AVG) || defined(POOL_L2)))
-
- VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)
- res0 = INITIAL_VALUE;
-
- for(int y = pool_y_s; y < pool_y_e; ++y)
- {
- for(int x = pool_x_s; x < pool_x_e; ++x)
- {
- VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE) data0;
-#if defined(FP_MIXED_PRECISION)
- // In case of FP_MIXED_PRECISION, ACC_DATA_TYPE is != DATA_TYPE
- data0 = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(in_base_ptr + (x + idx_in_w) * input_stride_y + (y + idx_in_h) * input_stride_z)), VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE));
-#else // defined(FP_MIXED_PRECISION)
- data0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(in_base_ptr + (x + idx_in_w) * input_stride_y + (y + idx_in_h) * input_stride_z));
-#endif // defined(FP_MIXED_PRECISION)
-
-#if defined(POOL_L2)
- // Raise to power of 2 for L2 Pooling
- data0 *= data0;
-#endif // defined(POOL_L2)
- res0 = POOL_OP(res0, data0);
-
-#if ((defined(POOL_AVG) || defined(POOL_L2))) && defined(EXCLUDE_PADDING)
- filter_size++;
-#endif // ((defined(POOL_AVG) || defined(POOL_L2))) && defined(EXCLUDE_PADDING)
- }
- }
-
-#if defined(POOL_AVG) || defined(POOL_L2)
- res0 /= (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))filter_size;
-#endif // defined(POOL_AVG) || defined(POOL_L2)
-
-#if defined(POOL_L2)
- // Take square root of the result in L2 pooling
- res0 = SQRT_OP(res0);
-#endif // defined(POOL_L2)
-
- // Store result
-#if defined(FP_MIXED_PRECISION)
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) res_converted0 = CONVERT(res0, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE));
- STORE_VECTOR_SELECT(res_converted, DATA_TYPE, out_base_ptr, VEC_SIZE, VEC_SIZE_LEFTOVER, (VEC_SIZE_LEFTOVER != 0) && get_global_id(0) == 0);
-#else // defined(FP_MIXED_PRECISION)
- STORE_VECTOR_SELECT(res, DATA_TYPE, out_base_ptr, VEC_SIZE, VEC_SIZE_LEFTOVER, (VEC_SIZE_LEFTOVER != 0) && get_global_id(0) == 0);
-#endif // defined(FP_MIXED_PRECISION)
-}
-#endif // defined(POOL_SIZE_X) && defined(POOL_SIZE_Y)
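The leftover handling documented in the note above can be checked with small numbers. A C sketch mirroring the channel-offset computation, with assumed values (C = 19 channels, VEC_SIZE = 16, so VEC_SIZE_LEFTOVER = 3) used purely for illustration:

/* gid == 0 -> offset 0 and only the 3 leftover elements are stored;
 * gid == 1 -> offset 16 - (16 - 3) = 3, storing elements 3..18.
 * Every channel is therefore written exactly once. Offsets are in elements here. */
int offset_for(int gid)
{
    const int vec_size = 16, leftover = 3;
    int off = gid * vec_size - (vec_size - leftover) % vec_size;
    return off < 0 ? 0 : off;
}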
-
-#define SELECT_TYPE SELECT_VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)
-
-/** Performs pooling layer of size equal to 2. This OpenCL kernel can perform the following pooling types:
- * -# max, -DPOOL_MAX must be passed at compile time
- * -# max extracting the max index, -DPOOL_MAX and -DEXTRACT_MAX_INDEX must be passed at compile time
- * -# average, -DPOOL_AVG must be passed at compile time. If padding has to be excluded, -DEXCLUDE_PADDING should be passed at compile time
- * -# l2 normalisation, -DPOOL_L2 must be passed at compile time
- *
- * @note Datatype must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=half. Supported data types are F32/F16
- * @note Accumulation data type must be passed at compile time using -DACC_DATA_TYPE e.g. -DACC_DATA_TYPE=float
- * @note If -DFP_MIXED_PRECISION is passed at compile time, the kernel will use F32 for the partial result
- * @note Input tensor width and height must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT
- * @note Output tensor height, channels and batch size must be passed at compile time using -DDST_HEIGHT, -DDST_CHANNELS and -DDST_BATCH_SIZE
- * @note Pool strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y which are the steps of the window along the x and y directions
- * @note Pool pads must be passed at compile time using -DPAD_X and -DPAD_Y
- * @note Vector size must be passed at compile time using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
- * @note Leftover vector size must be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder of the input's first dimension divided by VEC_SIZE
- * @note The initial value for the pooling operation must be passed at compile time using -DINITIAL_VALUE e.g. -DINITIAL_VALUE=0
- *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] input_step_w input_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_stride_w Stride of the destination tensor in W dimension (in bytes)
- * @param[in] output_step_w output_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] indices_ptr (Optional) Pointer to the indices tensor. Supported data types: U32
- * @param[in] indices_stride_x (Optional) Stride of the indices tensor in X dimension (in bytes)
- * @param[in] indices_step_x (Optional) indices_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] indices_stride_y (Optional) Stride of the indices tensor in Y dimension (in bytes)
- * @param[in] indices_step_y (Optional) indices_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] indices_stride_z (Optional) Stride of the indices tensor in Z dimension (in bytes)
- * @param[in] indices_step_z (Optional) indices_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] indices_stride_w (Optional) Stride of the indices tensor in W dimension (in bytes)
- * @param[in] indices_step_w (Optional) indices_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] indices_offset_first_element_in_bytes (Optional) The offset of the first element in the indices tensor
- */
-__kernel void pooling_layer_2x2_nhwc(
- TENSOR4D_DECLARATION(input),
- TENSOR4D_DECLARATION(output)
-#if defined(EXTRACT_MAX_INDEX) && defined(POOL_MAX)
- ,
- TENSOR4D_DECLARATION(indices)
-#endif // defined(EXTRACT_MAX_INDEX) && defined(POOL_MAX)
-)
-{
- // Note: If C is not a multiple of VEC_SIZE, all work-items except the first are shifted back by (VEC_SIZE - VEC_SIZE_LEFTOVER) elements, so that get_global_id(0) == 0 computes the leftover elements
- // Note: If C is less than VEC_SIZE, VEC_SIZE should be shrunk to the closest smaller vector size. This operation is performed on the host side
- int idx_out_c = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
- int idx_out_w = get_global_id(1);
-#if DST_BATCH_SIZE != 1
- // If batch size != 1, the batch size dimension is collapsed over the height dimension
- int idx_out_h = get_global_id(2) % DST_HEIGHT;
- int idx_out_n = get_global_id(2) / DST_HEIGHT;
-#else //DST_BATCH_SIZE != 1
- int idx_out_h = get_global_id(2);
- int idx_out_n = 0;
-#endif // DST_BATCH_SIZE != 1
-
- int idx_in_w = idx_out_w * STRIDE_X - PAD_X;
- int idx_in_h = idx_out_h * STRIDE_Y - PAD_Y;
-
- __global unsigned char *in_base_ptr = input_ptr + input_offset_first_element_in_bytes +
- idx_out_c * sizeof(DATA_TYPE) +
- idx_out_n * input_stride_w;
-
- __global unsigned char *out_base_ptr = output_ptr + output_offset_first_element_in_bytes +
- idx_out_c * sizeof(DATA_TYPE) +
- idx_out_w * output_stride_y +
- idx_out_h * output_stride_z +
- idx_out_n * output_stride_w;
-
- int pool_x_s = max((int)0, -idx_in_w);
- int pool_x_e = min((int)2, (int)SRC_WIDTH - idx_in_w);
- int pool_y_s = max((int)0, -idx_in_h);
- int pool_y_e = min((int)2, (int)SRC_HEIGHT - idx_in_h);
-
- int filter_size = (pool_x_e - pool_x_s) * (pool_y_e - pool_y_s);
-
- int x0 = pool_x_s + idx_in_w;
- int y0 = pool_y_s + idx_in_h;
- int x1 = pool_x_e - 1 + idx_in_w;
- int y1 = pool_y_e - 1 + idx_in_h;
-
- REPEAT_VAR_INIT_TO_CONST(4, VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE), data, 0);
-
-#if defined(FP_MIXED_PRECISION)
- // In case of FP_MIXED_PRECISION, ACC_DATA_TYPE is != DATA_TYPE
- data0 = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(in_base_ptr + x0 * input_stride_y + y0 * input_stride_z)), VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE));
- data1 = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(in_base_ptr + x1 * input_stride_y + y0 * input_stride_z)), VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE));
- data2 = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(in_base_ptr + x0 * input_stride_y + y1 * input_stride_z)), VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE));
- data3 = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(in_base_ptr + x1 * input_stride_y + y1 * input_stride_z)), VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE));
-#else // defined(FP_MIXED_PRECISION)
- data0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(in_base_ptr + x0 * input_stride_y + y0 * input_stride_z));
- data1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(in_base_ptr + x1 * input_stride_y + y0 * input_stride_z));
- data2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(in_base_ptr + x0 * input_stride_y + y1 * input_stride_z));
- data3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(in_base_ptr + x1 * input_stride_y + y1 * input_stride_z));
-#endif // defined(FP_MIXED_PRECISION)
-
-#if !defined(POOL_MAX)
- if(filter_size != 4)
- {
- SELECT_TYPE cond_w_s = (SELECT_TYPE)idx_in_w < (SELECT_TYPE)0;
- SELECT_TYPE cond_w_e = (SELECT_TYPE)idx_in_w >= (SELECT_TYPE)(SRC_WIDTH - 1);
- SELECT_TYPE cond_h_s = (SELECT_TYPE)idx_in_h < (SELECT_TYPE)0;
- SELECT_TYPE cond_h_e = (SELECT_TYPE)idx_in_h >= (SELECT_TYPE)(SRC_HEIGHT - 1);
-
- // Invalidate the loaded values if the x or y coordinate was clamped (out-of-bounds)
- data0 = select(data0, (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))INITIAL_VALUE, (SELECT_TYPE)(cond_w_s | cond_h_s));
- data1 = select(data1, (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))INITIAL_VALUE, (SELECT_TYPE)(cond_w_e | cond_h_s));
- data2 = select(data2, (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))INITIAL_VALUE, (SELECT_TYPE)(cond_w_s | cond_h_e));
- data3 = select(data3, (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))INITIAL_VALUE, (SELECT_TYPE)(cond_w_e | cond_h_e));
- }
-#endif // !defined(POOL_MAX)
-
-#if defined(POOL_L2)
- // Raise to power of 2 for L2 Pooling
- data0 *= data0;
- data1 *= data1;
- data2 *= data2;
- data3 *= data3;
-#endif /* defined(POOL_L2) */
-
- VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)
- res0 = data0;
- res0 = POOL_OP(res0, data1);
- res0 = POOL_OP(res0, data2);
- res0 = POOL_OP(res0, data3);
-
-#if defined(POOL_AVG) || defined(POOL_L2)
-#if defined(EXCLUDE_PADDING)
- res0 /= (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))filter_size;
-#else // !defined(EXCLUDE_PADDING)
- res0 /= (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))4;
-#endif // defined(EXCLUDE_PADDING)
-#endif // defined(POOL_AVG) || defined(POOL_L2)
-
-#if defined(POOL_L2)
- // Take square root of the result in L2 pooling
- res0 = SQRT_OP(res0);
-#endif // defined(POOL_L2)
-
- // Store result
-#if defined(FP_MIXED_PRECISION)
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) res_converted0 = CONVERT(res0, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE));
- STORE_VECTOR_SELECT(res_converted, DATA_TYPE, out_base_ptr, VEC_SIZE, VEC_SIZE_LEFTOVER, (VEC_SIZE_LEFTOVER != 0) && get_global_id(0) == 0);
-#else // defined(FP_MIXED_PRECISION)
- STORE_VECTOR_SELECT(res, DATA_TYPE, out_base_ptr, VEC_SIZE, VEC_SIZE_LEFTOVER, (VEC_SIZE_LEFTOVER != 0) && get_global_id(0) == 0);
-#endif // defined(FP_MIXED_PRECISION)
-
-#if defined(EXTRACT_MAX_INDEX) && defined(POOL_MAX)
-
- // This part is used to return the index of the maximum value
- // Note: DST_CHANNELS and DST_BATCH_SIZE can be used for either the input or the output tensor
-
- // Note: The batch dimension does not contribute to the offset
- VEC_DATA_TYPE(uint, VEC_SIZE) base_index = (uint)idx_out_c;
-
- base_index += VEC_OFFS(uint, VEC_SIZE);
-
- VEC_DATA_TYPE(uint, VEC_SIZE) index0 = base_index + (uint)x0 * DST_CHANNELS + (uint)y0 * (DST_CHANNELS * SRC_WIDTH);
- VEC_DATA_TYPE(uint, VEC_SIZE) index1 = base_index + (uint)x1 * DST_CHANNELS + (uint)y0 * (DST_CHANNELS * SRC_WIDTH);
- VEC_DATA_TYPE(uint, VEC_SIZE) index2 = base_index + (uint)x0 * DST_CHANNELS + (uint)y1 * (DST_CHANNELS * SRC_WIDTH);
- VEC_DATA_TYPE(uint, VEC_SIZE) index3 = base_index + (uint)x1 * DST_CHANNELS + (uint)y1 * (DST_CHANNELS * SRC_WIDTH);
-
- index0 = select(index1, index0, CONVERT(isgreaterequal(data0, data1), VEC_DATA_TYPE(int, VEC_SIZE)));
- index1 = select(index3, index2, CONVERT(isgreaterequal(data2, data3), VEC_DATA_TYPE(int, VEC_SIZE)));
- index0 = select(index1, index0, CONVERT(isgreaterequal(max(data0, data1), max(data2, data3)), VEC_DATA_TYPE(int, VEC_SIZE)));
-
- __global unsigned char *idx_base_ptr = indices_ptr + indices_offset_first_element_in_bytes +
- idx_out_c * sizeof(uint) +
- idx_out_w * indices_stride_y +
- idx_out_h * indices_stride_z +
- idx_out_n * indices_stride_w;
-
- // Store result
- STORE_VECTOR_SELECT(index, uint, idx_base_ptr, VEC_SIZE, VEC_SIZE_LEFTOVER, ((VEC_SIZE_LEFTOVER != 0) && get_global_id(0) == 0));
-#endif // defined(EXTRACT_MAX_INDEX) && defined(POOL_MAX)
-}
-#endif // defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(DST_CHANNELS) && defined(DST_HEIGHT) && defined(DST_BATCH_SIZE) && defined(ACC_DATA_TYPE) \ No newline at end of file
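The max-index path of the 2x2 NHWC kernel above flattens the (channel, x, y) position into a single element index, excluding the batch dimension. A small C sketch of that formula, with hypothetical names introduced only for illustration:

/* Flattened NHWC element index as produced for the indices tensor above
 * (batch excluded, as the kernel notes). */
unsigned int nhwc_index(unsigned int c, unsigned int x, unsigned int y,
                        unsigned int channels, unsigned int src_width)
{
    return c + x * channels + y * channels * src_width;
}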
diff --git a/src/core/CL/cl_kernels/remap.cl b/src/core/CL/cl_kernels/remap.cl
deleted file mode 100644
index 0f013c5127..0000000000
--- a/src/core/CL/cl_kernels/remap.cl
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (c) 2017 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers.h"
-#include "warp_helpers.h"
-
-/** Performs a remapping of an input image to an output image, given two remapping images, using nearest neighbour interpolation.
- *
- * This kernel performs remapping with this method of pixel coordinate translation:
- * out(x,y) = in(mapx(x,y), mapy(x,y));
- *
- * @param[in] in_ptr Pointer to the source image. Supported data types: U8.
- * @param[in] in_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] in_step_x in_stride_x * number of elements along X processed per work item (in bytes)
- * @param[in] in_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] in_step_y in_stride_y * number of elements along Y processed per work item (in bytes)
- * @param[in] in_offset_first_element_in_bytes Offset of the first element in the source image
- * @param[out] out_ptr Pointer to the destination image. Supported data types: U8.
- * @param[in] out_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] out_step_x out_stride_x * number of elements along X processed per work item (in bytes)
- * @param[in] out_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] out_step_y out_stride_y * number of elements along Y processed per work item (in bytes)
- * @param[in] out_offset_first_element_in_bytes Offset of the first element in the destination image
- * @param[in] mapx_ptr Pointer to the x remapping image. Supported data types: F32.
- * @param[in] mapx_stride_x Stride of the remapping image in X dimension (in bytes)
- * @param[in] mapx_step_x mapx_stride_x * number of elements along X processed per work item (in bytes)
- * @param[in] mapx_stride_y Stride of the remapping image in Y dimension (in bytes)
- * @param[in] mapx_step_y mapx_stride_y * number of elements along Y processed per work item (in bytes)
- * @param[in] mapx_offset_first_element_in_bytes Offset of the first element in the remapping image
- * @param[in] mapy_ptr Pointer to the y remapping image. Supported data types: F32.
- * @param[in] mapy_stride_x Stride of the remapping image in X dimension (in bytes)
- * @param[in] mapy_step_x mapy_stride_x * number of elements along X processed per work item (in bytes)
- * @param[in] mapy_stride_y Stride of the remapping image in Y dimension (in bytes)
- * @param[in] mapy_step_y mapy_stride_y * number of elements along Y processed per work item (in bytes)
- * @param[in] mapy_offset_first_element_in_bytes Offset of the first element in the remapping image
- * @param[in] width Width of the input image
- * @param[in] height Height of the input image
- */
-__kernel void remap_nearest_neighbour(
- IMAGE_DECLARATION(in),
- IMAGE_DECLARATION(out),
- IMAGE_DECLARATION(mapx),
- IMAGE_DECLARATION(mapy),
- const float width,
- const float height)
-{
- Image in = CONVERT_TO_IMAGE_STRUCT_NO_STEP(in);
- Image out = CONVERT_TO_IMAGE_STRUCT(out);
- Image mapx = CONVERT_TO_IMAGE_STRUCT(mapx);
- Image mapy = CONVERT_TO_IMAGE_STRUCT(mapy);
-
- float4 mapx_coords = vload4(0, (__global float *)mapx.ptr);
- float4 mapy_coords = vload4(0, (__global float *)mapy.ptr);
- float8 map_coords = (float8)(mapx_coords.s0, mapy_coords.s0, mapx_coords.s1, mapy_coords.s1,
- mapx_coords.s2, mapy_coords.s2, mapx_coords.s3, mapy_coords.s3);
- map_coords += (float8)(0.5f);
-
- vstore4(read_texels4(&in, convert_int8(clamp_to_border(map_coords, width, height))), 0, out.ptr);
-}
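The per-pixel translation performed above is out(x, y) = in(mapx(x, y), mapy(x, y)) with nearest-neighbour rounding. A simplified scalar C sketch of the same idea, assuming a row-major U8 image and a clamp-to-edge policy rather than the kernel's clamp_to_border helper; all names here are illustrative:

/* Simplified scalar remap with nearest-neighbour rounding and edge clamping. */
static inline int clamp_i(int v, int lo, int hi) { return v < lo ? lo : (v > hi ? hi : v); }

unsigned char remap_nearest_px(const unsigned char *in, int width, int height,
                               float map_x, float map_y)
{
    const int sx = clamp_i((int)(map_x + 0.5f), 0, width - 1);
    const int sy = clamp_i((int)(map_y + 0.5f), 0, height - 1);
    return in[sy * width + sx];
}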
-
-/** Performs a remapping of an input image to an output image, given two remapping images, using bilinear interpolation.
- *
- * This kernel performs remapping with this method of pixel coordinate translation:
- * out(x,y) = in(mapx(x,y), mapy(x,y));
- *
- * @param[in] in_ptr Pointer to the source image. Supported data types: U8.
- * @param[in] in_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] in_step_x in_stride_x * number of elements along X processed per work item (in bytes)
- * @param[in] in_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] in_step_y in_stride_y * number of elements along Y processed per work item (in bytes)
- * @param[in] in_offset_first_element_in_bytes Offset of the first element in the source image
- * @param[out] out_ptr Pointer to the destination image. Supported data types: U8.
- * @param[in] out_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] out_step_x out_stride_x * number of elements along X processed per work item (in bytes)
- * @param[in] out_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] out_step_y out_stride_y * number of elements along Y processed per work item (in bytes)
- * @param[in] out_offset_first_element_in_bytes Offset of the first element in the destination image
- * @param[in] mapx_ptr Pointer to the x remapping image. Supported data types: F32.
- * @param[in] mapx_stride_x Stride of the remapping image in X dimension (in bytes)
- * @param[in] mapx_step_x mapx_stride_x * number of elements along X processed per work item (in bytes)
- * @param[in] mapx_stride_y Stride of the remapping image in Y dimension (in bytes)
- * @param[in] mapx_step_y mapx_stride_y * number of elements along Y processed per work item (in bytes)
- * @param[in] mapx_offset_first_element_in_bytes Offset of the first element in the remapping image
- * @param[in] mapy_ptr Pointer to the y remapping image. Supported data types: F32.
- * @param[in] mapy_stride_x Stride of the remapping image in X dimension (in bytes)
- * @param[in] mapy_step_x mapy_stride_x * number of elements along X processed per work item (in bytes)
- * @param[in] mapy_stride_y Stride of the remapping image in Y dimension (in bytes)
- * @param[in] mapy_step_y mapy_stride_y * number of elements along Y processed per work item (in bytes)
- * @param[in] mapy_offset_first_element_in_bytes Offset of the first element in the remapping image
- * @param[in] width Width of the input image
- * @param[in] height Height of the input image
- */
-__kernel void remap_bilinear(
- IMAGE_DECLARATION(in),
- IMAGE_DECLARATION(out),
- IMAGE_DECLARATION(mapx),
- IMAGE_DECLARATION(mapy),
- const float width,
- const float height)
-{
- Image in = CONVERT_TO_IMAGE_STRUCT_NO_STEP(in);
- Image out = CONVERT_TO_IMAGE_STRUCT(out);
- Image mapx = CONVERT_TO_IMAGE_STRUCT(mapx);
- Image mapy = CONVERT_TO_IMAGE_STRUCT(mapy);
-
- float4 mapx_coords = vload4(0, (__global float *)mapx.ptr);
- float4 mapy_coords = vload4(0, (__global float *)mapy.ptr);
- float8 map_coords = (float8)(mapx_coords.s0, mapy_coords.s0, mapx_coords.s1, mapy_coords.s1,
- mapx_coords.s2, mapy_coords.s2, mapx_coords.s3, mapy_coords.s3);
-
- vstore4(bilinear_interpolate(&in, clamp_to_border(map_coords, width, height), width, height), 0, out.ptr);
-}
diff --git a/src/core/CL/cl_kernels/repeat.h b/src/core/CL/cl_kernels/repeat.h
index bed94a7b3b..cb2f4b0319 100644
--- a/src/core/CL/cl_kernels/repeat.h
+++ b/src/core/CL/cl_kernels/repeat.h
@@ -75,7 +75,9 @@
P_X##_DEF(F, P_A, P_B, P_C); \
REPEAT_3_15(P_X, P_A, P_B, P_C)
-#define REPEAT_DEF_3_N(P_NUM, P_OP, P_A, P_B, P_C) REPEAT_3_##P_NUM(P_OP, P_A, P_B, P_C) //One level of indirection to ensure order of expansion does not affect preprocessing P_NUM
+#define REPEAT_DEF_3_N(P_NUM, P_OP, P_A, P_B, P_C) \
+ REPEAT_3_##P_NUM(P_OP, P_A, P_B, \
+ P_C) //One level of indirection to ensure order of expansion does not affect preprocessing P_NUM
#define REPEAT_3_N(P_NUM, P_OP, P_A, P_B, P_C) REPEAT_DEF_3_N(P_NUM, P_OP, P_A, P_B, P_C)
// Repeat macros with 4 param, excluding the implicit ID param
@@ -126,52 +128,59 @@
P_X##_DEF(F, P_A, P_B, P_C, P_D); \
REPEAT_4_15(P_X, P_A, P_B, P_C, P_D)
-#define REPEAT_DEF_4_N(P_NUM, P_OP, P_A, P_B, P_C, P_D) REPEAT_4_##P_NUM(P_OP, P_A, P_B, P_C, P_D) //One level of indirection to ensure order of expansion does not affect preprocessing P_NUM
+#define REPEAT_DEF_4_N(P_NUM, P_OP, P_A, P_B, P_C, P_D) \
+ REPEAT_4_##P_NUM(P_OP, P_A, P_B, P_C, \
+ P_D) //One level of indirection to ensure order of expansion does not affect preprocessing P_NUM
#define REPEAT_4_N(P_NUM, P_OP, P_A, P_B, P_C, P_D) REPEAT_DEF_4_N(P_NUM, P_OP, P_A, P_B, P_C, P_D)
// Macro for initializing N variables. Generates N statements that defines VAR##N = RHS_ACCESSOR_DEF(...)
-#define VAR_INIT_TO_CONST_DEF(ID, TYPE, VAR, VAL) TYPE VAR##ID = VAL
+#define VAR_INIT_TO_CONST_DEF(ID, TYPE, VAR, VAL) TYPE VAR##ID = VAL
#define REPEAT_VAR_INIT_TO_CONST(N, TYPE, VAR, VAL) REPEAT_3_N(N, VAR_INIT_TO_CONST, TYPE, VAR, VAL)
// Macro for initializing N variables by converting the data type. Generates N statements that defines VAR##N = RHS_ACCESSOR_DEF(...)
-#define VAR_INIT_CONVERT_DEF(ID, TYPE_OUT, VAR_IN, VAR_OUT) TYPE_OUT VAR_OUT##ID = CONVERT(VAR_IN##ID, TYPE_OUT)
+#define VAR_INIT_CONVERT_DEF(ID, TYPE_OUT, VAR_IN, VAR_OUT) TYPE_OUT VAR_OUT##ID = CONVERT(VAR_IN##ID, TYPE_OUT)
#define REPEAT_VAR_INIT_CONVERT(N, TYPE_OUT, VAR_IN, VAR_OUT) REPEAT_3_N(N, VAR_INIT_CONVERT, TYPE_OUT, VAR_IN, VAR_OUT)
// Macro for initializing N variables by converting the data type with saturation. Generates N statements that defines VAR##N = RHS_ACCESSOR_DEF(...)
#define VAR_INIT_CONVERT_SAT_DEF(ID, TYPE_OUT, VAR_IN, VAR_OUT) TYPE_OUT VAR_OUT##ID = CONVERT_SAT(VAR_IN##ID, TYPE_OUT)
-#define REPEAT_VAR_INIT_CONVERT_SAT(N, TYPE_OUT, VAR_IN, VAR_OUT) REPEAT_3_N(N, VAR_INIT_CONVERT_SAT, TYPE_OUT, VAR_IN, VAR_OUT)
+#define REPEAT_VAR_INIT_CONVERT_SAT(N, TYPE_OUT, VAR_IN, VAR_OUT) \
+ REPEAT_3_N(N, VAR_INIT_CONVERT_SAT, TYPE_OUT, VAR_IN, VAR_OUT)
// Macro for adding a constant to N variables. Generates N statements that defines VAR##N =RHS_ACCESSOR_DEF(...)
-#define ADD_CONST_TO_VAR_DEF(ID, TYPE, VAR, VAL) VAR##ID += (TYPE)VAL
+#define ADD_CONST_TO_VAR_DEF(ID, TYPE, VAR, VAL) VAR##ID += (TYPE)VAL
#define REPEAT_ADD_CONST_TO_VAR(N, TYPE, VAR, VAL) REPEAT_3_N(N, ADD_CONST_TO_VAR, TYPE, VAR, VAL)
// Macro for multiplying N variables (VAR_B) by a constant (VAL) and adding to other N variables (VAR_A). Generates N statements that defines VAR_A##N =RHS_ACCESSOR_DEF(...)
-#define MLA_VAR_WITH_CONST_VEC_DEF(ID, VAR_A, VAR_B, VAL) VAR_A##ID += VAR_B##ID * VAL
+#define MLA_VAR_WITH_CONST_VEC_DEF(ID, VAR_A, VAR_B, VAL) VAR_A##ID += VAR_B##ID * VAL
#define REPEAT_MLA_VAR_WITH_CONST_VEC(N, VAR_A, VAR_B, VAL) REPEAT_3_N(N, MLA_VAR_WITH_CONST_VEC, VAR_A, VAR_B, VAL)
// Macro for adding a vector to N-variables. Generates N statements that defines VAR##N =RHS_ACCESSOR_DEF(...)
#define ADD_VECTOR_TO_VAR_DEF(ID, TYPE, VAR, VEC) VAR##ID += VEC
-#define REPEAT_ADD_VECTOR_TO_VAR(N, VAR, VEC) REPEAT_3_N(N, ADD_VECTOR_TO_VAR, "", VAR, VEC)
+#define REPEAT_ADD_VECTOR_TO_VAR(N, VAR, VEC) REPEAT_3_N(N, ADD_VECTOR_TO_VAR, "", VAR, VEC)
// Macro for adding two N-variables. Generates N statements that defines VAR##N =RHS_ACCESSOR_DEF(...)
#define ADD_TWO_VARS_DEF(ID, TYPE, VAR_A, VAR_B) VAR_A##ID += VAR_B##ID
-#define REPEAT_ADD_TWO_VARS(N, VAR_A, VAR_B) REPEAT_3_N(N, ADD_TWO_VARS, "", VAR_A, VAR_B)
+#define REPEAT_ADD_TWO_VARS(N, VAR_A, VAR_B) REPEAT_3_N(N, ADD_TWO_VARS, "", VAR_A, VAR_B)
// Macro for performing Max between a constant and N variables. Generates N statements that defines VAR##N =RHS_ACCESSOR_DEF(...)
-#define MAX_CONST_VAR_DEF(ID, TYPE, VAR, VAL) VAR##ID = max(VAR##ID, (TYPE)VAL)
+#define MAX_CONST_VAR_DEF(ID, TYPE, VAR, VAL) VAR##ID = max(VAR##ID, (TYPE)VAL)
#define REPEAT_MAX_CONST_VAR(N, TYPE, VAR, VAL) REPEAT_3_N(N, MAX_CONST_VAR, TYPE, VAR, VAL)
// Macro for performing Min between a constant and N variables. Generates N statements that defines VAR##N =RHS_ACCESSOR_DEF(...)
-#define MIN_CONST_VAR_DEF(ID, TYPE, VAR, VAL) VAR##ID = min(VAR##ID, (TYPE)VAL)
+#define MIN_CONST_VAR_DEF(ID, TYPE, VAR, VAL) VAR##ID = min(VAR##ID, (TYPE)VAL)
#define REPEAT_MIN_CONST_VAR(N, TYPE, VAR, VAL) REPEAT_3_N(N, MIN_CONST_VAR, TYPE, VAR, VAL)
// Macro for performing ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE to N variables. Generates N statements that defines VAR##N =RHS_ACCESSOR_DEF(...)
-#define ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE_DEF(ID, SIZE, VAR, RES_MUL, RES_SHIFT) VAR##ID = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(VAR##ID, RES_MUL, RES_SHIFT, SIZE)
-#define REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(N, SIZE, VAR, RES_MUL, RES_SHIFT) REPEAT_4_N(N, ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE, SIZE, VAR, RES_MUL, RES_SHIFT)
+#define ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE_DEF(ID, SIZE, VAR, RES_MUL, RES_SHIFT) \
+ VAR##ID = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(VAR##ID, RES_MUL, RES_SHIFT, SIZE)
+#define REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(N, SIZE, VAR, RES_MUL, RES_SHIFT) \
+ REPEAT_4_N(N, ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE, SIZE, VAR, RES_MUL, RES_SHIFT)
// Macro for performing ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE to N variables. Generates N statements that defines VAR##N =RHS_ACCESSOR_DEF(...)
-#define ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE_DEF(ID, SIZE, VAR, RES_MUL, RES_SHIFT) VAR##ID = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(VAR##ID, RES_MUL, RES_SHIFT, SIZE)
-#define REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(N, SIZE, VAR, RES_MUL, RES_SHIFT) REPEAT_4_N(N, ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE, SIZE, VAR, RES_MUL, RES_SHIFT)
+#define ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE_DEF(ID, SIZE, VAR, RES_MUL, RES_SHIFT) \
+ VAR##ID = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(VAR##ID, RES_MUL, RES_SHIFT, SIZE)
+#define REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(N, SIZE, VAR, RES_MUL, RES_SHIFT) \
+ REPEAT_4_N(N, ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE, SIZE, VAR, RES_MUL, RES_SHIFT)
// Macro for performing per-channel ASYMM_MULT_BY_QUANT_MULTIPLIER to N variables.
#define ASYMM_MULT_BY_QUANT_MULTIPLIER_PER_CHANNEL_DEF(ID, SIZE, VAR, RES_MUL, RES_SHIFT) \
@@ -182,6 +191,7 @@
VAR##ID_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(VAR##ID, RES_MUL, RES_SHIFT, N0); \
VAR##ID = select(VAR##ID_shift_lt0, VAR##ID_shift_gt0, RES_SHIFT >= 0); \
})
-#define REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_PER_CHANNEL(N, SIZE, VAR, RES_MUL, RES_SHIFT) REPEAT_4_N(N, ASYMM_MULT_BY_QUANT_MULTIPLIER_PER_CHANNEL, SIZE, VAR, RES_MUL, RES_SHIFT)
+#define REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_PER_CHANNEL(N, SIZE, VAR, RES_MUL, RES_SHIFT) \
+ REPEAT_4_N(N, ASYMM_MULT_BY_QUANT_MULTIPLIER_PER_CHANNEL, SIZE, VAR, RES_MUL, RES_SHIFT)
#endif // ARM_COMPUTE_REPEAT_H
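For context on the reformatted repeat macros above: each REPEAT_*_N invocation generates one numbered statement per ID, from 0 to N-1. A minimal expansion sketch, using a plain int purely for illustration (the 2x2 NHWC pooling kernel earlier uses the same macro with a vector accumulator type to declare data0..data3):

/* REPEAT_VAR_INIT_TO_CONST(4, int, data, 0) expands, in effect, to: */
int data0 = 0;
int data1 = 0;
int data2 = 0;
int data3 = 0;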
diff --git a/src/core/CL/cl_kernels/scale.cl b/src/core/CL/cl_kernels/scale.cl
deleted file mode 100644
index d4c27e6cf6..0000000000
--- a/src/core/CL/cl_kernels/scale.cl
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * Copyright (c) 2016-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers.h"
-#include "warp_helpers.h"
-
-/** Transforms four 2D coordinates. This is used to map the output coordinates to the input coordinates.
- *
- * @param[in] coord 2D coordinates to transform.
- * @param[in] scale input/output scale ratio
- *
- * @return a float8 containing 4 2D transformed values in the input image.
- */
-inline const float8 transform_nearest(const float2 coord, const float2 scale)
-{
-#ifdef SAMPLING_POLICY_TOP_LEFT
- const float4 in_x_coords = (float4)(coord.s0, 1 + coord.s0, 2 + coord.s0, 3 + coord.s0);
- const float4 new_x = in_x_coords * (float4)(scale.s0);
- const float4 new_y = (float4)(coord.s1 * scale.s1);
- return (float8)(new_x.s0, new_y.s0, new_x.s1, new_y.s1, new_x.s2, new_y.s2, new_x.s3, new_y.s3);
-#elif SAMPLING_POLICY_CENTER
- const float4 in_x_coords = (float4)(coord.s0, 1 + coord.s0, 2 + coord.s0, 3 + coord.s0);
- const float4 new_x = (in_x_coords + ((float4)(0.5f))) * (float4)(scale.s0);
- const float4 new_y = (float4)((coord.s1 + 0.5f) * scale.s1);
- return (float8)(new_x.s0, new_y.s0, new_x.s1, new_y.s1, new_x.s2, new_y.s2, new_x.s3, new_y.s3);
-#else /* SAMPLING_POLICY */
-#error("Unsupported sampling policy");
-#endif /* SAMPLING_POLICY */
-}
-
-/** Transforms four 2D coordinates. This is used to map the output coordinates to the input coordinates.
- *
- * @param[in] coord 2D coordinates to transform.
- * @param[in] scale input/output scale ratio
- *
- * @return a float8 containing 4 2D transformed values in the input image.
- */
-inline const float8 transform_bilinear(const float2 coord, const float2 scale)
-{
- const float4 in_x_coords = (float4)(coord.s0, 1 + coord.s0, 2 + coord.s0, 3 + coord.s0);
-#ifdef SAMPLING_POLICY_TOP_LEFT
- const float4 new_x = in_x_coords * (float4)(scale.s0);
- const float4 new_y = (float4)(coord.s1 * scale.s1);
- return (float8)(new_x.s0, new_y.s0, new_x.s1, new_y.s1, new_x.s2, new_y.s2, new_x.s3, new_y.s3);
-#elif SAMPLING_POLICY_CENTER
- const float4 new_x = (in_x_coords + ((float4)(0.5f))) * (float4)(scale.s0) - (float4)(0.5f);
- const float4 new_y = (float4)((coord.s1 + 0.5f) * scale.s1 - 0.5f);
- return (float8)(new_x.s0, new_y.s0, new_x.s1, new_y.s1, new_x.s2, new_y.s2, new_x.s3, new_y.s3);
-#else /* SAMPLING_POLICY */
-#error("Unsupported sampling policy");
-#endif /* SAMPLING_POLICY */
-}
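A quick numeric check of the SAMPLING_POLICY_CENTER branch above, with values assumed for illustration: when upscaling by 2x the input/output scale ratio is 0.5, so output x = 3 maps to (3 + 0.5) * 0.5 - 0.5 = 1.25 in the input, which is then sampled bilinearly. A tiny C sketch of that per-coordinate mapping:

/* Center-policy source coordinate for bilinear scaling (one axis). */
float bilinear_center_src_x(float dst_x, float scale_x)
{
    return (dst_x + 0.5f) * scale_x - 0.5f;
}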
-
-/** Performs an affine transformation on an image interpolating with the NEAREST NEIGHBOUR method. Input and output are single channel U8 or S16.
- *
- * @note Sampling policy to be used is passed as -DSAMPLING_POLICY_(TYPE) e.g. -DSAMPLING_POLICY_TOP_LEFT
- *
- * @param[in] in_ptr Pointer to the source image. Supported data types: U8, S16.
- * @param[in] in_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] in_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] in_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] in_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] in_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[out] out_ptr Pointer to the destination image. Supported data types: U8, S16. (Must be the same as the input)
- * @param[in] out_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] out_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] out_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] out_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] out_offset_first_element_in_bytes The offset of the first element in the destination image
- * @param[in] input_width Input image width
- * @param[in] input_height Input image height
- * @param[in] scale_x The scale factor along x dimension
- * @param[in] scale_y The scale factor along y dimension
- */
-__kernel void scale_nearest_neighbour_nchw(
- IMAGE_DECLARATION(in),
- IMAGE_DECLARATION(out),
- const float input_width,
- const float input_height,
- const float scale_x,
- const float scale_y)
-{
- Image in = CONVERT_TO_IMAGE_STRUCT_NO_STEP(in);
- Image out = CONVERT_TO_IMAGE_STRUCT(out);
- const float2 r = (float2)(scale_x, scale_y);
- float8 transformed = transform_nearest(get_current_coords(), r);
-#ifdef ALIGN_CORNERS
- transformed = round(transformed);
-#endif // ALIGN_CORNERS
- const float8 tc = clamp_to_border_with_size(transformed, input_width, input_height, BORDER_SIZE);
- vstore4(read_texels4(&in, convert_int8(tc)), 0, (__global DATA_TYPE *)out.ptr);
-}
-
-/** Performs an affine transformation on an image interpolating with the BILINEAR method.
- *
- * @note Sampling policy to be used is passed as -DSAMPLING_POLICY_(TYPE) e.g. -DSAMPLING_POLICY_TOP_LEFT
- *
- * @param[in] in_ptr Pointer to the source image. Supported data types: U8, S16.
- * @param[in] in_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] in_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] in_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] in_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] in_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[out] out_ptr Pointer to the destination image. Supported data types: U8, S16. (Must be the same as the input)
- * @param[in] out_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] out_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] out_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] out_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] out_offset_first_element_in_bytes The offset of the first element in the destination image
- * @param[in] input_width Input image width
- * @param[in] input_height Input image height
- * @param[in] scale_x The scale factor along x dimension
- * @param[in] scale_y The scale factor along y dimension
- */
-__kernel void scale_bilinear_nchw(
- IMAGE_DECLARATION(in),
- IMAGE_DECLARATION(out),
- const float input_width,
- const float input_height,
- const float scale_x,
- const float scale_y)
-{
- Image in = CONVERT_TO_IMAGE_STRUCT_NO_STEP(in);
- Image out = CONVERT_TO_IMAGE_STRUCT(out);
- const float2 r = (float2)(scale_x, scale_y);
- const float8 tc = transform_bilinear(get_current_coords(), r);
- vstore4(bilinear_interpolate_with_border(&in, tc, input_width, input_height, BORDER_SIZE), 0, (__global DATA_TYPE *)out.ptr);
-}
-
-#if defined(DEPTH_OUT)
-/** Performs scale on an image interpolating with the NEAREST NEIGHBOUR method. Input and output are single channel F32. (NHWC)
- *
- * @note Sampling policy to be used is passed as -DSAMPLING_POLICY_(TYPE) e.g. -DSAMPLING_POLICY_TOP_LEFT
- * @note Output tensor's depth should be given as a preprocessor argument using -DDEPTH_OUT=size. e.g. -DDEPTH_OUT=16
- *
- * @param[in] in_ptr Pointer to the source image. Supported data types: U8/S16/F16/F32.
- * @param[in] in_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] in_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] in_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] in_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] in_stride_z Stride of the source image in Z dimension (in bytes)
- * @param[in] in_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] in_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[out] out_ptr Pointer to the destination image. Supported data types: same as @p in_ptr
- * @param[in] out_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] out_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] out_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] out_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] out_stride_z Stride of the destination image in Z dimension (in bytes)
- * @param[in] out_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] out_offset_first_element_in_bytes The offset of the first element in the destination image
- * @param[in] input_width Input image width
- * @param[in] input_height Input image height
- * @param[in] scale_x The scale factor along x dimension
- * @param[in] scale_y The scale factor along y dimension
- */
-__kernel void scale_nearest_neighbour_nhwc(
- TENSOR4D_DECLARATION(in),
- TENSOR4D_DECLARATION(out),
- const float input_width,
- const float input_height,
- const float scale_x,
- const float scale_y)
-{
- Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(in, 0);
- Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT(out, DEPTH_OUT);
-
-#ifdef SAMPLING_POLICY_TOP_LEFT
- float new_x = get_global_id(1) * scale_x;
- float new_y = (get_global_id(2) % DEPTH_OUT) * scale_y;
-#elif SAMPLING_POLICY_CENTER
- float new_x = (get_global_id(1) + 0.5f) * scale_x;
- float new_y = ((get_global_id(2) % DEPTH_OUT) + 0.5f) * scale_y;
-#else /* SAMPLING_POLICY */
-#error("Unsupported sampling policy");
-#endif /* SAMPLING_POLICY */
-#ifdef ALIGN_CORNERS
- new_x = round(new_x);
- new_y = round(new_y);
-#endif /* ALIGN_CORNERS */
- const float clamped_x = clamp(new_x, 0.0f, input_width - 1);
- const float clamped_y = clamp(new_y, 0.0f, input_height - 1);
-
- *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)tensor4D_offset(&in, get_global_id(0), convert_int(clamped_x), convert_int(clamped_y), (get_global_id(2) / DEPTH_OUT)));
-}
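As a reference for the sampling-policy handling above, here is a minimal host-side C sketch of the per-output-pixel coordinate mapping; the helper name, signature and flags are illustrative assumptions, not library code.

#include <math.h>

/* Illustrative sketch: map an output pixel (x, y) back into the input for
 * nearest-neighbour scaling. sampling_center selects the CENTER policy
 * (otherwise TOP_LEFT), align_corners mimics the ALIGN_CORNERS rounding,
 * and the result is clamped to the valid input range before the read. */
static void map_nearest(int x, int y, float scale_x, float scale_y,
                        int in_w, int in_h,
                        int sampling_center, int align_corners,
                        int *in_x, int *in_y)
{
    float fx = sampling_center ? (x + 0.5f) * scale_x : (float)x * scale_x;
    float fy = sampling_center ? (y + 0.5f) * scale_y : (float)y * scale_y;
    if (align_corners)
    {
        fx = roundf(fx);
        fy = roundf(fy);
    }
    fx = fminf(fmaxf(fx, 0.0f), (float)(in_w - 1));
    fy = fminf(fmaxf(fy, 0.0f), (float)(in_h - 1));
    *in_x = (int)fx;
    *in_y = (int)fy;
}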
-
-/** Performs scale on an image interpolating with the BILINEAR method. (NHWC)
- *
- * @note Sampling policy to be used is passed as -DSAMPLING_POLICY_(TYPE) e.g. -DSAMPLING_POLICY_TOP_LEFT
- * @note If border mode replicate is used, it should be passed as -DBORDER_MODE_REPLICATE
- * @note Output tensor's depth should be given as a preprocessor argument using -DDEPTH_OUT=size. e.g. -DDEPTH_OUT=16
- * @note The value to be used at the edges of the image should be given as a preprocessor argument using -DCONSTANT_VALUE=value.
- *
- * @param[in] in_ptr Pointer to the source image. Supported data types: U8/S16/F16/F32.
- * @param[in] in_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] in_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] in_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] in_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] in_stride_z Stride of the source image in Z dimension (in bytes)
- * @param[in] in_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] in_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[out] out_ptr Pointer to the destination image. Supported data types: same as @p in_ptr
- * @param[in] out_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] out_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] out_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] out_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] out_stride_z Stride of the destination image in Z dimension (in bytes)
- * @param[in]  out_step_z                           dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] out_offset_first_element_in_bytes The offset of the first element in the destination image
- * @param[in] input_width Input image width
- * @param[in] input_height Input image height
- * @param[in] scale_x The scale factor along x dimension
- * @param[in] scale_y The scale factor along y dimension
- *
- */
-__kernel void scale_bilinear_nhwc(
- TENSOR4D_DECLARATION(in),
- TENSOR4D_DECLARATION(out),
- const float input_width,
- const float input_height,
- const float scale_x,
- const float scale_y)
-{
- Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(in, 0);
- Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT(out, DEPTH_OUT);
-
-#ifdef SAMPLING_POLICY_TOP_LEFT
- const float new_x = get_global_id(1) * scale_x;
- const float new_y = (get_global_id(2) % DEPTH_OUT) * scale_y;
-#elif SAMPLING_POLICY_CENTER
- const float new_x = (get_global_id(1) + 0.5f) * scale_x - 0.5f;
- const float new_y = ((get_global_id(2) % DEPTH_OUT) + 0.5f) * scale_y - 0.5f;
-#else /* SAMPLING_POLICY */
-#error("Unsupported sampling policy");
-#endif /* SAMPLING_POLICY */
-
- const float new_xf = floor(new_x);
- const float new_yf = floor(new_y);
- const float clamped_x = clamp(new_xf, 0.0f, input_width - 1);
- const float clamped_x1 = clamp(new_xf + 1, 0.0f, input_width - 1);
- const float clamped_y = clamp(new_yf, 0.0f, input_height - 1);
- const float clamped_y1 = clamp(new_yf + 1, 0.0f, input_height - 1);
-
-#ifndef BORDER_MODE_REPLICATE
- const bool check_x = (0.f <= new_xf && new_xf < input_width);
- const bool check_x1 = (-1.f <= new_xf && new_xf < input_width - 1);
- const bool check_y = (0.f <= new_yf && new_yf < input_height);
- const bool check_y1 = (-1.f <= new_yf && new_yf < input_height - 1);
- const float ins_0 = select((float)(CONSTANT_VALUE), (float)(*((__global DATA_TYPE *)tensor4D_offset(&in, get_global_id(0), convert_int(clamped_x), convert_int(clamped_y),
- (get_global_id(2) / DEPTH_OUT)))),
- check_x && check_y);
- const float ins_1 = select((float)(CONSTANT_VALUE), (float)(*((__global DATA_TYPE *)tensor4D_offset(&in, get_global_id(0), convert_int(clamped_x1), convert_int(clamped_y),
- (get_global_id(2) / DEPTH_OUT)))),
- check_x1 && check_y);
- const float ins_2 = select((float)(CONSTANT_VALUE), (float)(*((__global DATA_TYPE *)tensor4D_offset(&in, get_global_id(0), convert_int(clamped_x), convert_int(clamped_y1),
- (get_global_id(2) / DEPTH_OUT)))),
- check_x && check_y1);
- const float ins_3 = select((float)(CONSTANT_VALUE), (float)(*((__global DATA_TYPE *)tensor4D_offset(&in, get_global_id(0), convert_int(clamped_x1), convert_int(clamped_y1),
- (get_global_id(2) / DEPTH_OUT)))),
- check_x1 && check_y1);
- float4 ins = (float4)(ins_0, ins_1, ins_2, ins_3);
-#else /* BORDER_MODE_REPLICATE */
- float4 ins = (float4)(*((__global DATA_TYPE *)tensor4D_offset(&in, get_global_id(0), convert_int(clamped_x), convert_int(clamped_y), (get_global_id(2) / DEPTH_OUT))),
- *((__global DATA_TYPE *)tensor4D_offset(&in, get_global_id(0), convert_int(clamped_x1), convert_int(clamped_y), (get_global_id(2) / DEPTH_OUT))),
- *((__global DATA_TYPE *)tensor4D_offset(&in, get_global_id(0), convert_int(clamped_x), convert_int(clamped_y1), (get_global_id(2) / DEPTH_OUT))),
- *((__global DATA_TYPE *)tensor4D_offset(&in, get_global_id(0), convert_int(clamped_x1), convert_int(clamped_y1), (get_global_id(2) / DEPTH_OUT))));
-#endif /* BORDER_MODE_REPLICATE */
-
- const float a = new_x - new_xf;
- const float b = 1.f - a;
- const float a1 = new_y - new_yf;
- const float b1 = 1.f - a1;
- const float fr = ((ins.s0 * b * b1) + (ins.s1 * a * b1) + (ins.s2 * b * a1) + (ins.s3 * a * a1));
-
- *((__global DATA_TYPE *)out.ptr) = CONVERT(fr, DATA_TYPE);
-}
-#endif /* defined(DEPTH_OUT) */ \ No newline at end of file
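The four select() taps and the a/b/a1/b1 weights in the NHWC kernel above implement standard bilinear interpolation with a constant border. Below is a scalar C sketch of the same arithmetic, assuming a row-major single-channel float image; the function name and layout are illustrative only.

#include <math.h>

/* Bilinear sample at (new_x, new_y) with a constant border value, mirroring
 * the constant-border path of scale_bilinear_nhwc. */
static float bilinear_constant_border(const float *img, int in_w, int in_h,
                                      float new_x, float new_y, float constant_value)
{
    const float xf = floorf(new_x);
    const float yf = floorf(new_y);
    const int x0 = (int)fminf(fmaxf(xf, 0.0f), (float)(in_w - 1));
    const int x1 = (int)fminf(fmaxf(xf + 1.0f, 0.0f), (float)(in_w - 1));
    const int y0 = (int)fminf(fmaxf(yf, 0.0f), (float)(in_h - 1));
    const int y1 = (int)fminf(fmaxf(yf + 1.0f, 0.0f), (float)(in_h - 1));

    /* Out-of-bounds taps take the constant value, like the select()s above. */
    const int cx  = (0.0f <= xf) && (xf < (float)in_w);
    const int cx1 = (-1.0f <= xf) && (xf < (float)(in_w - 1));
    const int cy  = (0.0f <= yf) && (yf < (float)in_h);
    const int cy1 = (-1.0f <= yf) && (yf < (float)(in_h - 1));

    const float p00 = (cx && cy)   ? img[y0 * in_w + x0] : constant_value;
    const float p10 = (cx1 && cy)  ? img[y0 * in_w + x1] : constant_value;
    const float p01 = (cx && cy1)  ? img[y1 * in_w + x0] : constant_value;
    const float p11 = (cx1 && cy1) ? img[y1 * in_w + x1] : constant_value;

    const float a  = new_x - xf;  /* horizontal weight */
    const float b  = 1.0f - a;
    const float a1 = new_y - yf;  /* vertical weight */
    const float b1 = 1.0f - a1;

    return p00 * b * b1 + p10 * a * b1 + p01 * b * a1 + p11 * a * a1;
}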
diff --git a/src/core/CL/cl_kernels/scale_quantized.cl b/src/core/CL/cl_kernels/scale_quantized.cl
deleted file mode 100644
index 010e4ed57a..0000000000
--- a/src/core/CL/cl_kernels/scale_quantized.cl
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers_asymm.h"
-#include "warp_helpers_quantized.h"
-
-/** Transforms four 2D coordinates. This is used to map the output coordinates to the input coordinates.
- *
- * @param[in] coord 2D coordinates to transform.
- * @param[in] scale input/output scale ratio
- *
- * @return a float8 containing 4 2D transformed values in the input image.
- */
-inline const float8 transform_bilinear_quantized(const float2 coord, const float2 scale)
-{
- const float4 in_x_coords = (float4)(coord.s0, 1 + coord.s0, 2 + coord.s0, 3 + coord.s0);
-#ifdef SAMPLING_POLICY_TOP_LEFT
- const float4 new_x = in_x_coords * (float4)(scale.s0);
- const float4 new_y = (float4)(coord.s1 * scale.s1);
- return (float8)(new_x.s0, new_y.s0, new_x.s1, new_y.s1, new_x.s2, new_y.s2, new_x.s3, new_y.s3);
-#elif SAMPLING_POLICY_CENTER
- const float4 new_x = (in_x_coords + ((float4)(0.5f))) * (float4)(scale.s0) - (float4)(0.5f);
- const float4 new_y = (float4)((coord.s1 + 0.5f) * scale.s1 - 0.5f);
- return (float8)(new_x.s0, new_y.s0, new_x.s1, new_y.s1, new_x.s2, new_y.s2, new_x.s3, new_y.s3);
-#else /* SAMPLING_POLICY */
-#error("Unsupported sampling policy");
-#endif /* SAMPLING_POLICY */
-}
-
-/** Performs an affine transformation on an image interpolating with the BILINEAR method.
- *
- * @note Sampling policy to be used is passed as -DSAMPLING_POLICY_(TYPE) e.g. -DSAMPLING_POLICY_TOP_LEFT
- * @note Scale value for QASYMM8 data type to be used is passed as -DSCALE=<VALUE> e.g. -DSCALE=0.5
- * @note Offset value for QASYMM8 data type to be used is passed as -DOFFSET=<VALUE> e.g. -DOFFSET=1
- *
- * @param[in] in_ptr Pointer to the source image. Supported data types: QASYMM8.
- * @param[in] in_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] in_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] in_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] in_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] in_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[out] out_ptr                           Pointer to the destination image. Supported data types: QASYMM8. (Must be the same as the input)
- * @param[in] out_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] out_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] out_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] out_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] out_offset_first_element_in_bytes The offset of the first element in the destination image
- * @param[in] input_width Input image width
- * @param[in] input_height Input image height
- * @param[in] scale_x The scale factor along x dimension
- * @param[in] scale_y The scale factor along y dimension
- */
-__kernel void scale_bilinear_quantized_nchw(
- IMAGE_DECLARATION(in),
- IMAGE_DECLARATION(out),
- const float input_width,
- const float input_height,
- const float scale_x,
- const float scale_y)
-{
- Image in = CONVERT_TO_IMAGE_STRUCT_NO_STEP(in);
- Image out = CONVERT_TO_IMAGE_STRUCT(out);
- const float2 r = (float2)(scale_x, scale_y);
- const float8 tc = transform_bilinear_quantized(get_current_coords_quantized(), r);
- vstore4(bilinear_interpolate_with_border_quantized(&in, tc, input_width, input_height, BORDER_SIZE, SCALE, OFFSET), 0, (__global DATA_TYPE *)out.ptr);
-}
-
-#if defined(DEPTH_OUT)
-/** Performs scale on an image interpolating with the BILINEAR method. (NHWC)
- *
- * @note Sampling policy to be used is passed as -DSAMPLING_POLICY_(TYPE) e.g. -DSAMPLING_POLICY_TOP_LEFT
- * @note Scale value for QASYMM8 data type to be used is passed as -DSCALE=<VALUE> e.g. -DSCALE=0.5
- * @note Offset value for QASYMM8 data type to be used is passed as -DOFFSET=<VALUE> e.g. -DOFFSET=1
- * @note If border mode replicate is used, it should be passed as -DBORDER_MODE_REPLICATE
- * @note Output tensor's depth should be given as a preprocessor argument using -DDEPTH_OUT=size. e.g. -DDEPTH_OUT=16
- * @note The value to be used at the edges of the image should be given as a preprocessor argument using -DCONSTANT_VALUE=value.
- *
- * @param[in] in_ptr Pointer to the source image. Supported data types: QASYMM8.
- * @param[in] in_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] in_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] in_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] in_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] in_stride_z Stride of the source image in Z dimension (in bytes)
- * @param[in] in_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] in_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[out] out_ptr Pointer to the destination image. Supported data types: same as @p in_ptr
- * @param[in] out_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] out_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] out_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] out_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] out_stride_z Stride of the destination image in Z dimension (in bytes)
- * @param[in]  out_step_z                            dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] out_offset_first_element_in_bytes The offset of the first element in the destination image
- * @param[in] input_width Input image width
- * @param[in] input_height Input image height
- * @param[in] scale_x The scale factor along x dimension
- * @param[in] scale_y The scale factor along y dimension
- */
-__kernel void scale_bilinear_quantized_nhwc(
- TENSOR4D_DECLARATION(in),
- TENSOR4D_DECLARATION(out),
- const float input_width,
- const float input_height,
- const float scale_x,
- const float scale_y)
-{
- Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(in, 0);
- Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT(out, DEPTH_OUT);
-
-#ifdef SAMPLING_POLICY_TOP_LEFT
- const float new_x = get_global_id(1) * scale_x;
- const float new_y = (get_global_id(2) % DEPTH_OUT) * scale_y;
-#elif SAMPLING_POLICY_CENTER
- const float new_x = (get_global_id(1) + 0.5f) * scale_x - 0.5f;
- const float new_y = ((get_global_id(2) % DEPTH_OUT) + 0.5f) * scale_y - 0.5f;
-#else /* SAMPLING_POLICY */
-#error("Unsupported sampling policy");
-#endif /* SAMPLING_POLICY */
-
- const float new_xf = floor(new_x);
- const float new_yf = floor(new_y);
- const float clamped_x = clamp(new_xf, 0.0f, input_width - 1);
- const float clamped_x1 = clamp(new_xf + 1, 0.0f, input_width - 1);
- const float clamped_y = clamp(new_yf, 0.0f, input_height - 1);
- const float clamped_y1 = clamp(new_yf + 1, 0.0f, input_height - 1);
-
-#ifndef BORDER_MODE_REPLICATE
- const bool check_x = (0.f <= new_xf && new_xf < input_width);
- const bool check_x1 = (-1.f <= new_xf && new_xf < input_width - 1);
- const bool check_y = (0.f <= new_yf && new_yf < input_height);
- const bool check_y1 = (-1.f <= new_yf && new_yf < input_height - 1);
- const int ins_0 = select((int)(CONSTANT_VALUE), (int)(*((__global DATA_TYPE *)tensor4D_offset(&in, get_global_id(0), convert_int(clamped_x), convert_int(clamped_y),
- (get_global_id(2) / DEPTH_OUT)))),
- check_x && check_y);
- const int ins_1 = select((int)(CONSTANT_VALUE), (int)(*((__global DATA_TYPE *)tensor4D_offset(&in, get_global_id(0), convert_int(clamped_x1), convert_int(clamped_y),
- (get_global_id(2) / DEPTH_OUT)))),
- check_x1 && check_y);
- const int ins_2 = select((int)(CONSTANT_VALUE), (int)(*((__global DATA_TYPE *)tensor4D_offset(&in, get_global_id(0), convert_int(clamped_x), convert_int(clamped_y1),
- (get_global_id(2) / DEPTH_OUT)))),
- check_x && check_y1);
- const int ins_3 = select((int)(CONSTANT_VALUE), (int)(*((__global DATA_TYPE *)tensor4D_offset(&in, get_global_id(0), convert_int(clamped_x1), convert_int(clamped_y1),
- (get_global_id(2) / DEPTH_OUT)))),
- check_x1 && check_y1);
- int4 ins = (int4)(ins_0, ins_1, ins_2, ins_3);
-#else /* BORDER_MODE_REPLICATE */
- int4 ins = (int4)(*((__global DATA_TYPE *)tensor4D_offset(&in, get_global_id(0), convert_int(clamped_x), convert_int(clamped_y), (get_global_id(2) / DEPTH_OUT))),
- *((__global DATA_TYPE *)tensor4D_offset(&in, get_global_id(0), convert_int(clamped_x1), convert_int(clamped_y), (get_global_id(2) / DEPTH_OUT))),
- *((__global DATA_TYPE *)tensor4D_offset(&in, get_global_id(0), convert_int(clamped_x), convert_int(clamped_y1), (get_global_id(2) / DEPTH_OUT))),
- *((__global DATA_TYPE *)tensor4D_offset(&in, get_global_id(0), convert_int(clamped_x1), convert_int(clamped_y1), (get_global_id(2) / DEPTH_OUT))));
-#endif /* BORDER_MODE_REPLICATE */
-
- const float a = new_x - new_xf;
- const float b = 1.f - a;
- const float a1 = new_y - new_yf;
- const float b1 = 1.f - a1;
- const float4 insf32 = convert_float4(ins - (int4)OFFSET) * (float4)SCALE;
-
- const float fr = ((insf32.s0 * b * b1) + (insf32.s1 * a * b1) + (insf32.s2 * b * a1) + (insf32.s3 * a * a1));
-
- DATA_TYPE res = CONVERT_SAT(convert_int_sat_rtp(fr / SCALE) + OFFSET, DATA_TYPE);
-
- *((__global DATA_TYPE *)out.ptr) = res;
-}
-#endif /* defined(DEPTH_OUT) */ \ No newline at end of file
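The quantized variant performs the same interpolation after dequantizing the four taps with the -DSCALE/-DOFFSET parameters and requantizes the result with convert_int_sat_rtp (round towards positive infinity) plus saturation. A C sketch of that arithmetic for QASYMM8 (uint8) values follows; the function name and parameter order are illustrative.

#include <math.h>
#include <stdint.h>

/* Dequantize four taps, interpolate in float, requantize with ceil() and
 * saturate to the U8 range, mirroring scale_bilinear_quantized_nhwc. */
static uint8_t requantize_bilinear(int q00, int q10, int q01, int q11,
                                   float a, float a1, float scale, int offset)
{
    const float b  = 1.0f - a;
    const float b1 = 1.0f - a1;

    /* Dequantize: real = (q - offset) * scale */
    const float p00 = (q00 - offset) * scale;
    const float p10 = (q10 - offset) * scale;
    const float p01 = (q01 - offset) * scale;
    const float p11 = (q11 - offset) * scale;

    const float fr = p00 * b * b1 + p10 * a * b1 + p01 * b * a1 + p11 * a * a1;

    /* Requantize with round-towards-positive-infinity and saturation. */
    int q = (int)ceilf(fr / scale) + offset;
    if (q < 0)   q = 0;
    if (q > 255) q = 255;
    return (uint8_t)q;
}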
diff --git a/src/core/CL/cl_kernels/sobel_filter.cl b/src/core/CL/cl_kernels/sobel_filter.cl
deleted file mode 100644
index 7983734fc4..0000000000
--- a/src/core/CL/cl_kernels/sobel_filter.cl
+++ /dev/null
@@ -1,541 +0,0 @@
-/*
- * Copyright (c) 2016, 2017 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers.h"
-
-/***********************************************/
-/* Begin implementation of Sobel3x3 filter */
-/***********************************************/
-
-/** This OpenCL kernel computes a Sobel3x3 filter.
- *
- * @attention To enable computation of the X gradient -DGRAD_X must be passed at compile time, while computation of the Y gradient
- * is performed when -DGRAD_Y is used. You can use both when computation of both gradients is required.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: U8
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[out] dst_gx_ptr Pointer to the destination image. Supported data types: S16
- * @param[in] dst_gx_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] dst_gx_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_gx_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] dst_gx_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_gx_offset_first_element_in_bytes The offset of the first element in the destination image
- * @param[out] dst_gy_ptr Pointer to the destination image. Supported data types: S16
- * @param[in] dst_gy_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] dst_gy_step_x dst_gy_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_gy_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] dst_gy_step_y dst_gy_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_gy_offset_first_element_in_bytes The offset of the first element in the destination image
- */
-__kernel void sobel3x3(
- IMAGE_DECLARATION(src)
-#ifdef GRAD_X
- ,
- IMAGE_DECLARATION(dst_gx)
-#endif /* GRAD_X */
-#ifdef GRAD_Y
- ,
- IMAGE_DECLARATION(dst_gy)
-#endif /* GRAD_Y */
-)
-{
- Image src = CONVERT_TO_IMAGE_STRUCT(src);
-#ifdef GRAD_X
- Image dst_gx = CONVERT_TO_IMAGE_STRUCT(dst_gx);
-#endif /* GRAD_X */
-#ifdef GRAD_Y
- Image dst_gy = CONVERT_TO_IMAGE_STRUCT(dst_gy);
-#endif /* GRAD_Y */
-
- // Output pixels
-#ifdef GRAD_X
- short8 gx = (short8)0;
-#endif /* GRAD_X */
-#ifdef GRAD_Y
- short8 gy = (short8)0;
-#endif /* GRAD_Y */
-
- // Row0
- uchar16 temp = vload16(0, offset(&src, -1, -1));
- short8 left = convert_short8(temp.s01234567);
- short8 middle = convert_short8(temp.s12345678);
- short8 right = convert_short8(temp.s23456789);
-#ifdef GRAD_X
- gx += left * (short8)(-1);
- gx += right * (short8)(+1);
-#endif /* GRAD_X */
-#ifdef GRAD_Y
- gy += left * (short8)(-1);
- gy += middle * (short8)(-2);
- gy += right * (short8)(-1);
-#endif /* GRAD_Y */
-
- // Row1
- temp = vload16(0, offset(&src, -1, 0));
- left = convert_short8(temp.s01234567);
- right = convert_short8(temp.s23456789);
-#ifdef GRAD_X
- gx += left * (short8)(-2);
- gx += right * (short8)(+2);
-#endif /* GRAD_X */
-
- // Row2
- temp = vload16(0, offset(&src, -1, 1));
- left = convert_short8(temp.s01234567);
- middle = convert_short8(temp.s12345678);
- right = convert_short8(temp.s23456789);
-#ifdef GRAD_X
- gx += left * (short8)(-1);
- gx += right * (short8)(+1);
-#endif /* GRAD_X */
-#ifdef GRAD_Y
- gy += left * (short8)(+1);
- gy += middle * (short8)(+2);
- gy += right * (short8)(+1);
-#endif /* GRAD_Y */
-
- // Store results
-#ifdef GRAD_X
- vstore8(gx, 0, ((__global short *)dst_gx.ptr));
-#endif /* GRAD_X */
-#ifdef GRAD_Y
- vstore8(gy, 0, ((__global short *)dst_gy.ptr));
-#endif /* GRAD_Y */
-}
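The vectorised kernel above accumulates the classic 3x3 Sobel coefficients row by row. For clarity, here is a scalar C reference for a single output pixel, assuming in-bounds coordinates and a row-major single-channel U8 image; the signature is an illustrative sketch, not library code.

/* Scalar 3x3 Sobel at pixel (x, y): writes the horizontal and vertical
 * gradient responses to *gx and *gy. */
static void sobel3x3_px(const unsigned char *src, int stride, int x, int y,
                        short *gx, short *gy)
{
    static const short kx[3][3] = { { -1, 0, +1 }, { -2, 0, +2 }, { -1, 0, +1 } };
    static const short ky[3][3] = { { -1, -2, -1 }, { 0, 0, 0 }, { +1, +2, +1 } };

    short sx = 0, sy = 0;
    for (int j = -1; j <= 1; ++j)
    {
        for (int i = -1; i <= 1; ++i)
        {
            const short px = (short)src[(y + j) * stride + (x + i)];
            sx += px * kx[j + 1][i + 1];
            sy += px * ky[j + 1][i + 1];
        }
    }
    *gx = sx;
    *gy = sy;
}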
-
-/**********************************************/
-/* End implementation of Sobel3x3 filter */
-/**********************************************/
-
-/***********************************************/
-/* Begin implementation of Sobel5x5 filter */
-/***********************************************/
-
-/** Compute a 1D horizontal sobel filter 1x5 for 8 bytes assuming the input is made of 1 channel of 1 byte (i.e. 8 pixels).
- *
- * @param[in] src Pointer to source image.
- * @param[in] left1_coeff_gx   Weight of the leftmost pixel for gx
- * @param[in] left2_coeff_gx   Weight of the left pixel for gx
- * @param[in] middle_coeff_gx  Weight of the middle pixel for gx
- * @param[in] right1_coeff_gx  Weight of the right pixel for gx
- * @param[in] right2_coeff_gx  Weight of the rightmost pixel for gx
- * @param[in] left1_coeff_gy   Weight of the leftmost pixel for gy
- * @param[in] left2_coeff_gy   Weight of the left pixel for gy
- * @param[in] middle_coeff_gy  Weight of the middle pixel for gy
- * @param[in] right1_coeff_gy  Weight of the right pixel for gy
- * @param[in] right2_coeff_gy  Weight of the rightmost pixel for gy
- *
- * @return a short16 containing short8 gx and short8 gy values.
- */
-short16 sobel1x5(
- Image *src,
- const short left1_coeff_gx,
- const short left2_coeff_gx,
- const short middle_coeff_gx,
- const short right1_coeff_gx,
- const short right2_coeff_gx,
- const short left1_coeff_gy,
- const short left2_coeff_gy,
- const short middle_coeff_gy,
- const short right1_coeff_gy,
- const short right2_coeff_gy)
-{
- uchar16 temp = vload16(0, offset(src, -2, 0));
- short8 gx = 0;
- short8 gy = 0;
- short8 val;
-
- val = convert_short8(temp.s01234567);
- gx += val * (short8)left1_coeff_gx;
- gy += val * (short8)left1_coeff_gy;
-
- val = convert_short8(temp.s12345678);
- gx += val * (short8)left2_coeff_gx;
- gy += val * (short8)left2_coeff_gy;
-
- val = convert_short8(temp.s23456789);
- gx += val * (short8)middle_coeff_gx;
- gy += val * (short8)middle_coeff_gy;
-
- val = convert_short8(temp.s3456789a);
- gx += val * (short8)right1_coeff_gx;
- gy += val * (short8)right1_coeff_gy;
-
- val = convert_short8(temp.s456789ab);
- gx += val * (short8)right2_coeff_gx;
- gy += val * (short8)right2_coeff_gy;
-
- return (short16)(gx, gy);
-}
-
-/** Compute a 1D vertical sobel filter 5x1 for 8 elements assuming the input is made of 1 channel of 2 bytes (i.e. 8 pixels).
- *
- * @param[in] src           Pointer to source image.
- * @param[in] up1_coeff     Weight of the topmost pixel
- * @param[in] up2_coeff     Weight of the upper pixel
- * @param[in] middle_coeff  Weight of the middle pixel
- * @param[in] down1_coeff   Weight of the lower pixel
- * @param[in] down2_coeff   Weight of the bottommost pixel
- *
- * @return a short8 containing 8 convolved values.
- */
-short8 sobel5x1(
- Image *src,
- const short up1_coeff,
- const short up2_coeff,
- const short middle_coeff,
- const short down1_coeff,
- const short down2_coeff)
-{
- short8 val;
- short8 out = (short8)0;
-
- val = vload8(0, (__global short *)offset(src, 0, -2));
- out += val * (short8)up1_coeff;
-
- val = vload8(0, (__global short *)offset(src, 0, -1));
- out += val * (short8)up2_coeff;
-
- val = vload8(0, (__global short *)offset(src, 0, 0));
- out += val * (short8)middle_coeff;
-
- val = vload8(0, (__global short *)offset(src, 0, 1));
- out += val * (short8)down1_coeff;
-
- val = vload8(0, (__global short *)offset(src, 0, 2));
- out += val * (short8)down2_coeff;
-
- return (short8)(out);
-}
-
-/** Apply a 1x5 sobel matrix to a single channel U8 input image and output two temporary channel S16 images.
- *
- * @attention To enable computation of the X gradient -DGRAD_X must be passed at compile time, while computation of the Y gradient
- * is performed when -DGRAD_Y is used. You can use both when computation of both gradients is required.
- *
- * @param[in]  src_ptr                              Pointer to the source image. Supported data types: U8
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[out] dst_gx_ptr                           Pointer to the destination image. Supported data types: S16
- * @param[in] dst_gx_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] dst_gx_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_gx_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] dst_gx_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_gx_offset_first_element_in_bytes The offset of the first element in the destination image
- * @param[out] dst_gy_ptr Pointer to the destination image. Supported data types: S16
- * @param[in] dst_gy_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] dst_gy_step_x dst_gy_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_gy_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] dst_gy_step_y dst_gy_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_gy_offset_first_element_in_bytes The offset of the first element in the destination image
- */
-__kernel void sobel_separable1x5(
- IMAGE_DECLARATION(src)
-#ifdef GRAD_X
- ,
- IMAGE_DECLARATION(dst_gx)
-#endif /* GRAD_X */
-#ifdef GRAD_Y
- ,
- IMAGE_DECLARATION(dst_gy)
-#endif /* GRAD_Y */
-)
-{
- Image src = CONVERT_TO_IMAGE_STRUCT(src);
-#ifdef GRAD_X
- Image dst_gx = CONVERT_TO_IMAGE_STRUCT(dst_gx);
-#endif /* GRAD_X */
-#ifdef GRAD_Y
- Image dst_gy = CONVERT_TO_IMAGE_STRUCT(dst_gy);
-#endif /* GRAD_Y */
-
- // Output pixels
- short16 gx_gy = sobel1x5(&src,
- -1, -2, 0, 2, 1,
- 1, 4, 6, 4, 1);
-
- // Store result in dst
-#ifdef GRAD_X
- vstore8(gx_gy.s01234567, 0, ((__global short *)dst_gx.ptr));
-#endif /* GRAD_X */
-#ifdef GRAD_Y
- vstore8(gx_gy.s89ABCDEF, 0, ((__global short *)dst_gy.ptr));
-#endif /* GRAD_Y */
-}
-
-/** Apply a 5x1 convolution matrix to two single channel S16 input temporary images
- * and output two single channel S16 images.
- *
- * @attention To enable computation of the X gradient -DGRAD_X must be passed at compile time, while computation of the Y gradient
- * is performed when -DGRAD_Y is used. You can use both when computation of both gradients is required.
- *
- * @param[in]  src_x_ptr                              Pointer to the source image. Supported data types: S16
- * @param[in] src_x_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_x_step_x src_x_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_x_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_x_step_y src_x_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_x_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[out] dst_gx_ptr Pointer to the destination image. Supported data types: S16
- * @param[in] dst_gx_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] dst_gx_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_gx_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] dst_gx_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_gx_offset_first_element_in_bytes The offset of the first element in the destination image
- * @param[in] src_y_ptr Pointer to the source image. Supported data types: S16
- * @param[in] src_y_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_y_step_x src_y_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_y_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_y_step_y src_y_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_y_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[out] dst_gy_ptr Pointer to the destination image. Supported data types: S16
- * @param[in] dst_gy_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] dst_gy_step_x dst_gy_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_gy_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] dst_gy_step_y dst_gy_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_gy_offset_first_element_in_bytes The offset of the first element in the destination image
- * @param[in]  dummy                                  Dummy parameter to ease conditional inclusion
- */
-__kernel void sobel_separable5x1(
-#ifdef GRAD_X
- IMAGE_DECLARATION(src_x),
- IMAGE_DECLARATION(dst_gx),
-#endif /* GRAD_X */
-#ifdef GRAD_Y
- IMAGE_DECLARATION(src_y),
- IMAGE_DECLARATION(dst_gy),
-#endif /* GRAD_Y */
- int dummy)
-{
-#ifdef GRAD_X
- Image src_x = CONVERT_TO_IMAGE_STRUCT(src_x);
- Image dst_gx = CONVERT_TO_IMAGE_STRUCT(dst_gx);
-#endif /* GRAD_X */
-#ifdef GRAD_Y
- Image src_y = CONVERT_TO_IMAGE_STRUCT(src_y);
- Image dst_gy = CONVERT_TO_IMAGE_STRUCT(dst_gy);
-#endif /* GRAD_Y */
-
-#ifdef GRAD_X
- short8 gx = sobel5x1(&src_x,
- 1, 4, 6, 4, 1);
- vstore8(gx, 0, ((__global short *)dst_gx.ptr));
-#endif /* GRAD_X */
-#ifdef GRAD_Y
- short8 gy = sobel5x1(&src_y,
- -1, -2, 0, 2, 1);
- vstore8(gy, 0, ((__global short *)dst_gy.ptr));
-#endif /* GRAD_Y */
-}
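The 1x5 and 5x1 passes above are the separable form of the 5x5 Sobel operator: the full 2D kernels are the outer products of the derivative vector [-1 -2 0 2 1] and the smoothing vector [1 4 6 4 1] used by sobel_separable1x5 and sobel_separable5x1. The small standalone C program below (an editorial sketch, not library code) prints the resulting Gx and Gy matrices to make that equivalence concrete.

#include <stdio.h>

int main(void)
{
    const int deriv[5]  = { -1, -2, 0, 2, 1 };
    const int smooth[5] = { 1, 4, 6, 4, 1 };

    /* Gx: vertical smoothing x horizontal derivative */
    printf("Gx:\n");
    for (int r = 0; r < 5; ++r)
    {
        for (int c = 0; c < 5; ++c)
            printf("%4d", smooth[r] * deriv[c]);
        printf("\n");
    }

    /* Gy: vertical derivative x horizontal smoothing */
    printf("Gy:\n");
    for (int r = 0; r < 5; ++r)
    {
        for (int c = 0; c < 5; ++c)
            printf("%4d", deriv[r] * smooth[c]);
        printf("\n");
    }
    return 0;
}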
-
-/**********************************************/
-/* End implementation of Sobel5x5 filter */
-/**********************************************/
-
-/***********************************************/
-/* Begin implementation of Sobel7x7 filter */
-/***********************************************/
-
-/* Sobel 1x7 horizontal X / 7x1 vertical Y coefficients */
-#define X0 -1
-#define X1 -4
-#define X2 -5
-#define X3 0
-#define X4 5
-#define X5 4
-#define X6 1
-
-/* Sobel 1x7 vertical X / 7x1 horizontal Y coefficients */
-#define Y0 1
-#define Y1 6
-#define Y2 15
-#define Y3 20
-#define Y4 15
-#define Y5 6
-#define Y6 1
-
-/* Calculates single horizontal iteration. */
-#define SOBEL1x1_HOR(src, gx, gy, idx) \
- { \
- int8 val = convert_int8(vload8(0, offset(src, idx - 3, 0))); \
- gx += val * X##idx; \
- gy += val * Y##idx; \
- }
-
-/* Calculates single vertical iteration. */
-#define SOBEL1x1_VERT(src, g, direction, idx) \
- { \
- int8 val = vload8(0, (__global int *)offset(src, 0, idx - 3)); \
- g += val * (int8)direction##idx; \
- }
-
-/* Calculates a 1x7 horizontal iteration. */
-#define SOBEL1x7(ptr, gx, gy) \
- SOBEL1x1_HOR(ptr, gx, gy, 0) \
- SOBEL1x1_HOR(ptr, gx, gy, 1) \
- SOBEL1x1_HOR(ptr, gx, gy, 2) \
- SOBEL1x1_HOR(ptr, gx, gy, 3) \
- SOBEL1x1_HOR(ptr, gx, gy, 4) \
- SOBEL1x1_HOR(ptr, gx, gy, 5) \
- SOBEL1x1_HOR(ptr, gx, gy, 6)
-
-/* Calculates a 7x1 vertical iteration. */
-#define SOBEL7x1(ptr, g, direction) \
- SOBEL1x1_VERT(ptr, g, direction, 0) \
- SOBEL1x1_VERT(ptr, g, direction, 1) \
- SOBEL1x1_VERT(ptr, g, direction, 2) \
- SOBEL1x1_VERT(ptr, g, direction, 3) \
- SOBEL1x1_VERT(ptr, g, direction, 4) \
- SOBEL1x1_VERT(ptr, g, direction, 5) \
- SOBEL1x1_VERT(ptr, g, direction, 6)
-
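Expanded, the SOBEL1x7/SOBEL7x1 macro chains above simply accumulate seven taps against the X (derivative) and Y (smoothing) coefficient tables. The scalar C sketch below shows the horizontal pass for a single pixel (the vectorised kernel processes eight pixels at once); the function name and types are illustrative assumptions.

/* One-pixel equivalent of SOBEL1x7: seven taps centred on x, accumulated
 * against the derivative (X) and smoothing (Y) coefficient tables. */
static void sobel7_taps(const unsigned char *row, int x, long *gx, long *gy)
{
    static const int xc[7] = { -1, -4, -5, 0, 5, 4, 1 };
    static const int yc[7] = { 1, 6, 15, 20, 15, 6, 1 };

    long sx = 0, sy = 0;
    for (int i = 0; i < 7; ++i)
    {
        const int px = row[x + i - 3]; /* matches offset(src, idx - 3, 0) in the macro */
        sx += px * xc[i];
        sy += px * yc[i];
    }
    *gx = sx;
    *gy = sy;
}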
-/** Apply a 1x7 sobel matrix to a single channel U8 input image and output two temporary single channel S32 images and leave the borders undefined.
- *
- * @attention To enable computation of the X gradient -DGRAD_X must be passed at compile time, while computation of the Y gradient
- * is performed when -DGRAD_Y is used. You can use both when computation of both gradients is required.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: U8
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[out] dst_gx_ptr Pointer to the destination image. Supported data types: S32
- * @param[in] dst_gx_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] dst_gx_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_gx_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] dst_gx_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_gx_offset_first_element_in_bytes The offset of the first element in the destination image
- * @param[out] dst_gy_ptr Pointer to the destination image. Supported data types: S32
- * @param[in] dst_gy_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] dst_gy_step_x dst_gy_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_gy_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] dst_gy_step_y dst_gy_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_gy_offset_first_element_in_bytes The offset of the first element in the destination image
- */
-__kernel void sobel_separable1x7(
- IMAGE_DECLARATION(src)
-#ifdef GRAD_X
- ,
- IMAGE_DECLARATION(dst_gx)
-#endif /* GRAD_X */
-#ifdef GRAD_Y
- ,
- IMAGE_DECLARATION(dst_gy)
-#endif /* GRAD_Y */
-)
-{
- Image src = CONVERT_TO_IMAGE_STRUCT(src);
-#ifdef GRAD_X
- Image dst_gx = CONVERT_TO_IMAGE_STRUCT(dst_gx);
-#endif /* GRAD_X */
-#ifdef GRAD_Y
- Image dst_gy = CONVERT_TO_IMAGE_STRUCT(dst_gy);
-#endif /* GRAD_Y */
- int8 gx = (int8)0;
- int8 gy = (int8)0;
-
- SOBEL1x7(&src, gx, gy);
-
- // Store result in dst
-#ifdef GRAD_X
- vstore8(gx, 0, ((__global int *)dst_gx.ptr));
-#endif /* GRAD_X */
-#ifdef GRAD_Y
- vstore8(gy, 0, ((__global int *)dst_gy.ptr));
-#endif /* GRAD_Y */
-}
-
-/** Apply a 7x1 convolution matrix to two single channel S32 input temporary images and output two single channel S32 images and leave the borders undefined.
- *
- * @attention To enable computation of the X gradient -DGRAD_X must be passed at compile time, while computation of the Y gradient
- * is performed when -DGRAD_Y is used. You can use both when computation of both gradients is required.
- *
- * @param[in] src_x_ptr Pointer to the source image. Supported data types: S32
- * @param[in] src_x_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_x_step_x src_x_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_x_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_x_step_y src_x_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_x_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[out] dst_gx_ptr                             Pointer to the destination image. Supported data types: S32
- * @param[in] dst_gx_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] dst_gx_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_gx_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] dst_gx_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_gx_offset_first_element_in_bytes The offset of the first element in the destination image
- * @param[in] src_y_ptr Pointer to the source image. Supported data types: S32
- * @param[in] src_y_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_y_step_x src_y_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_y_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_y_step_y src_y_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_y_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[out] dst_gy_ptr                             Pointer to the destination image. Supported data types: S32
- * @param[in] dst_gy_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] dst_gy_step_x dst_gy_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_gy_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] dst_gy_step_y dst_gy_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_gy_offset_first_element_in_bytes The offset of the first element in the destination image
- * @param[in]  dummy                                  Dummy parameter to ease conditional inclusion
- */
-__kernel void sobel_separable7x1(
-#ifdef GRAD_X
- IMAGE_DECLARATION(src_x),
- IMAGE_DECLARATION(dst_gx),
-#endif /* GRAD_X */
-#ifdef GRAD_Y
- IMAGE_DECLARATION(src_y),
- IMAGE_DECLARATION(dst_gy),
-#endif /* GRAD_Y */
- int dummy)
-{
-#ifdef GRAD_X
- Image src_x = CONVERT_TO_IMAGE_STRUCT(src_x);
- Image dst_gx = CONVERT_TO_IMAGE_STRUCT(dst_gx);
-#endif /* GRAD_X */
-#ifdef GRAD_Y
- Image src_y = CONVERT_TO_IMAGE_STRUCT(src_y);
- Image dst_gy = CONVERT_TO_IMAGE_STRUCT(dst_gy);
-#endif /* GRAD_Y */
-
- // Output pixels
-#ifdef GRAD_X
- int8 gx = 0;
- SOBEL7x1(&src_x, gx, Y);
- vstore8(gx, 0, (__global int *)dst_gx.ptr);
-#endif /* GRAD_X */
-#ifdef GRAD_Y
- int8 gy = 0;
- SOBEL7x1(&src_y, gy, X);
- vstore8(gy, 0, (__global int *)dst_gy.ptr);
-#endif /* GRAD_Y */
-}
-
-/**********************************************/
-/* End implementation of Sobel7x7 filter */
-/**********************************************/
diff --git a/src/core/CL/cl_kernels/softmax_layer.cl b/src/core/CL/cl_kernels/softmax_layer.cl
deleted file mode 100644
index 01f5de47cf..0000000000
--- a/src/core/CL/cl_kernels/softmax_layer.cl
+++ /dev/null
@@ -1,533 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers.h"
-
-#if defined(DATA_TYPE) && defined(MIN_VALUE) && defined(VECTOR_SIZE) && defined(VECTOR_SIZE_LEFTOVER)
-
-/** Divides all the values of the input tensor by the sum calculated from the softmax_layer_max_shift_exp_sum kernel.
- *
- * @note Datatype must be given as a preprocessor argument using -DDATA_TYPE, e.g. -DDATA_TYPE=float
- * @note The zero value for the given data type must be given as a preprocessor argument using -DMIN_VALUE, e.g. -DMIN_VALUE=0
- * @note Vector size should be given as a preprocessor argument using -DVECTOR_SIZE=size. e.g. -DVECTOR_SIZE=16
- * @note Leftover vector size has to be passed at compile time using -DVECTOR_SIZE_LEFTOVER. e.g. -DVECTOR_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VECTOR_SIZE
- * @note In case of log softmax, -DLOG_SOFTMAX must be passed.
- *
- * @param[in] src_ptr Pointer to the source tensor slice. Supported data types: F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] sum_ptr Pointer to the sum values tensor slice. Supported data types: same as @p src_ptr
- * @param[in] sum_stride_x Stride of the sum values tensor in X dimension (in bytes)
- * @param[in] sum_step_x sum_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] sum_stride_y Stride of the sum values tensor in Y dimension (in bytes)
- * @param[in] sum_step_y sum_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] sum_stride_z Stride of the sum values tensor in Z dimension (in bytes)
- * @param[in] sum_step_z sum_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] sum_offset_first_element_in_bytes The offset of the first element in the sum values tensor
- * @param[out] dst_ptr Pointer to the destination tensor slice. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void softmax_layer_norm(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(sum),
- TENSOR3D_DECLARATION(dst))
-{
- const int x_offs = max((int)(get_global_id(0) * VECTOR_SIZE - (VECTOR_SIZE - VECTOR_SIZE_LEFTOVER) % VECTOR_SIZE), 0) * sizeof(DATA_TYPE);
-
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x_offs + get_global_id(1) * src_stride_y + get_global_id(2) * src_stride_z;
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offs + get_global_id(1) * dst_stride_y + get_global_id(2) * dst_stride_z;
-
- Image sum = CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(sum);
-
- // Load max value of 1D logits vector (row)
- DATA_TYPE sum_val = *((__global DATA_TYPE *)offset(&sum, 0, get_global_id(1)));
- VEC_DATA_TYPE(DATA_TYPE, VECTOR_SIZE)
- data0 = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)src_addr);
-
-#if defined(LOG_SOFTMAX)
- sum_val = log(sum_val);
- data0 -= sum_val;
-#else // defined(LOG_SOFTMAX)
- data0 /= sum_val;
-#endif // defined(LOG_SOFTMAX)
-
- STORE_VECTOR_SELECT(data, DATA_TYPE, dst_addr, VECTOR_SIZE, VECTOR_SIZE_LEFTOVER, VECTOR_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
-}
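For reference, a scalar C sketch of what softmax_layer_norm does per row, assuming the row already holds the exponentials produced by the max/shift/exp/sum pass (for log softmax, the shifted values) and sum holds their sum; the helper name and signature are illustrative.

#include <math.h>

/* Normalise one row: divide by the exp-sum, or subtract log(sum) for
 * log-softmax, matching the two branches of the kernel above. */
static void softmax_norm_row(float *row, int width, float sum, int log_softmax)
{
    const float log_sum = log_softmax ? logf(sum) : 0.0f;
    for (int i = 0; i < width; ++i)
        row[i] = log_softmax ? row[i] - log_sum : row[i] / sum;
}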
-
-#if defined(SRC_WIDTH) && defined(LOG_VECTOR_SIZE) && defined(MINVAL)
-
-/* Number of workitems in dimension 0. */
-#if !defined(GRID_SIZE)
-#define GRID_SIZE 1
-#endif /* !defined(GRID_SIZE) */
-
-#define VEC_TYPE VEC_DATA_TYPE(DATA_TYPE, VECTOR_SIZE)
-#define SELECT_TYPE SELECT_VEC_DATA_TYPE(DATA_TYPE, VECTOR_SIZE)
-
-/** Identifies the maximum value across the 1st dimension and shifts the values of the input tensor by this maximum value,
- * then takes the exponential of each element and sums all elements across each row.
- *
- * @note Datatype must be given as a preprocessor argument using -DDATA_TYPE, e.g. -DDATA_TYPE=float
- * @note The zero value for the given data type must be given as a preprocessor argument using -DMIN_VALUE, e.g. -DMIN_VALUE=0
- * @note Vector size should be given as a preprocessor argument using -DVECTOR_SIZE=size. e.g. -DVECTOR_SIZE=16
- * @note Leftover vector size has to be passed at compile time using -DVECTOR_SIZE_LEFTOVER. e.g. -DVECTOR_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VECTOR_SIZE
- * @note In case the input is not a multiple of VECTOR_SIZE (2,4,8,16) -DNON_MULTIPLE_OF_VECTOR_SIZE must be passed.
- * @note Beta can be optionally passed at compile time using -DBETA (by default, it is 1.0).
- * @note In case of log softmax, -DLOG_SOFTMAX must be passed.
- * @note Based on the data type, the minimum possible value must be passed using -DMINVAL. For float it should be defined as -FLT_MAX, while for half it should be -HALF_MAX
- *
- * @param[in] src_ptr Pointer to the source tensor slice. Supported data types: F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] maxo_ptr Pointer to the max values tensor slice. Supported data types: same as @p src_ptr
- * @param[in] maxo_stride_x Stride of the max values tensor in X dimension (in bytes)
- * @param[in]  maxo_step_x                           maxo_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  maxo_stride_y                         Stride of the max values tensor in Y dimension (in bytes)
- * @param[in]  maxo_step_y                           maxo_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  maxo_stride_z                         Stride of the max values tensor in Z dimension (in bytes)
- * @param[in]  maxo_step_z                           maxo_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] maxo_offset_first_element_in_bytes The offset of the first element in the max values tensor
- * @param[out] dst_ptr Pointer to the destination tensor slice. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[out] sum_ptr Pointer to the sum values tensor slice. Supported data types: same as @p src_ptr
- * @param[in] sum_stride_x Stride of the sum values tensor in X dimension (in bytes)
- * @param[in] sum_step_x sum_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] sum_stride_y Stride of the sum values tensor in Y dimension (in bytes)
- * @param[in]  sum_step_y                            sum_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] sum_stride_z Stride of the sum values tensor in Z dimension (in bytes)
- * @param[in] sum_step_z sum_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] sum_offset_first_element_in_bytes The offset of the first element in the sum values tensor
- */
-__kernel void softmax_layer_max_shift_exp_sum_serial(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(maxo),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(sum))
-{
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + get_global_id(1) * src_stride_y + get_global_id(2) * src_stride_z;
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + get_global_id(1) * dst_stride_y + get_global_id(2) * dst_stride_z;
-
- Image maxo = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(maxo);
- Image sum = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(sum);
-
-#ifdef BETA
- // Initialize beta
- VEC_TYPE beta = (VEC_TYPE)BETA;
-#endif /* BETA */
-
- // Initialize local maximum
- VEC_TYPE max_val_vec = (VEC_TYPE)(MINVAL);
-
-#ifdef NON_MULTIPLE_OF_VECTOR_SIZE
- VEC_TYPE data = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)src_addr);
- SELECT_TYPE widx = (SELECT_TYPE)VECTOR_SIZE_LEFTOVER > VEC_OFFS(SELECT_DATA_TYPE(DATA_TYPE), VECTOR_SIZE);
- max_val_vec = max(max_val_vec, select((VEC_TYPE)(MINVAL), data, widx));
-#endif /* NON_MULTIPLE_OF_VECTOR_SIZE */
-
- for(uint i = VECTOR_SIZE_LEFTOVER; i < SRC_WIDTH; i += VECTOR_SIZE)
- {
- VEC_TYPE data = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(src_addr + i * sizeof(DATA_TYPE)));
- max_val_vec = max(data, max_val_vec);
- }
-
- // Perform max reduction
- DATA_TYPE max_val = MAX_REDUCE(max_val_vec, VECTOR_SIZE);
- *((__global DATA_TYPE *)maxo.ptr) = max_val;
-
- /* Second section */
-
- // Set sum vector
- VEC_TYPE sum1D = 0;
-
-#ifdef NON_MULTIPLE_OF_VECTOR_SIZE
- data -= max_val;
-#ifdef BETA
- data *= beta;
-#endif /* BETA */
-#ifdef LOG_SOFTMAX
- VSTORE_PARTIAL(VECTOR_SIZE, VECTOR_SIZE_LEFTOVER)
- (data, 0, (__global DATA_TYPE *)dst_addr);
- data = exp(data);
- data = select(0, data, widx);
-#else /* LOG_SOFTMAX */
- data = exp(data);
- data = select(0, data, widx);
- VSTORE_PARTIAL(VECTOR_SIZE, VECTOR_SIZE_LEFTOVER)
- (data, 0, (__global DATA_TYPE *)dst_addr);
-#endif /* LOG_SOFTMAX */
- sum1D += data;
-#endif /* NON_MULTIPLE_OF_VECTOR_SIZE */
-
- // Shift values, exp and sum
- for(uint i = VECTOR_SIZE_LEFTOVER; i < SRC_WIDTH; i += VECTOR_SIZE)
- {
- VEC_TYPE data = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(src_addr + i * sizeof(DATA_TYPE)));
- data -= max_val;
-#ifdef BETA
- data *= beta;
-#endif /* BETA */
-#ifdef LOG_SOFTMAX
- VSTORE(VECTOR_SIZE)
- (data, 0, (__global DATA_TYPE *)(dst_addr + i * sizeof(DATA_TYPE)));
- data = exp(data);
-#else /* LOG_SOFTMAX */
- data = exp(data);
- VSTORE(VECTOR_SIZE)
- (data, 0, (__global DATA_TYPE *)(dst_addr + i * sizeof(DATA_TYPE)));
-#endif /* LOG_SOFTMAX */
- sum1D += data;
- }
-
- // Perform sum reduction
- *((__global DATA_TYPE *)sum.ptr) = SUM_REDUCE(sum1D, VECTOR_SIZE);
-}
-
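For reference, the vectorised serial kernel above performs the same computation as the following scalar pass over a single row (a minimal sketch in plain C, assuming a row of width floats and an optional beta scale; the function name and signature are illustrative, not part of the library):

#include <math.h>

/* Scalar reference: find the row maximum, subtract it, optionally scale by
 * beta, exponentiate, and accumulate the sum. A LOG_SOFTMAX build stores the
 * shifted value instead of its exponential, but still sums the exponentials. */
static float softmax_row_shift_exp_sum(const float *src, float *dst, int width, float beta)
{
    float max_val = -INFINITY;
    for (int i = 0; i < width; ++i)
    {
        max_val = fmaxf(max_val, src[i]);
    }

    float sum = 0.0f;
    for (int i = 0; i < width; ++i)
    {
        const float shifted = (src[i] - max_val) * beta;
        dst[i]              = expf(shifted); /* LOG_SOFTMAX would store 'shifted' here */
        sum += dst[i];
    }
    return sum;
}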
-/** Identifies the maximum value across the 1st dimension and shifts the values of the input tensor by this maximum value,
- * then takes the exponent of each element and sums all elements across each row.
- *
- * @note Datatype must be given as a preprocessor argument using -DDATA_TYPE, e.g. -DDATA_TYPE=float
- * @note The zero value for the given data type must be given as a preprocessor argument using -DMIN_VALUE, e.g. -DMIN_VALUE=0
- * @note Vector size should be given as a preprocessor argument using -DVECTOR_SIZE=size. e.g. -DVECTOR_SIZE=16
- * @note Leftover vector size has to be passed at compile time using -DVECTOR_SIZE_LEFTOVER. e.g. -DVECTOR_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VECTOR_SIZE
- * @note In case the input is not a multiple of VECTOR_SIZE (2,4,8,16) -DNON_MULTIPLE_OF_VECTOR_SIZE must be passed.
- * @note Beta can be optionally passed at compile time using -DBETA (by default, it is 1.0).
- * @note In case of log softmax, -DLOG_SOFTMAX must be passed.
- * @note Based on the data type, the minimum possible value must be passed using -DMINVAL. For float it should be defined as -FLT_MAX, while for half it should be -HALF_MAX
- *
- * @param[in] src_ptr Pointer to the source tensor slice. Supported data types: F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] maxo_ptr Pointer to the max values tensor slice. Supported data types: same as @p src_ptr
- * @param[in] maxo_stride_x Stride of the max values tensor in X dimension (in bytes)
- * @param[in] maxo_step_x max_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] maxo_stride_y Stride of the max values tensor in Y dimension (in bytes)
- * @param[in] maxo_step_y max_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] maxo_stride_z Stride of the max values tensor in Z dimension (in bytes)
- * @param[in] maxo_step_z max_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] maxo_offset_first_element_in_bytes The offset of the first element in the max values tensor
- * @param[out] dst_ptr Pointer to the destination tensor slice. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[out] sum_ptr Pointer to the sum values tensor slice. Supported data types: same as @p src_ptr
- * @param[in] sum_stride_x Stride of the sum values tensor in X dimension (in bytes)
- * @param[in] sum_step_x sum_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] sum_stride_y Stride of the sum values tensor in Y dimension (in bytes)
- * @param[in]  sum_step_y                              sum_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] sum_stride_z Stride of the sum values tensor in Z dimension (in bytes)
- * @param[in] sum_step_z sum_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] sum_offset_first_element_in_bytes The offset of the first element in the sum values tensor
- */
-__kernel void softmax_layer_max_shift_exp_sum_parallel(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(maxo),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(sum))
-{
- const uint lid = get_local_id(0);
- const uint x_offs = (VECTOR_SIZE_LEFTOVER + lid * VECTOR_SIZE) * sizeof(DATA_TYPE);
-
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x_offs + get_global_id(1) * src_stride_y + get_global_id(2) * src_stride_z;
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offs + get_global_id(1) * dst_stride_y + get_global_id(2) * dst_stride_z;
-
- Image maxo = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(maxo);
- Image sum = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(sum);
-
-#ifdef BETA
- // Initialize beta
- VEC_TYPE beta = (VEC_TYPE)BETA;
-#endif /* BETA */
-
- // Define one temporary vector per work-item.
- __local VEC_TYPE tmp_local[GRID_SIZE];
- __local DATA_TYPE max_local;
-
- VEC_TYPE max_val_vec = (VEC_TYPE)(MINVAL);
-
- // Number of iterations per work-item.
- const uint width = (SRC_WIDTH / GRID_SIZE) >> LOG_VECTOR_SIZE;
- // Calculate max of row
- uint i = 0;
- for(; i < width; ++i)
- {
- VEC_TYPE data_max = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(src_addr + (i * GRID_SIZE * VECTOR_SIZE) * sizeof(DATA_TYPE)));
- max_val_vec = max(data_max, max_val_vec);
- }
-#ifdef NON_MULTIPLE_OF_GRID_SIZE
- // How many work-items needed to complete the computation.
- //TODO: Optimize this calculation (avoid %).
- int boundary_workitems = (SRC_WIDTH % (GRID_SIZE * VECTOR_SIZE)) / VECTOR_SIZE;
- if(lid < boundary_workitems)
- {
- VEC_TYPE data_max = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(src_addr + (i * GRID_SIZE * VECTOR_SIZE) * sizeof(DATA_TYPE)));
- max_val_vec = max(data_max, max_val_vec);
- }
-#ifdef NON_MULTIPLE_OF_VECTOR_SIZE
- SELECT_TYPE widx;
- if(lid == 0)
- {
-        // Handle the leftover elements when the row width is not a multiple of the vector size (work-item 0 only)
- VEC_TYPE data_max = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(src_addr - VECTOR_SIZE_LEFTOVER * sizeof(DATA_TYPE)));
- widx = (SELECT_TYPE)VECTOR_SIZE_LEFTOVER > VEC_OFFS(SELECT_DATA_TYPE(DATA_TYPE), VECTOR_SIZE);
- max_val_vec = max(max_val_vec, select((VEC_TYPE)(MINVAL), data_max, widx));
- }
-#endif /* NON_MULTIPLE_OF_VECTOR_SIZE */
-#endif /* NON_MULTIPLE_OF_GRID_SIZE */
- tmp_local[lid] = max_val_vec;
-
- barrier(CLK_LOCAL_MEM_FENCE);
-
- if(GRID_SIZE >= 256)
- {
- if(lid < 128)
- {
- tmp_local[lid] = max(tmp_local[lid + 128], tmp_local[lid]);
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 128)
- {
- if(lid < 64)
- {
- tmp_local[lid] = max(tmp_local[lid + 64], tmp_local[lid]);
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 64)
- {
- if(lid < 32)
- {
- tmp_local[lid] = max(tmp_local[lid + 32], tmp_local[lid]);
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 32)
- {
- if(lid < 16)
- {
- tmp_local[lid] = max(tmp_local[lid + 16], tmp_local[lid]);
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 16)
- {
- if(lid < 8)
- {
- tmp_local[lid] = max(tmp_local[lid + 8], tmp_local[lid]);
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 8)
- {
- if(lid < 4)
- {
- tmp_local[lid] = max(tmp_local[lid + 4], tmp_local[lid]);
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 4)
- {
- if(lid < 2)
- {
- tmp_local[lid] = max(tmp_local[lid + 2], tmp_local[lid]);
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(lid == 0)
- {
- max_val_vec = max(tmp_local[lid + 1], tmp_local[lid]);
- max_local = MAX_REDUCE(max_val_vec, VECTOR_SIZE);
- }
- barrier(CLK_LOCAL_MEM_FENCE);
-
- /* Second section */
-
- // Set sum vector
- VEC_TYPE sum1D = 0;
- DATA_TYPE max_val = max_local;
-
- // Shift values, exp and sum
- for(i = 0; i < width; ++i)
- {
- VEC_TYPE data = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(src_addr + (i * GRID_SIZE * VECTOR_SIZE) * sizeof(DATA_TYPE)));
- data -= max_val;
-#ifdef BETA
- data *= beta;
-#endif /* BETA */
-#ifdef LOG_SOFTMAX
- VSTORE(VECTOR_SIZE)
- (data, 0, (__global DATA_TYPE *)(dst_addr + (i * GRID_SIZE * VECTOR_SIZE) * sizeof(DATA_TYPE)));
- data = exp(data);
-#else /* LOG_SOFTMAX */
- data = exp(data);
- VSTORE(VECTOR_SIZE)
- (data, 0, (__global DATA_TYPE *)(dst_addr + (i * GRID_SIZE * VECTOR_SIZE) * sizeof(DATA_TYPE)));
-#endif /* LOG_SOFTMAX */
- sum1D += data;
- }
-#ifdef NON_MULTIPLE_OF_GRID_SIZE
- //TODO: Optimize the calculation (avoid %).
- boundary_workitems = (SRC_WIDTH % (GRID_SIZE * VECTOR_SIZE)) / VECTOR_SIZE;
- if(lid < boundary_workitems)
- {
-        VEC_TYPE data = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(src_addr + (i * GRID_SIZE * VECTOR_SIZE) * sizeof(DATA_TYPE)));
- data -= max_val;
-#ifdef BETA
- data *= beta;
-#endif /* BETA */
-#ifdef LOG_SOFTMAX
- VSTORE(VECTOR_SIZE)
- (data, 0, (__global DATA_TYPE *)(dst_addr + (i * GRID_SIZE * VECTOR_SIZE) * sizeof(DATA_TYPE)));
- data = exp(data);
-#else /* LOG_SOFTMAX */
- data = exp(data);
- VSTORE(VECTOR_SIZE)
- (data, 0, (__global DATA_TYPE *)(dst_addr + (i * GRID_SIZE * VECTOR_SIZE) * sizeof(DATA_TYPE)));
-#endif /* LOG_SOFTMAX */
- sum1D += data;
- }
-#ifdef NON_MULTIPLE_OF_VECTOR_SIZE
- if(lid == 0)
- {
-        // Handle the leftover elements at the start of the row when the width is not a multiple of the vector size (work-item 0 only)
- VEC_TYPE data = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(src_addr - VECTOR_SIZE_LEFTOVER * sizeof(DATA_TYPE)));
- data -= max_val;
-#ifdef BETA
- data *= beta;
-#endif /* BETA */
-#ifdef LOG_SOFTMAX
- VSTORE_PARTIAL(VECTOR_SIZE, VECTOR_SIZE_LEFTOVER)
- (data, 0, (__global DATA_TYPE *)(dst_addr - VECTOR_SIZE_LEFTOVER * sizeof(DATA_TYPE)));
- data = exp(data);
- data = select(0, data, widx);
-#else /* LOG_SOFTMAX */
- data = exp(data);
- data = select(0, data, widx);
- VSTORE_PARTIAL(VECTOR_SIZE, VECTOR_SIZE_LEFTOVER)
- (data, 0, (__global DATA_TYPE *)(dst_addr - VECTOR_SIZE_LEFTOVER * sizeof(DATA_TYPE)));
-#endif /* LOG_SOFTMAX */
- sum1D += data;
- }
-#endif /* NON_MULTIPLE_OF_VECTOR_SIZE */
-#endif /* NON_MULTIPLE_OF_GRID_SIZE */
- tmp_local[lid] = sum1D;
-
- barrier(CLK_LOCAL_MEM_FENCE);
-
- if(GRID_SIZE >= 256)
- {
- if(lid < 128)
- {
- tmp_local[lid] += tmp_local[lid + 128];
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 128)
- {
- if(lid < 64)
- {
- tmp_local[lid] += tmp_local[lid + 64];
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 64)
- {
- if(lid < 32)
- {
- tmp_local[lid] += tmp_local[lid + 32];
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 32)
- {
- if(lid < 16)
- {
- tmp_local[lid] += tmp_local[lid + 16];
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 16)
- {
- if(lid < 8)
- {
- tmp_local[lid] += tmp_local[lid + 8];
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 8)
- {
- if(lid < 4)
- {
- tmp_local[lid] += tmp_local[lid + 4];
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 4)
- {
- if(lid < 2)
- {
- tmp_local[lid] += tmp_local[lid + 2];
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(lid == 0)
- {
- sum1D = (tmp_local[lid + 1] + tmp_local[lid]);
- // Perform sum reduction
- *((__global DATA_TYPE *)sum.ptr) = SUM_REDUCE(sum1D, VECTOR_SIZE);
- }
-}
-
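The unrolled GRID_SIZE comparisons above implement a standard work-group tree reduction in local memory. A minimal sketch of the same pattern as a standalone OpenCL kernel (illustrative only, assuming a power-of-two work-group size; kernel and argument names are not part of the library):

/* Each work-item writes its partial maximum to local memory; at every step
 * half of the work-items combine pairs, until work-item 0 holds the result. */
__kernel void workgroup_max_reduce(__global const float *in, __global float *out, __local float *tmp)
{
    const uint lid = get_local_id(0);

    tmp[lid] = in[get_global_id(0)];
    barrier(CLK_LOCAL_MEM_FENCE);

    for (uint stride = get_local_size(0) / 2; stride > 0; stride >>= 1)
    {
        if (lid < stride)
        {
            tmp[lid] = fmax(tmp[lid], tmp[lid + stride]);
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }

    if (lid == 0)
    {
        out[get_group_id(0)] = tmp[0];
    }
}

The kernel in this file unrolls that loop explicitly (GRID_SIZE is a compile-time constant) and keeps whole vectors in local memory, reducing across vector lanes only at the final step.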
-#endif // defined(SRC_WIDTH) && defined(LOG_VECTOR_SIZE) && defined(MINVAL)
-#endif // defined(DATA_TYPE) && defined(MIN_VALUE) && defined(VECTOR_SIZE) && defined(VECTOR_SIZE_LEFTOVER)
\ No newline at end of file
diff --git a/src/core/CL/cl_kernels/softmax_layer_quantized.cl b/src/core/CL/cl_kernels/softmax_layer_quantized.cl
deleted file mode 100644
index b7a6e00dfa..0000000000
--- a/src/core/CL/cl_kernels/softmax_layer_quantized.cl
+++ /dev/null
@@ -1,532 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers_asymm.h"
-
-#if defined(DATA_TYPE) && defined(MIN_VALUE) && defined(VECTOR_SIZE) && defined(VECTOR_SIZE_LEFTOVER) && defined(DIFF_MIN)
-
-#define VEC_BASE VEC_DATA_TYPE(DATA_TYPE, VECTOR_SIZE)
-#define VEC_INT VEC_DATA_TYPE(int, VECTOR_SIZE)
-
-/** Divides all the values of the input tensor by the sum calculated from softmax_layer_shift_exp_sum kernel.
- *
- * @note Datatype must be given as a preprocessor argument using -DDATA_TYPE, e.g. -DDATA_TYPE=uchar
- * @note The zero value for the given data type must be given as a preprocessor argument using -DMIN_VALUE, e.g. -DMIN_VALUE=-128
- * @note Vector size should be given as a preprocessor argument using -DVECTOR_SIZE=size. e.g. -DVECTOR_SIZE=16
- * @note Leftover vector size has to be passed at compile time using -DVECTOR_SIZE_LEFTOVER. e.g. -DVECTOR_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VECTOR_SIZE
- * @note Quantized beta can be optionally passed at compile time using -DINPUT_BETA_MULTIPLIER and -DINPUT_BETA_LEFT_SHIFT (if undefined, assume beta equals 1.0)
- * @note Additional quantization data must be passed at compile time using -DSCALED_DIFF_INT_BITS and -DEXP_ACCUMULATION_INT_BITS.
- * @note -DDIFF_MIN must be passed at compile time. It is the threshold difference between the maximum value of the input data and the currently processed value; it defines whether the value will be taken into account or not.
- * @note In case the input's data type is QASYMM8_SIGNED, -DQASYMM8_SIGNED must be passed.
- *
- * @param[in] src_ptr Pointer to the source tensor slice. Supported data types: S32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] sum_ptr Pointer to the sum values tensor slice. Supported data types: same as @p src_ptr
- * @param[in] sum_stride_x Stride of the sum values tensor in X dimension (in bytes)
- * @param[in] sum_step_x sum_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] sum_stride_y Stride of the sum values tensor in Y dimension (in bytes)
- * @param[in] sum_step_y sum_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] sum_stride_z Stride of the sum values tensor in Z dimension (in bytes)
- * @param[in] sum_step_z sum_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] sum_offset_first_element_in_bytes The offset of the first element in the sum values tensor
- * @param[out] dst_ptr Pointer to the destination tensor slice. Supported data types: QASYMM8/QASYMM8_SIGNED
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void softmax_layer_norm_quantized(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(sum),
- TENSOR3D_DECLARATION(dst))
-{
- const int x_offs = max((int)(get_global_id(0) * VECTOR_SIZE - (VECTOR_SIZE - VECTOR_SIZE_LEFTOVER) % VECTOR_SIZE), 0);
-
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x_offs * sizeof(int) + get_global_id(1) * src_stride_y + get_global_id(2) * src_stride_z;
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) + get_global_id(1) * dst_stride_y + get_global_id(2) * dst_stride_z;
-
- Image sum = CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(sum);
-
-    // Load the sum value of the 1D logits vector (row)
- int sum_val = *((__global int *)offset(&sum, 0, get_global_id(1)));
-
-    // It would be better to calculate this in the previous layer and pass it here as a parameter
- uint sum_val_u = convert_uint(sum_val);
- int headroom_plus_one = clz(sum_val_u);
- int num_bits_over_unit = EXP_ACCUMULATION_INT_BITS - headroom_plus_one;
- int shifted_sum_minus_one_1 = convert_int((sum_val_u << headroom_plus_one) - (1u << 31));
- VEC_INT shifted_sum_minus_one = shifted_sum_minus_one_1;
- VEC_INT shifted_scale = ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1(shifted_sum_minus_one, VECTOR_SIZE);
-
-    // It was already calculated in the previous layer; it should be stored in a temporary output and reused
- VEC_INT data_diff = VLOAD(VECTOR_SIZE)(0, (__global int *)src_addr);
- VEC_INT data_diff_mult = data_diff;
-#if defined(INPUT_BETA_MULTIPLIER) && defined(INPUT_BETA_LEFT_SHIFT)
- if(INPUT_BETA_MULTIPLIER > 1)
- {
- data_diff_mult = ASYMM_MULT(data_diff * (1 << INPUT_BETA_LEFT_SHIFT), INPUT_BETA_MULTIPLIER, VECTOR_SIZE);
- }
-#endif /* defined(INPUT_BETA_MULTIPLIER) && defined(INPUT_BETA_LEFT_SHIFT) */
-
- VEC_INT data = ASYMM_EXP_ON_NEGATIVE_VALUES(data_diff_mult, SCALED_DIFF_INT_BITS, VECTOR_SIZE);
- data = ASYMM_MULT(shifted_scale, data, VECTOR_SIZE);
- data = ASYMM_ROUNDING_DIVIDE_BY_POW2(data, num_bits_over_unit + 31 - 8, VECTOR_SIZE);
-#ifdef QASYMM8_SIGNED
- data += (VEC_INT)(MIN_VALUE);
-#endif /* QASYMM8_SIGNED */
- data = select(MIN_VALUE, data, data_diff >= (VEC_INT)(DIFF_MIN));
- VEC_BASE data0 = CONVERT_SAT(data, VEC_DATA_TYPE(DATA_TYPE, VECTOR_SIZE));
-
- STORE_VECTOR_SELECT(data, DATA_TYPE, dst_addr, VECTOR_SIZE, VECTOR_SIZE_LEFTOVER, VECTOR_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
-}
-
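The clz/headroom arithmetic above is the fixed-point counterpart of dividing each exponential by the row sum and requantizing the result to 8 bits: the sum is normalised so that its leading bit sits just below 2^31, a fixed-point reciprocal of (1 + x) is evaluated for the fractional remainder, and the leftover exponent is applied as a final rounding shift. The end result corresponds to the following scalar sketch in plain C (illustrative only; names are hypothetical and floating point replaces the 32-bit fixed-point helpers):

#include <stdint.h>

/* Divide one fixed-point exponential by the row sum and requantize to 8 bits. */
static uint8_t normalize_quantized(int32_t exp_q, int32_t sum_q)
{
    const float prob = (float)exp_q / (float)sum_q;     /* probability in [0, 1] */
    int32_t     q    = (int32_t)(prob * 256.0f + 0.5f); /* rescale to the 8-bit range */
    if (q > 255)
    {
        q = 255;
    }
    return (uint8_t)q;
}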
-#if defined(SRC_WIDTH) && defined(LOG_VECTOR_SIZE)
-
-/* Number of workitems in dimension 0. */
-#if !defined(GRID_SIZE)
-#define GRID_SIZE 1
-#endif /* !defined(GRID_SIZE) */
-
-#define VEC_UINT VEC_DATA_TYPE(uint, VECTOR_SIZE)
-
-VEC_INT mult_by_quantized_multiplier(VEC_INT data)
-{
-#if defined(INPUT_BETA_MULTIPLIER) && defined(INPUT_BETA_LEFT_SHIFT)
- if(INPUT_BETA_MULTIPLIER > 1)
- {
- return ASYMM_MULT(data * (1 << INPUT_BETA_LEFT_SHIFT), INPUT_BETA_MULTIPLIER, VECTOR_SIZE);
- }
-#endif /* defined(INPUT_BETA_MULTIPLIER) && defined(INPUT_BETA_LEFT_SHIFT) */
- return data;
-}
-
-/** Shifts the values of the input tensor by the max calculated in softmax_layer_max kernel,
- * then takes the exponent of each element and sums all elements across each row.
- *
- * @note Datatype must be given as a preprocessor argument using -DDATA_TYPE, e.g. -DDATA_TYPE=uchar
- * @note The zero value for the given data type must be given as a preprocessor argument using -DMIN_VALUE, e.g. -DMIN_VALUE=-128
- * @note Vector size should be given as a preprocessor argument using -DVECTOR_SIZE=size. e.g. -DVECTOR_SIZE=16
- * @note Leftover vector size has to be passed at compile time using -DVECTOR_SIZE_LEFTOVER. e.g. -DVECTOR_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VECTOR_SIZE
- * @note In case the input is not a multiple of VECTOR_SIZE, -DNON_MULTIPLE_OF_VECTOR_SIZE must be passed.
- * @note Quantized beta can be optionally passed at compile time using -DINPUT_BETA_MULTIPLIER and -DINPUT_BETA_LEFT_SHIFT (if undefined, assume beta equals 1.0)
- * @note Additional quantization data must be passed at compile time using -DSCALED_DIFF_INT_BITS and -DEXP_ACCUMULATION_INT_BITS.
- * @note -DDIFF_MIN must be passed at compile time. It is the threshold difference between the maximum value of the input data and the currently processed value; it defines whether the value will be taken into account or not.
- * @note In case the input's data type is QASYMM8_SIGNED, -DQASYMM8_SIGNED must be passed.
- *
- * @param[in] src_ptr Pointer to the source tensor slice. Supported data types: QASYMM8/QASYMM8_SIGNED
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] max_ptr Pointer to the max values tensor slice. Supported data types: same as @p src_ptr
- * @param[in] max_stride_x Stride of the max values tensor in X dimension (in bytes)
- * @param[in] max_step_x max_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] max_stride_y Stride of the max values tensor in Y dimension (in bytes)
- * @param[in] max_step_y max_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] max_stride_z Stride of the max values tensor in Z dimension (in bytes)
- * @param[in] max_step_z max_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] max_offset_first_element_in_bytes The offset of the first element in the max values tensor
- * @param[out] dst_ptr Pointer to the destination tensor slice. Supported data types: S32
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[out] sum_ptr Pointer to the sum values tensor slice. Supported data types: same as @p dst_ptr
- * @param[in] sum_stride_x Stride of the sum values tensor in X dimension (in bytes)
- * @param[in] sum_step_x sum_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] sum_stride_y Stride of the sum values tensor in Y dimension (in bytes)
- * @param[in]  sum_step_y                              sum_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] sum_stride_z Stride of the sum values tensor in Z dimension (in bytes)
- * @param[in] sum_step_z sum_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] sum_offset_first_element_in_bytes The offset of the first element in the sum values tensor
- */
-__kernel void softmax_layer_max_shift_exp_sum_quantized_serial(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(maxo),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(sum))
-{
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + get_global_id(1) * src_stride_y + get_global_id(2) * src_stride_z;
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + get_global_id(1) * dst_stride_y + get_global_id(2) * dst_stride_z;
-
- Image maxo = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(maxo);
- Image sum = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(sum);
-
- VEC_BASE max_val_vec = (VEC_BASE)(MIN_VALUE);
-
- // Calculate max of row
-#ifdef NON_MULTIPLE_OF_VECTOR_SIZE
- VEC_BASE vec_min_val = (VEC_BASE)(MIN_VALUE);
- VEC_BASE data = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)src_addr);
- VEC_INT widx = (VEC_INT)VECTOR_SIZE_LEFTOVER > VEC_OFFS(int, VECTOR_SIZE);
- max_val_vec = max(max_val_vec, select(vec_min_val, data, CONVERT(widx, VEC_BASE)));
-#endif /* NON_MULTIPLE_OF_VECTOR_SIZE */
-
- for(uint i = VECTOR_SIZE_LEFTOVER; i < SRC_WIDTH; i += VECTOR_SIZE)
- {
- VEC_BASE data = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(src_addr + i * sizeof(DATA_TYPE)));
- max_val_vec = max(data, max_val_vec);
- }
-
- // Perform max reduction
- DATA_TYPE max_local = MAX_REDUCE(max_val_vec, VECTOR_SIZE);
- *((__global DATA_TYPE *)maxo.ptr) = max_local;
-
- // Second part
-
- // Load max value of 1D logits vector (row)
- int max_val = convert_int(max_local);
-
- // Set sum vector, Q(EXP_ACCUMULATION_INT_BITS)
- VEC_INT sum1D = 0;
-
-#ifdef NON_MULTIPLE_OF_VECTOR_SIZE
- VEC_INT data_fp = CONVERT(data, VEC_INT);
- VEC_INT data_diff = data_fp - max_val;
- VEC_INT data_diff_mult = mult_by_quantized_multiplier(data_diff);
- data_fp = ASYMM_EXP_ON_NEGATIVE_VALUES(data_diff_mult, SCALED_DIFF_INT_BITS, VECTOR_SIZE);
- data_fp = ASYMM_RESCALE(data_fp, 0, EXP_ACCUMULATION_INT_BITS, VECTOR_SIZE);
- VSTORE_PARTIAL(VECTOR_SIZE, VECTOR_SIZE_LEFTOVER)
- (data_diff, 0, (__global int *)dst_addr);
- data_fp = select(0, data_fp, data_diff >= (VEC_INT)(DIFF_MIN));
- sum1D += select(0, data_fp, widx);
-#endif /* NON_MULTIPLE_OF_VECTOR_SIZE */
-
- // Shift values, exp and sum
- for(uint i = VECTOR_SIZE_LEFTOVER; i < SRC_WIDTH; i += VECTOR_SIZE)
- {
- VEC_BASE data = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(src_addr + i * sizeof(DATA_TYPE)));
- VEC_INT data_fp = CONVERT(data, VEC_INT);
- VEC_INT data_diff = data_fp - max_val;
- VEC_INT data_diff_mult = mult_by_quantized_multiplier(data_diff);
- data_fp = ASYMM_EXP_ON_NEGATIVE_VALUES(data_diff_mult, SCALED_DIFF_INT_BITS, VECTOR_SIZE);
- data_fp = ASYMM_RESCALE(data_fp, 0, EXP_ACCUMULATION_INT_BITS, VECTOR_SIZE);
- VSTORE(VECTOR_SIZE)
- (data_diff, 0, (__global int *)(dst_addr + i * sizeof(int)));
- sum1D = sum1D + select(0, data_fp, data_diff >= (VEC_INT)(DIFF_MIN));
- }
-
- // Perform sum reduction
- *((__global int *)sum.ptr) = SUM_REDUCE(sum1D, VECTOR_SIZE);
-}
-
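As with the float kernels, the vectorised code above maps to a simple scalar pass per row. A hedged sketch of the quantized shift/exp/sum step in plain C (illustrative only; the real kernel keeps the exponentials in 32-bit fixed point, while double is used here for clarity and the names are hypothetical):

#include <math.h>
#include <stdint.h>

/* Write out the shifted differences and accumulate exp() only for values whose
 * distance from the row maximum is within the DIFF_MIN threshold, matching the
 * select() on data_diff in the kernel above. */
static double quantized_shift_exp_sum(const uint8_t *src, int32_t *diff_out, int width,
                                      int32_t diff_min, double beta_scale)
{
    int32_t max_val = src[0];
    for (int i = 1; i < width; ++i)
    {
        if ((int32_t)src[i] > max_val)
        {
            max_val = (int32_t)src[i];
        }
    }

    double sum = 0.0;
    for (int i = 0; i < width; ++i)
    {
        const int32_t diff = (int32_t)src[i] - max_val; /* always <= 0 */
        diff_out[i]        = diff;
        if (diff >= diff_min)
        {
            sum += exp((double)diff * beta_scale);
        }
    }
    return sum;
}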
-/** Identifies the maximum value across the 1st dimension and shifts the values of the input tensor by this maximum value,
- * then takes the exponent of each element and sums all elements across each row.
- *
- * @note Datatype must be given as a preprocessor argument using -DDATA_TYPE, e.g. -DDATA_TYPE=uchar
- * @note The zero value for the given data type must be given as a preprocessor argument using -DMIN_VALUE, e.g. -DMIN_VALUE=-128
- * @note Vector size should be given as a preprocessor argument using -DVECTOR_SIZE=size. e.g. -DVECTOR_SIZE=16
- * @note Leftover vector size has to be passed at compile time using -DVECTOR_SIZE_LEFTOVER. e.g. -DVECTOR_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VECTOR_SIZE
- * @note In case the input is not a multiple of VECTOR_SIZE (2,4,8,16) -DNON_MULTIPLE_OF_VECTOR_SIZE must be passed.
- * @note Quantized beta can be optionally passed at compile time using -DINPUT_BETA_MULTIPLIER and -DINPUT_BETA_LEFT_SHIFT (if undefined, assume beta equals 1.0)
- * @note Additional quantization data must be passed at compile time using -DSCALED_DIFF_INT_BITS and -DEXP_ACCUMULATION_INT_BITS.
- * @note -DDIFF_MIN must be passed at compile time. It is the threshold difference between the maximum value of the input data and the currently processed value; it defines whether the value will be taken into account or not.
- * @note In case the input's data type is QASYMM8_SIGNED, -DQASYMM8_SIGNED must be passed.
- *
- * @param[in]  src_ptr                                 Pointer to the source tensor slice. Supported data types: QASYMM8/QASYMM8_SIGNED
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] maxo_ptr Pointer to the max values tensor slice. Supported data types: same as @p src_ptr
- * @param[in] maxo_stride_x Stride of the max values tensor in X dimension (in bytes)
- * @param[in] maxo_step_x max_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] maxo_stride_y Stride of the max values tensor in Y dimension (in bytes)
- * @param[in] maxo_step_y max_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] maxo_stride_z Stride of the max values tensor in Z dimension (in bytes)
- * @param[in] maxo_step_z max_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] maxo_offset_first_element_in_bytes The offset of the first element in the max values tensor
- * @param[out] dst_ptr                                 Pointer to the destination tensor slice. Supported data types: S32
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[out] sum_ptr                                 Pointer to the sum values tensor slice. Supported data types: same as @p dst_ptr
- * @param[in] sum_stride_x Stride of the sum values tensor in X dimension (in bytes)
- * @param[in] sum_step_x sum_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] sum_stride_y Stride of the sum values tensor in Y dimension (in bytes)
- * @param[in]  sum_step_y                              sum_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] sum_stride_z Stride of the sum values tensor in Z dimension (in bytes)
- * @param[in] sum_step_z sum_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] sum_offset_first_element_in_bytes The offset of the first element in the sum values tensor
- */
-__kernel void softmax_layer_max_shift_exp_sum_quantized_parallel(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(maxo),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(sum))
-{
- const uint lid = get_local_id(0);
- const uint x_offs = (VECTOR_SIZE_LEFTOVER + lid * VECTOR_SIZE);
-
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) + get_global_id(1) * src_stride_y + get_global_id(2) * src_stride_z;
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offs * sizeof(int) + get_global_id(1) * dst_stride_y + get_global_id(2) * dst_stride_z;
-
- Image maxo = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(maxo);
- Image sum = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(sum);
-
- // Define one temporary vector per work-item.
- __local VEC_INT tmp_local[GRID_SIZE];
- __local DATA_TYPE max_local;
-
- VEC_BASE vec_min_val = (VEC_BASE)(MIN_VALUE);
- VEC_BASE max_val_vec = vec_min_val;
-
- // Number of iterations per work-item.
- const uint width = (SRC_WIDTH / GRID_SIZE) >> LOG_VECTOR_SIZE;
- // Calculate max of row
- uint i = 0;
- for(; i < width; ++i)
- {
- VEC_BASE data_max = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(src_addr + (i * GRID_SIZE * VECTOR_SIZE) * sizeof(DATA_TYPE)));
- max_val_vec = max(data_max, max_val_vec);
- }
-#ifdef NON_MULTIPLE_OF_GRID_SIZE
- // How many work-items needed to complete the computation.
- //TODO: Optimize this calculation (avoid %).
- int boundary_workitems = (SRC_WIDTH % (GRID_SIZE * VECTOR_SIZE)) / VECTOR_SIZE;
- if(lid < boundary_workitems)
- {
- VEC_BASE data_max = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(src_addr + (i * GRID_SIZE * VECTOR_SIZE) * sizeof(DATA_TYPE)));
- max_val_vec = max(data_max, max_val_vec);
- }
-#ifdef NON_MULTIPLE_OF_VECTOR_SIZE
- VEC_INT widx;
- if(lid == 0)
- {
-        // Handle the leftover elements when the row width is not a multiple of the vector size (work-item 0 only)
- VEC_BASE data_max = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(src_addr - VECTOR_SIZE_LEFTOVER * sizeof(DATA_TYPE)));
- widx = (VEC_INT)VECTOR_SIZE_LEFTOVER > VEC_OFFS(int, VECTOR_SIZE);
- max_val_vec = max(max_val_vec, select(vec_min_val, data_max, CONVERT(widx, VEC_BASE)));
- }
-#endif /* NON_MULTIPLE_OF_VECTOR_SIZE */
-#endif /* NON_MULTIPLE_OF_GRID_SIZE */
- tmp_local[lid] = CONVERT(max_val_vec, VEC_INT);
-
- barrier(CLK_LOCAL_MEM_FENCE);
-
- if(GRID_SIZE >= 256)
- {
- if(lid < 128)
- {
- tmp_local[lid] = max(tmp_local[lid + 128], tmp_local[lid]);
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 128)
- {
- if(lid < 64)
- {
- tmp_local[lid] = max(tmp_local[lid + 64], tmp_local[lid]);
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 64)
- {
- if(lid < 32)
- {
- tmp_local[lid] = max(tmp_local[lid + 32], tmp_local[lid]);
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 32)
- {
- if(lid < 16)
- {
- tmp_local[lid] = max(tmp_local[lid + 16], tmp_local[lid]);
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 16)
- {
- if(lid < 8)
- {
- tmp_local[lid] = max(tmp_local[lid + 8], tmp_local[lid]);
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 8)
- {
- if(lid < 4)
- {
- tmp_local[lid] = max(tmp_local[lid + 4], tmp_local[lid]);
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 4)
- {
- if(lid < 2)
- {
- tmp_local[lid] = max(tmp_local[lid + 2], tmp_local[lid]);
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(lid == 0)
- {
- max_val_vec = max(CONVERT((tmp_local[lid + 1]), VEC_BASE), CONVERT((tmp_local[lid]), VEC_BASE));
- max_local = MAX_REDUCE(max_val_vec, VECTOR_SIZE);
- }
- barrier(CLK_LOCAL_MEM_FENCE);
-
- /* Second section */
-
- // Set sum vector
- VEC_INT sum1D = 0;
- int max_val = convert_int(max_local);
-
- // Shift values, exp and sum
- for(i = 0; i < width; ++i)
- {
- VEC_BASE data = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(src_addr + (i * GRID_SIZE * VECTOR_SIZE) * sizeof(DATA_TYPE)));
- VEC_INT data_fp = CONVERT(data, VEC_INT);
- VEC_INT data_diff = data_fp - max_val;
- VEC_INT data_diff_mult = mult_by_quantized_multiplier(data_diff);
- data_fp = ASYMM_EXP_ON_NEGATIVE_VALUES(data_diff_mult, SCALED_DIFF_INT_BITS, VECTOR_SIZE);
- data_fp = ASYMM_RESCALE(data_fp, 0, EXP_ACCUMULATION_INT_BITS, VECTOR_SIZE);
- VSTORE(VECTOR_SIZE)
- (data_diff, 0, (__global int *)(dst_addr + (i * GRID_SIZE * VECTOR_SIZE) * sizeof(int)));
- sum1D = sum1D + select(0, data_fp, data_diff >= (VEC_INT)(DIFF_MIN));
- }
-#ifdef NON_MULTIPLE_OF_GRID_SIZE
- //TODO: Optimize the calculation (avoid %).
- boundary_workitems = (SRC_WIDTH % (GRID_SIZE * VECTOR_SIZE)) / VECTOR_SIZE;
- if(lid < boundary_workitems)
- {
- VEC_BASE data = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(src_addr + (i * GRID_SIZE * VECTOR_SIZE) * sizeof(DATA_TYPE)));
- VEC_INT data_fp = CONVERT(data, VEC_INT);
- VEC_INT data_diff = data_fp - max_val;
- VEC_INT data_diff_mult = mult_by_quantized_multiplier(data_diff);
- data_fp = ASYMM_EXP_ON_NEGATIVE_VALUES(data_diff_mult, SCALED_DIFF_INT_BITS, VECTOR_SIZE);
- data_fp = ASYMM_RESCALE(data_fp, 0, EXP_ACCUMULATION_INT_BITS, VECTOR_SIZE);
- VSTORE(VECTOR_SIZE)
- (data_diff, 0, (__global int *)(dst_addr + (i * GRID_SIZE * VECTOR_SIZE) * sizeof(int)));
- sum1D = sum1D + select(0, data_fp, data_diff >= (VEC_INT)(DIFF_MIN));
- }
-#ifdef NON_MULTIPLE_OF_VECTOR_SIZE
- if(lid == 0)
- {
-        // Handle the leftover elements at the start of the row when the width is not a multiple of the vector size (work-item 0 only)
- VEC_BASE data = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)(src_addr - VECTOR_SIZE_LEFTOVER * sizeof(DATA_TYPE)));
- VEC_INT data_fp = CONVERT(data, VEC_INT);
- VEC_INT data_diff = data_fp - max_val;
- VEC_INT data_diff_mult = mult_by_quantized_multiplier(data_diff);
- data_fp = ASYMM_EXP_ON_NEGATIVE_VALUES(data_diff_mult, SCALED_DIFF_INT_BITS, VECTOR_SIZE);
- data_fp = ASYMM_RESCALE(data_fp, 0, EXP_ACCUMULATION_INT_BITS, VECTOR_SIZE);
- VSTORE_PARTIAL(VECTOR_SIZE, VECTOR_SIZE_LEFTOVER)
- (data_diff, 0, (__global int *)(dst_addr - VECTOR_SIZE_LEFTOVER * sizeof(int)));
- data_fp = select(MIN_VALUE, data_fp, data_diff >= (VEC_INT)(DIFF_MIN));
- data_fp = select(0, data_fp, widx);
- sum1D = sum1D + data_fp;
- }
-#endif /* NON_MULTIPLE_OF_VECTOR_SIZE */
-#endif /* NON_MULTIPLE_OF_GRID_SIZE */
- tmp_local[lid] = sum1D;
-
- barrier(CLK_LOCAL_MEM_FENCE);
-
- if(GRID_SIZE >= 256)
- {
- if(lid < 128)
- {
- tmp_local[lid] += tmp_local[lid + 128];
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 128)
- {
- if(lid < 64)
- {
- tmp_local[lid] += tmp_local[lid + 64];
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 64)
- {
- if(lid < 32)
- {
- tmp_local[lid] += tmp_local[lid + 32];
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 32)
- {
- if(lid < 16)
- {
- tmp_local[lid] += tmp_local[lid + 16];
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 16)
- {
- if(lid < 8)
- {
- tmp_local[lid] += tmp_local[lid + 8];
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 8)
- {
- if(lid < 4)
- {
- tmp_local[lid] += tmp_local[lid + 4];
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(GRID_SIZE >= 4)
- {
- if(lid < 2)
- {
- tmp_local[lid] += tmp_local[lid + 2];
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if(lid == 0)
- {
- sum1D = (tmp_local[lid + 1] + tmp_local[lid]);
- // Perform sum reduction
- *((__global int *)sum.ptr) = SUM_REDUCE(sum1D, VECTOR_SIZE);
- }
-}
-#endif // #if defined(SRC_WIDTH) && defined(LOG_VECTOR_SIZE)
-#endif /* defined(DATA_TYPE) && defined(DIFF_MIN) && defined(VECTOR_SIZE) && defined(VECTOR_SIZE_LEFTOVER) && defined(MIN_VALUE) */
diff --git a/src/core/CL/cl_kernels/tile_helpers.h b/src/core/CL/cl_kernels/tile_helpers.h
new file mode 100644
index 0000000000..8129606277
--- /dev/null
+++ b/src/core/CL/cl_kernels/tile_helpers.h
@@ -0,0 +1,1451 @@
+/*
+ * Copyright (c) 2021-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_SRC_CORE_CL_CL_KERNELS_TILE_HELPERS
+#define ACL_SRC_CORE_CL_CL_KERNELS_TILE_HELPERS
+
+// *INDENT-OFF*
+// clang-format off
+
+#define TILE_VECTOR_SIZE1 1
+#define TILE_VECTOR_SIZE2 2
+#define TILE_VECTOR_SIZE3 3
+#define TILE_VECTOR_SIZE4 4
+#define TILE_VECTOR_SIZE5 8
+#define TILE_VECTOR_SIZE6 8
+#define TILE_VECTOR_SIZE7 8
+#define TILE_VECTOR_SIZE8 8
+#define TILE_VECTOR_SIZE9 16
+#define TILE_VECTOR_SIZE10 16
+#define TILE_VECTOR_SIZE11 16
+#define TILE_VECTOR_SIZE12 16
+#define TILE_VECTOR_SIZE13 16
+#define TILE_VECTOR_SIZE14 16
+#define TILE_VECTOR_SIZE15 16
+#define TILE_VECTOR_SIZE16 16
+
+#define TILE_VECTOR_TYPE1(DATA_TYPE) DATA_TYPE##1
+#define TILE_VECTOR_TYPE2(DATA_TYPE) DATA_TYPE##2
+#define TILE_VECTOR_TYPE3(DATA_TYPE) DATA_TYPE##3
+#define TILE_VECTOR_TYPE4(DATA_TYPE) DATA_TYPE##4
+#define TILE_VECTOR_TYPE5(DATA_TYPE) DATA_TYPE##8
+#define TILE_VECTOR_TYPE6(DATA_TYPE) DATA_TYPE##8
+#define TILE_VECTOR_TYPE7(DATA_TYPE) DATA_TYPE##8
+#define TILE_VECTOR_TYPE8(DATA_TYPE) DATA_TYPE##8
+#define TILE_VECTOR_TYPE9(DATA_TYPE) DATA_TYPE##16
+#define TILE_VECTOR_TYPE10(DATA_TYPE) DATA_TYPE##16
+#define TILE_VECTOR_TYPE11(DATA_TYPE) DATA_TYPE##16
+#define TILE_VECTOR_TYPE12(DATA_TYPE) DATA_TYPE##16
+#define TILE_VECTOR_TYPE13(DATA_TYPE) DATA_TYPE##16
+#define TILE_VECTOR_TYPE14(DATA_TYPE) DATA_TYPE##16
+#define TILE_VECTOR_TYPE15(DATA_TYPE) DATA_TYPE##16
+#define TILE_VECTOR_TYPE16(DATA_TYPE) DATA_TYPE##16
+
+/** Tile object
+ * A tile object is a 2D memory block and can be accessed using the following syntax:
+ * -# a[m0].v       = access the vector at row "m0" (OpenCL vector)
+ * -# a[m0].s[n0]   = access the scalar element at row "m0" and column "n0" (scalar access)
+ *
+ * @param[in] DATA_TYPE Data type of the tile
+ * @param[in] H Number of tile rows
+ * @param[in] W         Number of tile columns
+ * @param[in] BASENAME Tile's name
+ */
+#define TILE(DATA_TYPE, H, W, BASENAME) TILE_STR(DATA_TYPE, H, W, BASENAME)
+#define TILE_STR(DATA_TYPE, H, W, BASENAME) \
+ union { \
+ DATA_TYPE s[TILE_VECTOR_SIZE##W]; \
+ TILE_VECTOR_TYPE##W(DATA_TYPE) v; \
+ } BASENAME[H]
+
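For example, the snippet below declares a 4x8 float tile and uses both access paths described above (a standalone OpenCL C sketch, not taken from the library):

TILE(float, 4, 8, acc); // expands to: union { float s[8]; float8 v; } acc[4];

// Vector access: zero-initialise every row.
for(int m0 = 0; m0 < 4; ++m0)
{
    acc[m0].v = (float8)0.0f;
}

// Scalar access: update a single element.
acc[2].s[5] = 1.0f;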
+#define TENSOR4D_IMAGE(name) \
+ __read_only image2d_t name##_img, \
+ __global uchar *name##_ptr, \
+ uint name##_stride_x, \
+ uint name##_step_x, \
+ uint name##_stride_y, \
+ uint name##_step_y, \
+ uint name##_stride_z, \
+ uint name##_step_z, \
+ uint name##_stride_w, \
+ uint name##_step_w, \
+ uint name##_offset_first_element_in_bytes
+
+#define TENSOR4D_BUFFER(name) \
+ __global uchar *name##_ptr, \
+ uint name##_stride_x, \
+ uint name##_step_x, \
+ uint name##_stride_y, \
+ uint name##_step_y, \
+ uint name##_stride_z, \
+ uint name##_step_z, \
+ uint name##_stride_w, \
+ uint name##_step_w, \
+ uint name##_offset_first_element_in_bytes
+
+#define TENSOR4D_STR(name, type) TENSOR4D_##type(name)
+#define TENSOR4D(name, type) TENSOR4D_STR(name, type)
+
+#define TENSOR4D_T_IMAGE(name) \
+ __read_only image2d_t name##_img, \
+ __global uchar *name##_ptr, \
+ uint name##_stride_y, \
+ uint name##_stride_z, \
+ uint name##_stride_w, \
+ uint name##_c, \
+ uint name##_w, \
+ uint name##_h, \
+ uint name##_n, \
+ uint name##_offset_first_element_in_bytes
+
+#define TENSOR4D_T_BUFFER(name) \
+ __global uchar *name##_ptr, \
+ uint name##_stride_y, \
+ uint name##_stride_z, \
+ uint name##_stride_w, \
+ uint name##_c, \
+ uint name##_w, \
+ uint name##_h, \
+ uint name##_n, \
+ uint name##_offset_first_element_in_bytes
+
+#define TENSOR4D_T_STR(name, type) TENSOR4D_T_##type(name)
+
+/** Legacy tensor 4D arguments
+ *
+ * @param[in] name Tensor name. The tensor name is the prefix of the tensor components
+ * @param[in] type Tensor type (BUFFER or IMAGE)
+ */
+#define TENSOR4D_T(name, type) TENSOR4D_T_STR(name, type)
+
+#define TENSOR4D_RO_T_IMAGE(name) \
+ __read_only image2d_t name##_img, \
+ TENSOR4D_T_BUFFER(name)
+
+#define TENSOR4D_RO_T_BUFFER(name) TENSOR4D_T_BUFFER(name)
+
+#define TENSOR4D_RO_T_STR(name, type) TENSOR4D_RO_T_##type(name)
+
+/** Read-Only (RO) tensor 4D.
+ *
+ * @param[in] name Tensor name. The tensor name is the prefix of the tensor components
+ * @param[in] type Tensor type (BUFFER or IMAGE)
+ */
+#define TENSOR4D_RO_T(name, type) TENSOR4D_RO_T_STR(name, type)
+
+#define TENSOR4D_WO_T_IMAGE(name) \
+ __write_only image2d_t name##_img, \
+ TENSOR4D_T_BUFFER(name)
+
+#define TENSOR4D_WO_T_BUFFER(name) TENSOR4D_T_BUFFER(name)
+
+#define TENSOR4D_WO_T_STR(name, type) TENSOR4D_WO_T_##type(name)
+
+/** Write-Only (WO) tensor 4D.
+ *
+ * @param[in] name Tensor name. The tensor name is the prefix of the tensor components
+ * @param[in] type Tensor type (BUFFER or IMAGE)
+ */
+#define TENSOR4D_WO_T(name, type) TENSOR4D_WO_T_STR(name, type)
+
+#define TENSOR3D_T_IMAGE(name) \
+ __read_only image2d_t name##_img, \
+ __global uchar *name##_ptr, \
+ uint name##_stride_y, \
+ uint name##_stride_z, \
+ uint name##_w, \
+ uint name##_h, \
+ uint name##_n, \
+ uint name##_offset_first_element_in_bytes
+
+#define TENSOR3D_T_BUFFER(name) \
+ __global uchar *name##_ptr, \
+ uint name##_stride_y, \
+ uint name##_stride_z, \
+ uint name##_w, \
+ uint name##_h, \
+ uint name##_n, \
+ uint name##_offset_first_element_in_bytes
+
+#define TENSOR3D_T_STR(name, type) TENSOR3D_T_##type(name)
+#define TENSOR3D_T(name, type) TENSOR3D_T_STR(name, type)
+
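As an illustration of how these tensor argument macros flatten into a kernel signature, the two declarations below are equivalent (hypothetical kernel name, based on the TENSOR3D_T_BUFFER expansion above):

__kernel void example_kernel(TENSOR3D_T(src, BUFFER));

// After preprocessing:
__kernel void example_kernel(__global uchar *src_ptr, uint src_stride_y, uint src_stride_z,
                             uint src_w, uint src_h, uint src_n,
                             uint src_offset_first_element_in_bytes);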
+#if !defined(UNROLL_WITH_PRAGMA)
+#define UNROLL_INCR(idx, step, macro) idx += (step); (macro)
+
+#define LOOP_UNROLLING_1(idx, step, macro) (macro)
+#define LOOP_UNROLLING_2(idx, step, macro) LOOP_UNROLLING_1(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_3(idx, step, macro) LOOP_UNROLLING_2(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_4(idx, step, macro) LOOP_UNROLLING_3(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_5(idx, step, macro) LOOP_UNROLLING_4(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_6(idx, step, macro) LOOP_UNROLLING_5(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_7(idx, step, macro) LOOP_UNROLLING_6(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_8(idx, step, macro) LOOP_UNROLLING_7(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_9(idx, step, macro) LOOP_UNROLLING_8(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_10(idx, step, macro) LOOP_UNROLLING_9(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_11(idx, step, macro) LOOP_UNROLLING_10(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_12(idx, step, macro) LOOP_UNROLLING_11(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_13(idx, step, macro) LOOP_UNROLLING_12(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_14(idx, step, macro) LOOP_UNROLLING_13(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_15(idx, step, macro) LOOP_UNROLLING_14(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_16(idx, step, macro) LOOP_UNROLLING_15(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_17(idx, step, macro) LOOP_UNROLLING_16(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_18(idx, step, macro) LOOP_UNROLLING_17(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_19(idx, step, macro) LOOP_UNROLLING_18(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_20(idx, step, macro) LOOP_UNROLLING_19(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_21(idx, step, macro) LOOP_UNROLLING_20(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_22(idx, step, macro) LOOP_UNROLLING_21(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_23(idx, step, macro) LOOP_UNROLLING_22(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_24(idx, step, macro) LOOP_UNROLLING_23(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_25(idx, step, macro) LOOP_UNROLLING_24(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_26(idx, step, macro) LOOP_UNROLLING_25(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_27(idx, step, macro) LOOP_UNROLLING_26(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_28(idx, step, macro) LOOP_UNROLLING_27(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_29(idx, step, macro) LOOP_UNROLLING_28(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_30(idx, step, macro) LOOP_UNROLLING_29(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_31(idx, step, macro) LOOP_UNROLLING_30(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_32(idx, step, macro) LOOP_UNROLLING_31(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_33(idx, step, macro) LOOP_UNROLLING_32(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_34(idx, step, macro) LOOP_UNROLLING_33(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_35(idx, step, macro) LOOP_UNROLLING_34(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_36(idx, step, macro) LOOP_UNROLLING_35(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_37(idx, step, macro) LOOP_UNROLLING_36(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_38(idx, step, macro) LOOP_UNROLLING_37(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_39(idx, step, macro) LOOP_UNROLLING_38(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_40(idx, step, macro) LOOP_UNROLLING_39(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_41(idx, step, macro) LOOP_UNROLLING_40(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_42(idx, step, macro) LOOP_UNROLLING_41(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_43(idx, step, macro) LOOP_UNROLLING_42(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_44(idx, step, macro) LOOP_UNROLLING_43(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_45(idx, step, macro) LOOP_UNROLLING_44(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_46(idx, step, macro) LOOP_UNROLLING_45(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_47(idx, step, macro) LOOP_UNROLLING_46(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_48(idx, step, macro) LOOP_UNROLLING_47(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_49(idx, step, macro) LOOP_UNROLLING_48(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_50(idx, step, macro) LOOP_UNROLLING_49(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_51(idx, step, macro) LOOP_UNROLLING_50(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_52(idx, step, macro) LOOP_UNROLLING_51(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_53(idx, step, macro) LOOP_UNROLLING_52(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_54(idx, step, macro) LOOP_UNROLLING_53(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_55(idx, step, macro) LOOP_UNROLLING_54(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_56(idx, step, macro) LOOP_UNROLLING_55(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_57(idx, step, macro) LOOP_UNROLLING_56(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_58(idx, step, macro) LOOP_UNROLLING_57(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_59(idx, step, macro) LOOP_UNROLLING_58(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_60(idx, step, macro) LOOP_UNROLLING_59(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_61(idx, step, macro) LOOP_UNROLLING_60(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_62(idx, step, macro) LOOP_UNROLLING_61(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_63(idx, step, macro) LOOP_UNROLLING_62(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_64(idx, step, macro) LOOP_UNROLLING_63(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_65(idx, step, macro) LOOP_UNROLLING_64(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_66(idx, step, macro) LOOP_UNROLLING_65(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_67(idx, step, macro) LOOP_UNROLLING_66(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_68(idx, step, macro) LOOP_UNROLLING_67(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_69(idx, step, macro) LOOP_UNROLLING_68(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_70(idx, step, macro) LOOP_UNROLLING_69(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_71(idx, step, macro) LOOP_UNROLLING_70(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_72(idx, step, macro) LOOP_UNROLLING_71(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_73(idx, step, macro) LOOP_UNROLLING_72(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_74(idx, step, macro) LOOP_UNROLLING_73(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_75(idx, step, macro) LOOP_UNROLLING_74(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_76(idx, step, macro) LOOP_UNROLLING_75(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_77(idx, step, macro) LOOP_UNROLLING_76(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_78(idx, step, macro) LOOP_UNROLLING_77(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_79(idx, step, macro) LOOP_UNROLLING_78(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_80(idx, step, macro) LOOP_UNROLLING_79(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_81(idx, step, macro) LOOP_UNROLLING_80(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_82(idx, step, macro) LOOP_UNROLLING_81(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_83(idx, step, macro) LOOP_UNROLLING_82(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_84(idx, step, macro) LOOP_UNROLLING_83(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_85(idx, step, macro) LOOP_UNROLLING_84(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_86(idx, step, macro) LOOP_UNROLLING_85(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_87(idx, step, macro) LOOP_UNROLLING_86(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_88(idx, step, macro) LOOP_UNROLLING_87(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_89(idx, step, macro) LOOP_UNROLLING_88(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_90(idx, step, macro) LOOP_UNROLLING_89(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_91(idx, step, macro) LOOP_UNROLLING_90(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_92(idx, step, macro) LOOP_UNROLLING_91(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_93(idx, step, macro) LOOP_UNROLLING_92(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_94(idx, step, macro) LOOP_UNROLLING_93(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_95(idx, step, macro) LOOP_UNROLLING_94(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_96(idx, step, macro) LOOP_UNROLLING_95(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_97(idx, step, macro) LOOP_UNROLLING_96(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_98(idx, step, macro) LOOP_UNROLLING_97(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_99(idx, step, macro) LOOP_UNROLLING_98(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_100(idx, step, macro) LOOP_UNROLLING_99(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_101(idx, step, macro) LOOP_UNROLLING_100(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_102(idx, step, macro) LOOP_UNROLLING_101(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_103(idx, step, macro) LOOP_UNROLLING_102(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_104(idx, step, macro) LOOP_UNROLLING_103(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_105(idx, step, macro) LOOP_UNROLLING_104(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_106(idx, step, macro) LOOP_UNROLLING_105(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_107(idx, step, macro) LOOP_UNROLLING_106(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_108(idx, step, macro) LOOP_UNROLLING_107(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_109(idx, step, macro) LOOP_UNROLLING_108(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_110(idx, step, macro) LOOP_UNROLLING_109(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_111(idx, step, macro) LOOP_UNROLLING_110(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_112(idx, step, macro) LOOP_UNROLLING_111(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_113(idx, step, macro) LOOP_UNROLLING_112(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_114(idx, step, macro) LOOP_UNROLLING_113(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_115(idx, step, macro) LOOP_UNROLLING_114(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_116(idx, step, macro) LOOP_UNROLLING_115(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_117(idx, step, macro) LOOP_UNROLLING_116(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_118(idx, step, macro) LOOP_UNROLLING_117(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_119(idx, step, macro) LOOP_UNROLLING_118(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_120(idx, step, macro) LOOP_UNROLLING_119(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_121(idx, step, macro) LOOP_UNROLLING_120(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_122(idx, step, macro) LOOP_UNROLLING_121(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_123(idx, step, macro) LOOP_UNROLLING_122(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_124(idx, step, macro) LOOP_UNROLLING_123(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_125(idx, step, macro) LOOP_UNROLLING_124(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_126(idx, step, macro) LOOP_UNROLLING_125(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_127(idx, step, macro) LOOP_UNROLLING_126(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_128(idx, step, macro) LOOP_UNROLLING_127(idx, step, macro); UNROLL_INCR(idx, step, macro)
+
+#define LOOP_UNROLLING_STR(type, idx, start, step, num, macro) \
+ { \
+ type idx = start; \
+ LOOP_UNROLLING_##num(idx, step, macro); \
+ }
+#else // !defined(UNROLL_WITH_PRAGMA)
+#define LOOP_UNROLLING_STR(type, idx, start, step, num, macro) \
+ { \
+ _Pragma("unroll") \
+ for(type idx = start; idx < (num * step); idx += step) \
+ { \
+ (macro); \
+ } \
+ }
+#endif // !defined(UNROLL_WITH_PRAGMA)
+#define LOOP_UNROLLING(type, idx, start, step, num, macro) LOOP_UNROLLING_STR(type, idx, start, step, num, macro)
+
+/** Get the get_global_id with partial N0. This function is useful when the dimension is not a multiple of N0 and we need to use a partial N0
+ * to avoid out-of-bound read/write
+ *
+ * @note PARTIAL_N0 is used for get_global_id(IDX) = 0.
+ *
+ * @param[in] IDX get_global_id index (0, 1 and 2 only)
+ * @param[in] N0 Number of elements read/written on the IDX direction
+ * @param[in] PARTIAL_N0 Number of elements read/written on the IDX direction for get_global_id(IDX) = 0. If zero,
+ * the number of elements read/written on the IDX direction for get_global_id(IDX) = 0 is N0
+ */
+#define GET_SPATIAL_IDX(IDX, N0, PARTIAL_N0) (max((int)(get_global_id(IDX) * N0 - (N0 - PARTIAL_N0) % N0), 0))
+
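+// Illustrative usage (not part of this patch): in a kernel built with -DN0=<n> and -DPARTIAL_N0=<p>
+// compile-time options (assumed names), the destination X coordinate is typically computed as below,
+// so the first work-item is shifted back and processes a partial block instead of reading/writing
+// out of bounds:
+//
+//   const int x = GET_SPATIAL_IDX(0, N0, PARTIAL_N0); // X coordinate with partial-block correction
+//   const int y = GET_SPATIAL_IDX(1, 1, 0);           // Y coordinate, full step
+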
+/** Dot product integer 8-bit function
+ *
+ * @note Performs: c += dot(a, b)
+ *
+ * @param[in] A_DATA_TYPE A (lhs) data type
+ * @param[in] B_DATA_TYPE B (rhs) data type
+ * @param[in] C_DATA_TYPE C (accumulator) data type
+ * @param[in] K0 Number of accumulations
+ * @param[in] a OpenCL vector a
+ * @param[in] b OpenCL vector b
+ * @param[in] c Scalar variable c
+ */
+#define DOT_PRODUCT_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, b, c) DOT_PRODUCT_INTEGER8_STR(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, b, c)
+#define DOT_PRODUCT_INTEGER8_STR(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, b, c) DOT_PRODUCT##K0##_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c)
+#define DOT_PRODUCT1_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ c += (C_DATA_TYPE)(a) * (C_DATA_TYPE)(b); \
+ })
+#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_khr_integer_dot_product)
+#define DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c += dot((A_DATA_TYPE##4)((a).s01, (A_DATA_TYPE##2)(0)), (B_DATA_TYPE##4)(((b).s01), (B_DATA_TYPE##2)(0)));
+#define DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c += dot((A_DATA_TYPE##4)((a).s012, (A_DATA_TYPE)0), (B_DATA_TYPE##4)(((b).s012), (B_DATA_TYPE)0));
+#define DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c += dot((a), (b));
+#elif defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8) // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_khr_integer_dot_product)
+#define DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c = arm_dot_acc((A_DATA_TYPE##4)((a).s01, (A_DATA_TYPE##2)(0)), (B_DATA_TYPE##4)(((b).s01), (B_DATA_TYPE##2)(0)), (c));
+#define DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c = arm_dot_acc((A_DATA_TYPE##4)((a).s012, (A_DATA_TYPE)0), (B_DATA_TYPE##4)(((b).s012), (B_DATA_TYPE)0), (c));
+#define DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c = arm_dot_acc((a), (b), (c));
+#elif defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
+#define DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c += arm_dot((A_DATA_TYPE##4)((a).s01, (A_DATA_TYPE##2)(0)), (B_DATA_TYPE##4)(((b).s01), (B_DATA_TYPE##2)(0)));
+#define DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c += arm_dot((A_DATA_TYPE##4)((a).s012, (A_DATA_TYPE)0), (B_DATA_TYPE##4)(((b).s012), (B_DATA_TYPE)0));
+#define DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c += arm_dot((a), (b));
+#else // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
+#define DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ c += (C_DATA_TYPE)(a).s0 * (C_DATA_TYPE)(b).s0; \
+ c += (C_DATA_TYPE)(a).s1 * (C_DATA_TYPE)(b).s1; \
+ })
+#define DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c); \
+ c += (C_DATA_TYPE)(a).s2 * (C_DATA_TYPE)(b).s2; \
+ })
+#define DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, x, y, val) \
+ ({ \
+ val += (C_DATA_TYPE)(x).s0 * (C_DATA_TYPE)(y).s0; \
+ val += (C_DATA_TYPE)(x).s1 * (C_DATA_TYPE)(y).s1; \
+ val += (C_DATA_TYPE)(x).s2 * (C_DATA_TYPE)(y).s2; \
+ val += (C_DATA_TYPE)(x).s3 * (C_DATA_TYPE)(y).s3; \
+ })
+#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
+#define DOT_PRODUCT5_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s0123), ((b).s0123), c); \
+ DOT_PRODUCT1_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s4), ((b).s4), c); \
+ })
+#define DOT_PRODUCT6_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s0123), ((b).s0123), c); \
+ DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s45), ((b).s45), c); \
+ })
+#define DOT_PRODUCT7_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s0123), ((b).s0123), c); \
+ DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s456), ((b).s456), c); \
+ })
+#define DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).lo), ((b).lo), c); \
+ DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).hi), ((b).hi), c); \
+ })
+#define DOT_PRODUCT9_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \
+ DOT_PRODUCT1_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s8), ((b).s8), c); \
+ })
+#define DOT_PRODUCT10_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \
+ DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89), ((b).s89), c); \
+ })
+#define DOT_PRODUCT11_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \
+ DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89A), ((b).s89A), c); \
+ })
+#define DOT_PRODUCT12_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \
+ DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89AB), ((b).s89AB), c); \
+ })
+#define DOT_PRODUCT13_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \
+ DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89AB), ((b).s89AB), c); \
+ DOT_PRODUCT1_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).sC), ((b).sC), c); \
+ })
+#define DOT_PRODUCT14_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \
+ DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89AB), ((b).s89AB), c); \
+ DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).sCD), ((b).sCD), c); \
+ })
+#define DOT_PRODUCT15_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \
+ DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89AB), ((b).s89AB), c); \
+ DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).sCDE), ((b).sCDE), c); \
+ })
+#define DOT_PRODUCT16_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).lo), ((b).lo), c); \
+ DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).hi), ((b).hi), c); \
+ })
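+
+// Illustrative usage (hypothetical variable names): accumulating a 16-wide 8-bit dot product into a
+// 32-bit scalar. Depending on the build options and extensions checked above, this expands to
+// dot()/arm_dot()/arm_dot_acc() calls or to the plain multiply-accumulate fallback:
+//
+//   char16 a0 = ...; // LHS vector
+//   char16 b0 = ...; // RHS vector
+//   int    acc = 0;  // accumulator
+//   DOT_PRODUCT_INTEGER8(char, char, int, 16, a0, b0, acc); // acc += dot(a0, b0)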
+
+/** Reduction (sum of elements) of an integer 8-bit vector
+ *
+ * @note Performs: c += reduce_add(a), implemented as a dot product between a and a vector of ones
+ *
+ * @param[in] A_DATA_TYPE A (lhs) data type
+ * @param[in] B_DATA_TYPE B (rhs) data type
+ * @param[in] C_DATA_TYPE C (accumulator) data type
+ * @param[in] K0 Number of accumulations
+ * @param[in] a OpenCL vector a
+ * @param[in] c Scalar variable c
+ */
+#define REDUCE_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, c) REDUCE_INTEGER8_STR(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, c)
+#define REDUCE_INTEGER8_STR(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, c) DOT_PRODUCT_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, (TILE_VECTOR_TYPE##K0(B_DATA_TYPE))1, c)
+
+/** Load a vector from global memory (tensor)
+ *
+ * @param[in] DATA_TYPE Data type
+ * @param[in] WIDTH Number of dst columns
+ * @param[in] TENSOR_TYPE Type of cl_type used to store the tensor in global memory (BUFFER=cl_buffer, IMAGE=cl_image).
+ * In case of cl_image, only WIDTH multiples of 4 are supported (4, 8, 16)
+ * @param[in] TENSOR Tensor basename
+ * @param[in] X Starting X position
+ * @param[in] Y Starting Y position
+ * @param[in] STRIDE_Y Stride Y (in bytes)
+ */
+#define V_LOAD(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, Y, STRIDE_Y) V_LOAD_STR(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, Y, STRIDE_Y)
+#define V_LOAD_STR(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, Y, STRIDE_Y) V_LOAD_##TENSOR_TYPE(DATA_TYPE, WIDTH, TENSOR, X, Y, STRIDE_Y)
+#define V_LOAD_BUFFER(DATA_TYPE, WIDTH, TENSOR, X, Y, STRIDE_Y) \
+ VLOAD(WIDTH) \
+ (0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (Y) * (STRIDE_Y)))
+#define V_LOAD_IMAGE(DATA_TYPE, WIDTH, TENSOR, X, Y, STRIDE_Y) READ_IMAGE2D(DATA_TYPE, CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(WIDTH), TENSOR##_img, (X) / 4, (Y))
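+
+// Illustrative usage (hypothetical tensor basename "src"): load 4 contiguous elements from row y of a
+// buffer-backed tensor, or from an OpenCL image when the kernel is built for the IMAGE tensor type.
+// READ_IMAGE2D and the src_ptr/src_img kernel arguments are assumed to follow the usual conventions
+// used by the other CL kernel helpers:
+//
+//   float4 v_buf = V_LOAD(float, 4, BUFFER, src, x, y, src_stride_y); // reads through src_ptr
+//   float4 v_img = V_LOAD(float, 4, IMAGE,  src, x, y, src_stride_y); // reads through src_img; X must be a multiple of 4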
+
+/** Store a vector in global memory (tensor)
+ *
+ * @param[in] DATA_TYPE Data type
+ * @param[in] WIDTH Number of dst columns
+ * @param[in] TENSOR_TYPE Type of cl_type used to store the tensor in global memory (BUFFER=cl_buffer, IMAGE=cl_image).
+ * In case of cl_image, only WIDTH multiples of 4 are supported (4, 8, 16)
+ * @param[in] TENSOR Tensor basename
+ * @param[in] X Starting X position
+ * @param[in] Y Starting Y position
+ * @param[in] STRIDE_Y Stride Y (in bytes)
+ * @param[in] VALUES Values to store in memory
+ */
+#define V_STORE(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, Y, STRIDE_Y, VALUES) V_STORE_STR(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, Y, STRIDE_Y, VALUES)
+#define V_STORE_STR(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, Y, STRIDE_Y, VALUES) V_STORE_##TENSOR_TYPE(DATA_TYPE, WIDTH, TENSOR, X, Y, STRIDE_Y, VALUES)
+#define V_STORE_BUFFER(DATA_TYPE, WIDTH, TENSOR, X, Y, STRIDE_Y, VALUES) \
+ VSTORE(WIDTH) \
+ (VALUES, 0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (Y) * (STRIDE_Y)))
+#define V_STORE_IMAGE(DATA_TYPE, WIDTH, TENSOR, X, Y, STRIDE_Y, VALUES) WRITE_IMAGE2D(DATA_TYPE, CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(WIDTH), TENSOR##_img, (X) / 4, (Y), VALUES)
+
+/** Load a tile from global memory (tensor)
+ *
+ * @param[in] DATA_TYPE Data type
+ * @param[in] HEIGHT Number of dst rows
+ * @param[in] WIDTH Number of dst columns
+ * @param[in] TENSOR_TYPE Type of cl_type used to store the tensor in global memory (BUFFER=cl_buffer, IMAGE=cl_image).
+ * In case of cl_image, only WIDTH multiples of 4 are supported (4, 8, 16)
+ * @param[in] TENSOR Tensor basename
+ * @param[in] X Starting X position
+ * @param[in] Y Starting Y position
+ * @param[in] YI_MULTIPLIER Parameter used to multiply the internal row increment (_i).
+ * In most cases this should be 1, but it becomes useful when the rows to load are spaced by multiples of STRIDE_Y (e.g. when loading the weights of a convolution layer).
+ * In this case the address calculation is performed as: (Y + _i * YI_MULTIPLIER) * STRIDE_Y
+ * @param[in] STRIDE_Y Stride Y (in bytes) used to load each row.
+ * @param[out] dst Output tile
+ */
+#define T_LOAD(DATA_TYPE, HEIGHT, WIDTH, TENSOR_TYPE, TENSOR, X, Y, YI_MULTIPLIER, STRIDE_Y, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \
+ { \
+ dst[_i].v = V_LOAD(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, ((Y) + _i * (int)(YI_MULTIPLIER)), STRIDE_Y); \
+ }) \
+ })
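+
+// Illustrative usage (hypothetical names): load an M0 x N0 block of a tensor "lhs" into a tile "a",
+// assuming the TILE(DATA_TYPE, HEIGHT, WIDTH, BASENAME) declaration helper defined earlier in this
+// header and the usual lhs_ptr/lhs_stride_y/lhs_offset_first_element_in_bytes kernel arguments:
+//
+//   TILE(float, M0, N0, a);                                       // declares a[M0], one .v vector per row
+//   T_LOAD(float, M0, N0, BUFFER, lhs, x, y, 1, lhs_stride_y, a); // a[i].v = row (y + i) starting at column x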
+
+/** Store a VECTOR variable (e.g. int4, int8, char2 etc.) to a specified column in the TILE object
+ *
+ * @param[in] VECTOR Vector variable to store
+ * @param[in, out] TILE Tile variable to store to
+ * @param[in] WIDTH Width of the vector variable, also height of the tile (e.g. 2 if char2)
+ * @param[in] COLUMN Column index of the tile
+ */
+#define COPY_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, WIDTH, COLUMN) COPY_VECTOR_TO_TILE_COLUMN_STR(VECTOR, TILE, WIDTH, COLUMN)
+#define COPY_VECTOR_TO_TILE_COLUMN_STR(VECTOR, TILE, WIDTH, COLUMN) COPY_##WIDTH##_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, COLUMN)
+#define COPY_1_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, COLUMN) \
+ ({ \
+ TILE[0].s[COLUMN] = VECTOR; \
+ })
+
+#define COPY_2_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, COLUMN) \
+ ({ \
+ TILE[0].s[COLUMN] = VECTOR.s0; \
+ TILE[1].s[COLUMN] = VECTOR.s1; \
+ })
+
+#define COPY_3_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, COLUMN) \
+ ({ \
+ TILE[0].s[COLUMN] = VECTOR.s0; \
+ TILE[1].s[COLUMN] = VECTOR.s1; \
+ TILE[2].s[COLUMN] = VECTOR.s2; \
+ })
+
+#define COPY_4_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, COLUMN) \
+ ({ \
+ TILE[0].s[COLUMN] = VECTOR.s0; \
+ TILE[1].s[COLUMN] = VECTOR.s1; \
+ TILE[2].s[COLUMN] = VECTOR.s2; \
+ TILE[3].s[COLUMN] = VECTOR.s3; \
+ })
+
+#define COPY_8_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, COLUMN) \
+ ({ \
+ TILE[0].s[COLUMN] = VECTOR.s0; \
+ TILE[1].s[COLUMN] = VECTOR.s1; \
+ TILE[2].s[COLUMN] = VECTOR.s2; \
+ TILE[3].s[COLUMN] = VECTOR.s3; \
+ TILE[4].s[COLUMN] = VECTOR.s4; \
+ TILE[5].s[COLUMN] = VECTOR.s5; \
+ TILE[6].s[COLUMN] = VECTOR.s6; \
+ TILE[7].s[COLUMN] = VECTOR.s7; \
+ })
+
+#define COPY_16_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, COLUMN) \
+ ({ \
+ TILE[0].s[COLUMN] = VECTOR.s0; \
+ TILE[1].s[COLUMN] = VECTOR.s1; \
+ TILE[2].s[COLUMN] = VECTOR.s2; \
+ TILE[3].s[COLUMN] = VECTOR.s3; \
+ TILE[4].s[COLUMN] = VECTOR.s4; \
+ TILE[5].s[COLUMN] = VECTOR.s5; \
+ TILE[6].s[COLUMN] = VECTOR.s6; \
+ TILE[7].s[COLUMN] = VECTOR.s7; \
+ TILE[8].s[COLUMN] = VECTOR.s8; \
+ TILE[9].s[COLUMN] = VECTOR.s9; \
+ TILE[10].s[COLUMN] = VECTOR.sA; \
+ TILE[11].s[COLUMN] = VECTOR.sB; \
+ TILE[12].s[COLUMN] = VECTOR.sC; \
+ TILE[13].s[COLUMN] = VECTOR.sD; \
+ TILE[14].s[COLUMN] = VECTOR.sE; \
+ TILE[15].s[COLUMN] = VECTOR.sF; \
+ })
+
+/** Load SRC_HEIGHT x SRC_WIDTH elements from global memory (tensor), and store them in a SRC_WIDTH x SRC_HEIGHT tile
+ *
+ * @param[in] DATA_TYPE Data type
+ * @param[in] SRC_HEIGHT Number of source rows, or number of columns of the output tile
+ * @param[in] SRC_WIDTH Number of source columns, or number of tile rows
+ * @param[in] TENSOR_TYPE Type of cl_type used to store the tensor in global memory (BUFFER=cl_buffer, IMAGE=cl_image).
+ * In case of cl_image, only WIDTH multiples of 4 are supported (4, 8, 16)
+ * @param[in] TENSOR Tensor basename
+ * @param[in] X Starting X position
+ * @param[in] Y Starting Y position
+ * @param[in] YI_MULTIPLIER Parameter used to multiply the internal row increment (_i).
+ * In most cases this should be 1, but it becomes useful when the rows to load are spaced by multiples of STRIDE_Y
+ * (e.g. when loading the weights of a convolution layer).
+ * In this case the address calculation is performed as: (Y + _i * YI_MULTIPLIER) * STRIDE_Y
+ * @param[in] STRIDE_Y Stride Y (in bytes) used to load each row.
+ * @param[out] dst Output tile
+ */
+#define T_LOAD_TRANSPOSED(DATA_TYPE, SRC_HEIGHT, SRC_WIDTH, TENSOR_TYPE, TENSOR, X, Y, YI_MULTIPLIER, STRIDE_Y, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _i, 0, 1, SRC_HEIGHT, \
+ { \
+ VEC_DATA_TYPE(DATA_TYPE, SRC_WIDTH) \
+ tmp = V_LOAD(DATA_TYPE, SRC_WIDTH, TENSOR_TYPE, TENSOR, X, ((Y) + _i * (int)(YI_MULTIPLIER)), STRIDE_Y); \
+ COPY_VECTOR_TO_TILE_COLUMN(tmp, dst, SRC_WIDTH, _i); \
+ }) \
+ })
+
+/** Load a tile from global memory (tensor) using an indirect Y index tile
+ *
+ * @param[in] DATA_TYPE Data type
+ * @param[in] HEIGHT Number of dst rows
+ * @param[in] WIDTH Number of dst columns
+ * @param[in] TENSOR_TYPE Type of cl_type used to store the tensor in global memory (BUFFER=cl_buffer, IMAGE=cl_image). Currently BUFFER only is supported
+ * In case of cl_image, only WIDTH multiples of 4 are supported (4, 8, 16)
+ * @param[in] TENSOR Tensor basename
+ * @param[in] X Starting X position
+ * @param[in] STRIDE_Y Stride Y (in bytes)
+ * @param[in] indirect_y Indirect Y index tile
+ * @param[out] dst Output tile
+ */
+#define T_LOAD_INDIRECT(DATA_TYPE, HEIGHT, WIDTH, TENSOR_TYPE, TENSOR, X, STRIDE_Y, indirect_y, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \
+ { \
+ dst[_i].v = V_LOAD(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, (indirect_y[_i].v), STRIDE_Y); \
+ }) \
+ })
+
+/** Load a tile from global memory (tensor) using an indirect Y index tile and conditionally use a different length for the load
+ *
+ * @note If WIDTH1_CONDITION is true, WIDTH1 elements are loaded instead of WIDTH0
+ * @note The vectors are stored in reverse order so the invalid rows are overwritten by the valid ones
+ *
+ * @param[in] DATA_TYPE Data type
+ * @param[in] HEIGHT Number of dst rows
+ * @param[in] WIDTH0 Load width to use if WIDTH1_CONDITION = false
+ * @param[in] WIDTH1 Load width to use if WIDTH1_CONDITION = true
+ * @param[in] TENSOR_TYPE Type of cl_type used to store the tensor in global memory (BUFFER=cl_buffer, IMAGE=cl_image).
+ * In case of cl_image, only WIDTH multiples of 4 are supported (4, 8, 16)
+ * @param[in] TENSOR Tensor basename
+ * @param[in] X Starting X position
+ * @param[in] STRIDE_Y Stride Y (in bytes) used to load each row.
+ * @param[in] WIDTH1_CONDITION Condition to select the WIDTH1 load
+ * @param[out] dst Output tile
+ * @param[in] indirect_y Indirect Y index tile
+ */
+#define T_LOAD_INDIRECT_WIDTH_SELECT(DATA_TYPE, HEIGHT, WIDTH0, WIDTH1, TENSOR_TYPE, TENSOR, X, STRIDE_Y, WIDTH1_CONDITION, dst, indirect_y) \
+ ({ \
+ if(WIDTH1_CONDITION) \
+ { \
+ LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \
+ { \
+ VLOAD_PARTIAL(WIDTH0, WIDTH1) \
+ (dst[HEIGHT - 1 - _i].v, 0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (indirect_y[HEIGHT - 1 - _i].v) * STRIDE_Y)); \
+ }) \
+ } \
+ else \
+ { \
+ LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \
+ { \
+ dst[HEIGHT - 1 - _i].v = V_LOAD(DATA_TYPE, WIDTH0, TENSOR_TYPE, TENSOR, X, (indirect_y[HEIGHT - 1 - _i].v), STRIDE_Y); \
+ }) \
+ } \
+ })
+/** Load a tile from global memory (tensor) when the tensor is stored using a NHWC layout
+ *
+ * @param[in] DATA_TYPE Data type
+ * @param[in] TILE_HEIGHT Number of elements to load from Y (height) dimension
+ * @param[in] TILE_WIDTH Number of elements to load from X (width) dimension
+ * @param[in] TILE_CHANNELS Number of elements to load from C (channel) dimension
+ * @param[in] TENSOR_TYPE Type of cl_type used to store the tensor in global memory (BUFFER=cl_buffer, IMAGE=cl_image). Currently BUFFER only is supported
+ * In case of cl_image, only TILE_CHANNELS multiples of 4 are supported (4, 8, 16)
+ * @param[in] TENSOR Tensor basename
+ * @param[in] B Starting batch index
+ * @param[in] Y Starting Y index
+ * @param[in] X Starting X index
+ * @param[in] C Starting C index
+ * @param[in] TENSOR_HEIGHT Number of elements of the tensor along the Y (height) dimension
+ * @param[in] TENSOR_WIDTH Number of elements of the tensor along the X (width) dimension
+ * @param[in] STRIDE_Y Stride Y (in bytes)
+ * @param[out] dst Output tile
+ */
+#define T_LOAD_NHWC(DATA_TYPE, TILE_HEIGHT, TILE_WIDTH, TILE_CHANNELS, TENSOR_TYPE, TENSOR, B, Y, X, C, TENSOR_WIDTH, TENSOR_HEIGHT, STRIDE_Y, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _yk, 0, 1, TILE_HEIGHT, \
+ { \
+ LOOP_UNROLLING(int, _xk, 0, 1, TILE_WIDTH, \
+ { \
+ int _src_y = (X) + _xk + ((Y) + _yk) * (TENSOR_WIDTH); \
+ _src_y += (B) * (int)(TENSOR_WIDTH) * (int)(TENSOR_HEIGHT); \
+ int _src_valid_y = (((X) + _xk) >= 0 && ((X) + _xk) < (int)(TENSOR_WIDTH) && ((Y) + _yk) >= 0 && ((Y) + _yk) < (int)(TENSOR_HEIGHT)); \
+ if(_src_valid_y != 0) \
+ { \
+ dst[_xk + _yk * (TILE_WIDTH)].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, _src_y, STRIDE_Y); \
+ } \
+ }) \
+ }) \
+ })
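+
+// Illustrative usage (hypothetical names): gather a 3x3 spatial patch of N0 channels from an NHWC
+// tensor "src" into a 9-row tile "in", zero-initialised beforehand so that out-of-bound positions
+// keep the padding value:
+//
+//   TILE(half, 9, N0, in);
+//   LOOP_UNROLLING(int, i, 0, 1, 9, { in[i].v = 0; })
+//   T_LOAD_NHWC(half, 3, 3, N0, BUFFER, src, b, yi, xi, ck, src_w, src_h, src_stride_y, in);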
+
+/** Load a tile from global memory (tensor) when the tensor is stored using a NHWC layout with dilation for the X and Y increments
+ *
+ * @param[in] DATA_TYPE Data type
+ * @param[in] TILE_HEIGHT Number of elements to load from Y (height) dimension
+ * @param[in] TILE_WIDTH Number of elements to load from X (width) dimension
+ * @param[in] TILE_CHANNELS Number of elements to load from C (channel) dimension
+ * @param[in] TENSOR_TYPE Type of cl_type used to store the tensor in global memory (BUFFER=cl_buffer, IMAGE=cl_image). Currently BUFFER only is supported
+ * In case of cl_image, only TILE_CHANNELS multiples of 4 are supported (4, 8, 16)
+ * @param[in] TENSOR Tensor basename
+ * @param[in] B Starting batch index
+ * @param[in] Y Starting Y index
+ * @param[in] X Starting X index
+ * @param[in] C Starting C index
+ * @param[in] TENSOR_HEIGHT Number of elements of the tensor along the Y (height) dimension
+ * @param[in] TENSOR_WIDTH Number of elements of the tensor along the X (width) dimension
+ * @param[in] DILATION_X Dilation for the X increment
+ * @param[in] DILATION_Y Dilation for the Y increment
+ * @param[in] BOUNDARY_CHECK Boundary check flag. If true, it checks for any out-of-bound reads
+ * @param[out] dst Output tile
+ */
+#define T_LOAD_NHWC_WITH_DILATION(DATA_TYPE, TILE_HEIGHT, TILE_WIDTH, TILE_CHANNELS, TENSOR_TYPE, TENSOR, B, Y, X, C, TENSOR_WIDTH, TENSOR_HEIGHT, DILATION_X, DILATION_Y, BOUNDARY_CHECK, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _yk, 0, 1, TILE_HEIGHT, \
+ { \
+ LOOP_UNROLLING(int, _xk, 0, 1, TILE_WIDTH, \
+ { \
+ int _src_y = (X) + _xk * (DILATION_X); \
+ int _src_z = ((Y) + _yk * (DILATION_Y)); \
+ int _src_w = (B); \
+ bool _src_valid_y = (((X) + _xk * (DILATION_X)) >= 0) && (((X) + _xk * (DILATION_X)) < (int)(TENSOR_WIDTH)) && (((Y) + _yk * (DILATION_Y)) >= 0) && (((Y) + _yk * (DILATION_Y)) < (int)(TENSOR_HEIGHT)); \
+ if(!(BOUNDARY_CHECK)) \
+ { \
+ dst[_xk + _yk * (TILE_WIDTH)].v = VLOAD(TILE_CHANNELS) \
+ (0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (C) * sizeof(DATA_TYPE) + (_src_y) * (TENSOR##_stride_y) + (_src_z) * (TENSOR##_stride_z) + (_src_w) * (TENSOR##_stride_w))); \
+ } \
+ else \
+ { \
+ if(_src_valid_y) \
+ { \
+ dst[_xk + _yk * (TILE_WIDTH)].v = VLOAD(TILE_CHANNELS) \
+ (0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (C) * sizeof(DATA_TYPE) + (_src_y) * (TENSOR##_stride_y) + (_src_z) * (TENSOR##_stride_z) + (_src_w) * (TENSOR##_stride_w))); \
+ } \
+ } \
+ }) \
+ }) \
+ })
+
+/** Load a tile from global memory (tensor) when the tensor is stored using a NHWC layout using indirect X and Y coordinates
+ *
+ * @param[in] DATA_TYPE Data type
+ * @param[in] TILE_AREA Number of elements to load from Y (height) dimension * Number of elements to load from X (width) dimension
+ * @param[in] TILE_CHANNELS Number of elements to load from C (channel) dimension
+ * @param[in] TENSOR_TYPE Type of cl_type used to store the tensor in global memory (BUFFER=cl_buffer, IMAGE=cl_image). Currently BUFFER only is supported
+ * In case of cl_image, only TILE_CHANNELS multiples of 4 are supported (4, 8, 16)
+ * @param[in] TENSOR Tensor basename
+ * @param[in] B Starting batch index
+ * @param[in] Y Starting Y index
+ * @param[in] X Starting X index
+ * @param[in] C Starting C index
+ * @param[in] TENSOR_WIDTH Number of elements of the tensor along the X (width) dimension
+ * @param[in] TENSOR_HEIGHT Number of elements of the tensor along the Y (height) dimension
+ * @param[in] STRIDE_Y Stride Y (in bytes)
+ * @param[out] xi A tile with (TILE_WIDTH x TILE_HEIGHT) values with the indirect X coordinate
+ * @param[out] yi A tile with (TILE_WIDTH x TILE_HEIGHT) values with the indirect Y coordinate
+ * @param[out] dst Output tile
+ */
+#define T_LOAD_NHWC_INDIRECT(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, B, Y, X, C, TENSOR_WIDTH, TENSOR_HEIGHT, STRIDE_Y, xi, yi, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _i, 0, 1, TILE_AREA, \
+ { \
+ int _src_y = (X) + xi[_i].v + ((Y) + yi[_i].v) * (TENSOR_WIDTH); \
+ _src_y += (B) * (int)(TENSOR_WIDTH) * (int)(TENSOR_HEIGHT); \
+ int _src_valid_y = (((X) + xi[_i].v) >= 0 && ((X) + xi[_i].v) < (int)(TENSOR_WIDTH) && ((Y) + yi[_i].v) >= 0 && ((Y) + yi[_i].v) < (int)(TENSOR_HEIGHT)); \
+ if(_src_valid_y != 0) \
+ { \
+ dst[_i].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, _src_y, STRIDE_Y); \
+ } \
+ }) \
+ })
+
+/** Load a tile from global memory (tensor) using an indirect buffer for the Y coordinates
+ *
+ * @param[in] DATA_TYPE Data type
+ * @param[in] TILE_AREA Number of elements to load from Y (height) dimension * Number of elements to load from X (width) dimension
+ * @param[in] TILE_CHANNELS Number of elements to load from C (channel) dimension
+ * @param[in] TENSOR_TYPE Type of cl_type used to store the tensor in global memory (BUFFER=cl_buffer, IMAGE=cl_image).
+ * When TENSOR_TYPE=IMAGE, the if condition for the out-of-bound check can be skipped
+ * In case of cl_image, only TILE_CHANNELS multiples of 4 are supported (4, 8, 16)
+ * @param[in] TENSOR Tensor basename
+ * @param[in] C Starting C index
+ * @param[in] STRIDE_Y Stride Y (in bytes)
+ * @param[out] yi A tile with (TILE_WIDTH x TILE_HEIGHT) values with the indirect Y coordinate
+ * 16 is the maximum indirect buffer size.
+ * @param[out] dst Output tile
+ */
+#define T_LOAD2D_INDIRECT(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, STRIDE_Y, yi, dst) T_LOAD2D_INDIRECT_STR(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, STRIDE_Y, yi, dst)
+#define T_LOAD2D_INDIRECT_STR(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, STRIDE_Y, yi, dst) T_LOAD2D_INDIRECT_##TENSOR_TYPE(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, STRIDE_Y, yi, dst)
+#define T_LOAD2D_INDIRECT_BUFFER(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, STRIDE_Y, yi, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _i, 0, 1, TILE_AREA, \
+ { \
+ if(yi[0].s[_i] >= 0) \
+ { \
+ dst[_i].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, yi[0].s[_i], STRIDE_Y); \
+ } \
+ }) \
+ })
+
+#define T_LOAD2D_INDIRECT_IMAGE(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, STRIDE_Y, yi, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _i, 0, 1, TILE_AREA, \
+ { \
+ dst[_i].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, yi[0].s[_i], STRIDE_Y); \
+ }) \
+ })
+
+/** Load a tile from global memory (tensor) when the tensor is stored using a NDHWC layout using indirect X, Y and Z coordinates
+ *
+ * @param[in] DATA_TYPE Data type
+ * @param[in] TILE_AREA Number of elements to load from Y (height) dimension * Number of elements to load from X (width) dimension
+ * @param[in] TILE_CHANNELS Number of elements to load from C (channel) dimension
+ * @param[in] TENSOR_TYPE Type of cl_type used to store the tensor in global memory (BUFFER=cl_buffer, IMAGE=cl_image). Currently BUFFER only is supported
+ * In case of cl_image, only TILE_CHANNELS multiples of 4 are supported (4, 8, 16)
+ * @param[in] TENSOR Tensor basename
+ * @param[in] B Starting batch index
+ * @param[in] Z Starting Z index
+ * @param[in] Y Starting Y index
+ * @param[in] X Starting X index
+ * @param[in] C Starting C index
+ * @param[in] TENSOR_WIDTH Number of elements of the tensor along the X (width) dimension
+ * @param[in] TENSOR_HEIGHT Number of elements of the tensor along the Y (height) dimension
+ * @param[in] TENSOR_DEPTH Number of elements of the tensor along the Z (depth) dimension
+ * @param[in] STRIDE_Y Stride Y (in bytes)
+ * @param[out] xi A tile with (TILE_WIDTH x TILE_HEIGHT) values with the indirect X coordinate
+ * @param[out] yi A tile with (TILE_WIDTH x TILE_HEIGHT) values with the indirect Y coordinate
+ * @param[out] zi A tile with (TILE_WIDTH x TILE_HEIGHT) values with the indirect Z coordinate
+ * @param[out] dst Output tile
+ */
+#define T_LOAD_NDHWC_INDIRECT(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, B, Z, Y, X, C, TENSOR_WIDTH, TENSOR_HEIGHT, TENSOR_DEPTH, STRIDE_Y, xi, yi, zi, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _i, 0, 1, TILE_AREA, \
+ { \
+ int _src_y = (X) + xi[_i].v + ((Y) + yi[_i].v) * (TENSOR_WIDTH) + ((Z) + zi[_i].v) * (TENSOR_WIDTH * TENSOR_HEIGHT); \
+ _src_y += (B) * (int)(TENSOR_WIDTH) * (int)(TENSOR_HEIGHT) * (int)(TENSOR_DEPTH); \
+ int _src_valid_y = (((X) + xi[_i].v) >= 0 && ((X) + xi[_i].v) < (int)(TENSOR_WIDTH) && ((Y) + yi[_i].v) >= 0 && ((Y) + yi[_i].v) < (int)(TENSOR_HEIGHT) \
+ && ((Z) + zi[_i].v) >= 0 && ((Z) + zi[_i].v) < (int)(TENSOR_DEPTH)); \
+ if(_src_valid_y != 0) \
+ { \
+ dst[_i].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, _src_y, STRIDE_Y); \
+ } \
+ }) \
+ })
+
+/** Store a tile to global memory (tensor) using an indirect Y index tile and conditionally use a different length for the store
+ *
+ * @note If WIDTH1_CONDITION is true, the store will use the WIDTH1 length for the store
+ * @note The vectors are stored in reverse order so the invalid rows are overwritten by the valid ones
+ *
+ * @param[in] DATA_TYPE Data type
+ * @param[in] HEIGHT Number of src rows
+ * @param[in] WIDTH0 Store width to use if WIDTH1_CONDITION = false
+ * @param[in] WIDTH1 Store width to use if WIDTH1_CONDITION = true
+ * @param[in] TENSOR_TYPE Type of cl_type used to store the tensor in global memory (BUFFER=cl_buffer, IMAGE=cl_image).
+ * Currently only BUFFER is supported; cl_image is not supported.
+ * @param[in] TENSOR Tensor basename
+ * @param[in] X Starting X position
+ * @param[in] STRIDE_Y Stride Y (in bytes)
+ * @param[in] WIDTH1_CONDITION Condition to select the WIDTH1 store
+ * @param[in] src Input tile
+ * @param[in] indirect_y Indirect Y index tile
+ */
+#define T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, HEIGHT, WIDTH0, WIDTH1, TENSOR_TYPE, TENSOR, X, STRIDE_Y, WIDTH1_CONDITION, src, indirect_y) \
+ ({ \
+ if(WIDTH1_CONDITION) \
+ { \
+ LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \
+ { \
+ VSTORE_PARTIAL(WIDTH0, WIDTH1) \
+ (CONVERT(src[HEIGHT - 1 - _i].v, VEC_DATA_TYPE(DATA_TYPE, WIDTH0)), 0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (indirect_y[HEIGHT - 1 - _i].v) * STRIDE_Y)); \
+ }) \
+ } \
+ else \
+ { \
+ LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \
+ { \
+ VSTORE(WIDTH0) \
+ (CONVERT(src[HEIGHT - 1 - _i].v, VEC_DATA_TYPE(DATA_TYPE, WIDTH0)), 0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (indirect_y[HEIGHT - 1 - _i].v) * STRIDE_Y)); \
+ }) \
+ } \
+ })
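+
+// Illustrative usage (hypothetical names): store an M0 x N0 accumulator tile "c" to the destination
+// tensor "dst", switching to a PARTIAL_N0-wide store for the left-over columns when x_cond is true.
+// indirect_y is a tile holding the destination row index of each tile row:
+//
+//   T_STORE_INDIRECT_WIDTH_SELECT(DST_DATA_TYPE, M0, N0, PARTIAL_N0, BUFFER, dst, x, dst_stride_y, x_cond, c, indirect_y);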
+
+/** Offset correction for the QASYMM8 computation
+ *
+ * @param[in] ACC_DATA_TYPE Accumulator data type
+ * @param[in] M0 Number of src/dst rows
+ * @param[in] N0 Number of src/dst columns
+ * @param[in] K0 Number of src columns
+ * @param[in] SRC_OFFSET Source quantization offset
+ * @param[in] WEI_OFFSET Weights quantization offset
+ * @param[in] lhs LHS tile
+ * @param[in] rhs RHS tile
+ * @param[out] dst DST tile
+ */
+#define T_OFFSET_CORRECTION(ACC_DATA_TYPE, M0, N0, K0, SRC_OFFSET, WEI_OFFSET, lhs, rhs, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
+ ACC_DATA_TYPE _tm = 0; \
+ LOOP_UNROLLING(int, _k0, 0, 1, K0, \
+ { \
+ _tm += ((ACC_DATA_TYPE)lhs[_m0].s[_k0] * (ACC_DATA_TYPE)WEI_OFFSET); \
+ }) \
+ LOOP_UNROLLING(int, _n0, 0, 1, N0, \
+ { \
+ dst[_m0].s[_n0] += _tm; \
+ LOOP_UNROLLING(int, _k0, 0, 1, K0, \
+ { \
+ dst[_m0].s[_n0] += ((ACC_DATA_TYPE)rhs[_n0].s[_k0] * (ACC_DATA_TYPE)SRC_OFFSET); \
+ }) \
+ }) \
+ }) \
+ })
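+
+// For reference, the correction above follows the usual asymmetric-quantization expansion of the GEMM:
+// for each output element,
+//   dst[m][n] += WEI_OFFSET * sum_k(lhs[m][k]) + SRC_OFFSET * sum_k(rhs[n][k])
+// The remaining constant term K0 * SRC_OFFSET * WEI_OFFSET is typically folded into the bias/output
+// stage by the calling kernel (an assumption about the callers, not something enforced by this macro).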
+
+/** 8-bit quantization with fixed-point scale
+ *
+ * @param[in] SRC_DATA_TYPE SRC data type
+ * @param[in] DST_DATA_TYPE DST data type
+ * @param[in] QUANTIZATION_TYPE Quantization type (PER_TENSOR or PER_CHANNEL)
+ * @param[in] M0 Number of src/dst rows
+ * @param[in] N0 Number of src/dst columns
+ * @param[in] DST_OFFSET Quantization offset used for both the per-tensor and per-channel quantization
+ * @param[in] DST_SHIFT Quantization shift for the per-tensor quantization
+ * @param[in] DST_MULTIPLIER Quantization multiplier for the per-tensor quantization
+ * @param[in] src Input tile
+ * @param[in] dst_multipliers Output multipliers tile for the per-channel quantization
+ * @param[in] dst_shifts Output shift tile for the per-channel quantization
+ * @param[out] dst Output tile
+ */
+#define T_QUANTIZE8(SRC_DATA_TYPE, DST_DATA_TYPE, QUANTIZATION_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst) T_QUANTIZE8_STR(SRC_DATA_TYPE, DST_DATA_TYPE, QUANTIZATION_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst)
+#define T_QUANTIZE8_STR(SRC_DATA_TYPE, DST_DATA_TYPE, QUANTIZATION_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst) T_QUANTIZE8_##QUANTIZATION_TYPE(SRC_DATA_TYPE, DST_DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst)
+
+/** 8-bit per-tensor quantization with fixed-point scale
+ *
+ * @param[in] SRC_DATA_TYPE SRC data type
+ * @param[in] DST_DATA_TYPE DST data type
+ * @param[in] M0 Number of src/dst rows
+ * @param[in] N0 Number of src/dst columns
+ * @param[in] DST_OFFSET Quantization offset
+ * @param[in] DST_SHIFT Quantization shift for the per-tensor quantization
+ * @param[in] DST_MULTIPLIER Quantization multiplier for the per-tensor quantization
+ * @param[in] src Input tile
+ * @param[in] dst_multipliers (unused)
+ * @param[in] dst_shifts (unused)
+ * @param[out] dst Output tile
+ */
+#define T_QUANTIZE8_PER_TENSOR(SRC_DATA_TYPE, DST_DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
+ LOOP_UNROLLING(int, _n0, 0, 1, N0, \
+ { \
+ SRC_DATA_TYPE _tmp = 0; \
+ SRC_DATA_TYPE _src = src[_m0].s[_n0]; \
+ _src *= select((SRC_DATA_TYPE)1, ((SRC_DATA_TYPE)1 << (SRC_DATA_TYPE)(-DST_SHIFT)), ((SRC_DATA_TYPE)DST_SHIFT < (SRC_DATA_TYPE)0)); \
+ SRC_DATA_TYPE overflow = _src == DST_MULTIPLIER && _src == INT_MIN; \
+ long a_64 = (long)(_src); \
+ long b_64 = (long)(DST_MULTIPLIER); \
+ long ab_64 = a_64 * b_64; \
+ long mask1 = 1 << 30; \
+ long mask2 = 1 - (1 << 30); \
+ long is_positive_or_zero = ab_64 >= 0; \
+ long nudge = select(mask2, mask1, is_positive_or_zero); \
+ SRC_DATA_TYPE ab_x2_high32 = CONVERT((ab_64 + nudge) / (long)(1ll << 31), SRC_DATA_TYPE); \
+ _tmp = select(ab_x2_high32, (SRC_DATA_TYPE)INT_MAX, overflow); \
+ if(DST_SHIFT >= 0) \
+ { \
+ long mask = ((((int)1) << DST_SHIFT) - (long)1); \
+ long threshold = _tmp < (int)0 ? (mask >> 1) + (long)1 : (mask >> 1) + 0; \
+ _tmp = (_tmp & mask) > threshold ? (_tmp >> DST_SHIFT) + (int)1 : (_tmp >> DST_SHIFT); \
+ } \
+ _tmp += DST_OFFSET; \
+ dst[_m0].s[_n0] = CONVERT_SAT(_tmp, DST_DATA_TYPE); \
+ }) \
+ }) \
+ })
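+
+// The per-tensor path above implements the usual fixed-point requantization: with the real scale
+// expressed as DST_MULTIPLIER * 2^-31 * 2^-DST_SHIFT, the output is approximately
+//   dst = round_to_nearest(src * DST_MULTIPLIER * 2^-31 * 2^-DST_SHIFT) + DST_OFFSET
+// computed as a 64-bit multiply with rounding to the high 32 bits, followed by a rounded arithmetic
+// shift, then saturated to DST_DATA_TYPE (a description of the code above, not additional behaviour).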
+
+/** 8-bit per-channel quantization with fixed-point scale
+ *
+ * @param[in] SRC_DATA_TYPE SRC data type
+ * @param[in] DST_DATA_TYPE DST data type
+ * @param[in] M0 Number of src/dst rows
+ * @param[in] N0 Number of src/dst columns
+ * @param[in] DST_OFFSET Quantization offset
+ * @param[in] DST_SHIFT (unused)
+ * @param[in] DST_MULTIPLIER (unused)
+ * @param[in] src Input tile
+ * @param[in] dst_multipliers Output multipliers tile for the per-channel quantization
+ * @param[in] dst_shifts Output shift tile for the per-channel quantization
+ * @param[out] dst Output tile
+ */
+#define T_QUANTIZE8_PER_CHANNEL(SRC_DATA_TYPE, DST_DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
+ LOOP_UNROLLING(int, _n0, 0, 1, N0, \
+ { \
+ SRC_DATA_TYPE _tmp = 0; \
+ SRC_DATA_TYPE _tmp2 = 0; \
+ SRC_DATA_TYPE _src = src[_m0].s[_n0]; \
+ SRC_DATA_TYPE _dst_multiplier = dst_multipliers[0].s[_n0]; \
+ SRC_DATA_TYPE _dst_shift = dst_shifts[0].s[_n0]; \
+ _src *= select((SRC_DATA_TYPE)1, ((SRC_DATA_TYPE)1 << (SRC_DATA_TYPE)(-_dst_shift)), ((SRC_DATA_TYPE)_dst_shift < (SRC_DATA_TYPE)0)); \
+ SRC_DATA_TYPE overflow = _src == _dst_multiplier && _src == INT_MIN; \
+ long a_64 = (long)(_src); \
+ long b_64 = (long)(_dst_multiplier); \
+ long ab_64 = a_64 * b_64; \
+ long mask1 = 1 << 30; \
+ long mask2 = 1 - (1 << 30); \
+ long is_positive_or_zero = ab_64 >= 0; \
+ long nudge = select(mask2, mask1, is_positive_or_zero); \
+ SRC_DATA_TYPE ab_x2_high32 = CONVERT((ab_64 + nudge) / (long)(1ll << 31), SRC_DATA_TYPE); \
+ _tmp = select(ab_x2_high32, (SRC_DATA_TYPE)INT_MAX, overflow); \
+ long mask = ((((int)1) << _dst_shift) - (int)1); \
+ long threshold = (mask >> 1) + any(_tmp); \
+ _tmp2 = _tmp >> _dst_shift; \
+ _tmp2 += select(0, 1, (_tmp & mask) > threshold); \
+ _tmp = select(_tmp, _tmp2, _dst_shift >= 0); \
+ _tmp += DST_OFFSET; \
+ dst[_m0].s[_n0] = CONVERT_SAT(_tmp, DST_DATA_TYPE); \
+ }) \
+ }) \
+ })
+
+/** Quantize the 8-bit tile with fixed-point scale (asymmetric quantization)
+ *
+ * @param[in] SRC_DATA_TYPE SRC data type
+ * @param[in] DST_DATA_TYPE DST data type
+ * @param[in] M0 Number of src/dst rows
+ * @param[in] N0 Number of src/dst columns
+ * @param[in] DST_OFFSET Quantization offset used for both the per-tensor and per-channel quantization
+ * @param[in] DST_SHIFT Quantization shift for the per-tensor quantization
+ * @param[in] DST_MULTIPLIER Quantization multiplier for the per-tensor quantization
+ * @param[in] src Input tile
+ * @param[out] dst Output tile
+ */
+#define T_QUANTIZE8_ASYMMETRIC(SRC_DATA_TYPE, DST_DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
+ LOOP_UNROLLING(int, _n0, 0, 1, N0, \
+ { \
+ SRC_DATA_TYPE _tmp = 0; \
+ SRC_DATA_TYPE _src = src[_m0].s[_n0]; \
+ _src *= select((SRC_DATA_TYPE)1, ((SRC_DATA_TYPE)1 << (SRC_DATA_TYPE)(-DST_SHIFT)), ((SRC_DATA_TYPE)DST_SHIFT < (SRC_DATA_TYPE)0)); \
+ SRC_DATA_TYPE overflow = _src == DST_MULTIPLIER && _src == INT_MIN; \
+ long a_64 = (long)(_src); \
+ long b_64 = (long)(DST_MULTIPLIER); \
+ long ab_64 = a_64 * b_64; \
+ long mask1 = 1 << 30; \
+ long mask2 = 1 - (1 << 30); \
+ long is_positive_or_zero = ab_64 >= 0; \
+ long nudge = select(mask2, mask1, is_positive_or_zero); \
+ SRC_DATA_TYPE ab_x2_high32 = CONVERT((ab_64 + nudge) / (long)(1ll << 31), SRC_DATA_TYPE); \
+ _tmp = select(ab_x2_high32, (SRC_DATA_TYPE)INT_MAX, overflow); \
+ if(DST_SHIFT >= 0) \
+ { \
+ long mask = ((((int)1) << DST_SHIFT) - (int)1); \
+ long threshold = _tmp < (int)0 ? (mask >> 1) + (long)1 : (mask >> 1) + 0; \
+ _tmp = (_tmp & mask) > threshold ? (_tmp >> DST_SHIFT) + (int)1 : (_tmp >> DST_SHIFT); \
+ } \
+ _tmp += DST_OFFSET; \
+ dst[_m0].s[_n0] = CONVERT_SAT(_tmp, DST_DATA_TYPE); \
+ }) \
+ }) \
+ })
+
+/** Conditional rowset (memset by row)
+ *
+ * @note Set the row to VALUE_TO_SET if the corresponding mask == 0
+ *
+ * @param[in] DATA_TYPE Data type
+ * @param[in] M0 Number of LHS rows
+ * @param[in] N0 Number of LHS columns
+ * @param[in] VALUE_TO_SET Value to set the row
+ * @param[in, out] a Input/output tile
+ * @param[in] mask Mask to check for setting the row to VALUE_TO_SET
+ */
+#define T_ROWSET_MASK(DATA_TYPE, M0, N0, VALUE_TO_SET, a, mask) \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
+ LOOP_UNROLLING(int, _n0, 0, 1, N0, \
+ { \
+ a[_m0].s[_n0] = select((DATA_TYPE)(a[_m0].s[_n0]), (DATA_TYPE)(VALUE_TO_SET), (SELECT_DATA_TYPE(DATA_TYPE))(mask[_m0].v == (DATA_TYPE)0)); \
+ }) \
+ }) \
+ })
+
+/** Element-wise activation for floating point types
+ *
+ * @note Performs: activation(SRC) = DST
+ *
+ * @param[in] DATA_TYPE SRC/DST data type
+ * @param[in] M0 Number of SRC/DST rows
+ * @param[in] N0 Number of SRC/DST columns
+ * @param[in] ACTIVATION_TYPE Activation type
+ * @param[in] A_VAL A value used for the activation (e.g. tanh_op, brelu,..)
+ * @param[in] B_VAL B value used for the activation (e.g. tanh_op, brelu,..)
+ * @param[in] src SRC tile
+ * @param[out] dst DST tile
+ */
+#define T_ACTIVATION(DATA_TYPE, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, src, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
+ dst[_m0].v = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, N0, src[_m0].v, A_VAL, B_VAL); \
+ }) \
+ })
+
+
+// NOTE : A_VAL and B_VAL should be quantized values (using same quantization info as x)
+// RELU Activation
+#define relu_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_POINT, A_VAL, B_VAL, x) (max((DATA_TYPE)ZERO_POINT, x))
+// Bounded RELU Activation
+#define brelu_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_POINT, A_VAL, B_VAL, x) (min((DATA_TYPE)A_VAL, max((DATA_TYPE)ZERO_POINT, x)))
+// Lower Upper Bounded RELU Activation
+#define lu_brelu_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_POINT, A_VAL, B_VAL, x) (min(max(x, (DATA_TYPE)B_VAL), (DATA_TYPE)A_VAL))
+// Hard Swish Activation
+#define hard_swish_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_POINT, A_VAL, B_VAL, x) (x * ((min(max((DATA_TYPE)(x + (DATA_TYPE)3.f), (DATA_TYPE)0.f), (DATA_TYPE)6.f)) * (DATA_TYPE)0.166666667f))
+// Identity Activation
+#define identity_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_POINT, A_VAL, B_VAL, x) (x)
+
+#define ACT_OP_QUANTIZED(op, DATA_TYPE, VEC_SIZE, ZERO_POINT, A_VAL, B_VAL, x) op##_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_POINT, A_VAL, B_VAL, x)
+#define ACTIVATION_QUANTIZED(op, DATA_TYPE, VEC_SIZE, ZERO_POINT, A_VAL, B_VAL, x) ACT_OP_QUANTIZED(op, DATA_TYPE, VEC_SIZE, ZERO_POINT, A_VAL, B_VAL, x)
+
+#define V_ADD(A_VAL, B_VAL) ((A_VAL) + (B_VAL))
+#define V_SUB(A_VAL, B_VAL) ((A_VAL) - (B_VAL))
+#define V_DIV(A_VAL, B_VAL) ((A_VAL) / (B_VAL))
+#define V_MUL(A_VAL, B_VAL) ((A_VAL) * (B_VAL))
+
+/** Element-wise activation for quantized types
+ *
+ * @note Performs: activation(SRC) = DST
+ *
+ * @param[in] DATA_TYPE SRC/DST data type
+ * @param[in] M0 Number of SRC/DST rows
+ * @param[in] N0 Number of SRC/DST columns
+ * @param[in] ACTIVATION_TYPE Activation type
+ * @param[in] ZERO_POINT The zero value to consider in the computation
+ * @param[in] A_VAL Quantized A value used for the activation (e.g. tanh_op, brelu,..)
+ * @param[in] B_VAL Quantized B value used for the activation (e.g. tanh_op, brelu,..)
+ * @param[in] src SRC tile
+ * @param[out] dst DST tile
+ */
+#define T_ACTIVATION_QUANTIZED(DATA_TYPE, M0, N0, ACTIVATION_TYPE, ZERO_POINT, A_VAL, B_VAL, src, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
+ dst[_m0].v = ACTIVATION_QUANTIZED(ACTIVATION_TYPE, DATA_TYPE, N0, ZERO_POINT, A_VAL, B_VAL, src[_m0].v); \
+ }) \
+ })
+
+/** Element-wise addition between two tiles
+ *
+ * @note Performs: LHS + RHS = DST
+ *
+ * @param[in] DATA_TYPE LHS/RHS/DST data type
+ * @param[in] M0 Number of LHS rows
+ * @param[in] N0 Number of LHS columns
+ * @param[in] lhs LHS tile
+ * @param[in] rhs RHS tile
+ * @param[out] dst DST tile
+ */
+#define T_ADD(DATA_TYPE, M0, N0, lhs, rhs, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
+ dst[_m0].v = lhs[_m0].v + rhs[_m0].v; \
+ }) \
+ })
+
+/** Element-wise addition with a constant value
+ *
+ * @note Performs: LHS + constant = DST
+ *
+ * @param[in] DATA_TYPE LHS/RHS/DST data type
+ * @param[in] M0 Number of LHS rows
+ * @param[in] N0 Number of LHS columns
+ * @param[in] lhs LHS tile
+ * @param[in] rhs_constant Constant value
+ * @param[out] dst DST tile
+ */
+#define T_ADD_CONSTANT(DATA_TYPE, M0, N0, lhs, rhs_constant, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
+ dst[_m0].v = lhs[_m0].v + (DATA_TYPE)rhs_constant; \
+ }) \
+ })
+
+#define T_ELTWISE_BROADCAST_ADD_X(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_X(V_ADD, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
+#define T_ELTWISE_BROADCAST_LHS_X_ADD(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_LHS_X(V_ADD, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
+#define T_ELTWISE_BROADCAST_RHS_X_ADD(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_X(V_ADD, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
+
+#define T_ELTWISE_BROADCAST_LHS_X_SUB(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_LHS_X(V_SUB, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
+#define T_ELTWISE_BROADCAST_RHS_X_SUB(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_X(V_SUB, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
+
+#define T_ELTWISE_BROADCAST_DIV_X(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_X(V_DIV, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
+
+#define T_ELTWISE_BROADCAST_LHS_X_MUL(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_LHS_X(V_MUL, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
+#define T_ELTWISE_BROADCAST_RHS_X_MUL(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_X(V_MUL, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
+
+/** Element-wise scale with a constant value
+ *
+ * @note Performs: LHS * constant = DST
+ *
+ * @param[in] DATA_TYPE LHS/RHS/DST data type
+ * @param[in] M0 Number of LHS rows
+ * @param[in] N0 Number of LHS columns
+ * @param[in] lhs LHS tile
+ * @param[in] rhs_constant Constant value
+ * @param[out] dst DST tile
+ */
+#define T_SCALE_CONSTANT(DATA_TYPE, M0, N0, lhs, rhs_constant, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
+ dst[_m0].v = lhs[_m0].v * (DATA_TYPE)rhs_constant; \
+ }) \
+ })
+
+/** Element-wise operation with RHS broadcasted (RHS has the X dimension only)
+ *
+ * @note Performs: LHS OP RHS[broadcasted] = DST
+ * @note Both tiles must have same data type
+ *
+ * @param[in] T_ELWISE_OP Elementwise operator to perform
+ * @param[in] DST_DATA_TYPE DST data type
+ * @param[in] M0 Number of LHS rows
+ * @param[in] N0 Number of LHS columns
+ * @param[in] lhs LHS tile
+ * @param[in] rhs RHS tile
+ * @param[out] dst DST tile
+ */
+#define T_ELTWISE_BROADCAST_X(T_ELWISE_OP, DST_DATA_TYPE, M0, N0, lhs, rhs, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
+ dst[_m0].v = T_ELWISE_OP(CONVERT(lhs[_m0].v, VEC_DATA_TYPE(DST_DATA_TYPE, N0)), CONVERT(rhs[0].v, VEC_DATA_TYPE(DST_DATA_TYPE, N0))); \
+ }) \
+ })
+
+/** Element-wise operation with LHS broadcasted (LHS has the X dimension only)
+ *
+ * @note Performs: LHS[broadcasted] OP RHS = DST
+ * @note Both tiles must have same data type
+ *
+ * @param[in] T_ELWISE_OP Elementwise operator to perform
+ * @param[in] DST_DATA_TYPE DST data type
+ * @param[in] M0 Number of RHS rows
+ * @param[in] N0 Number of RHS columns
+ * @param[in] lhs LHS tile
+ * @param[in] rhs RHS tile
+ * @param[out] dst DST tile
+ */
+#define T_ELTWISE_BROADCAST_LHS_X(T_ELWISE_OP, DST_DATA_TYPE, M0, N0, lhs, rhs, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
+ dst[_m0].v = T_ELWISE_OP(CONVERT(lhs[0].v, VEC_DATA_TYPE(DST_DATA_TYPE, N0)), CONVERT(rhs[_m0].v, VEC_DATA_TYPE(DST_DATA_TYPE, N0))); \
+ }) \
+ })
+
+#define T_ELTWISE_ADD(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE(V_ADD, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
+#define T_ELTWISE_SUB(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE(V_SUB, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
+#define T_ELTWISE_DIV(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE(V_DIV, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
+#define T_ELTWISE_MUL(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE(V_MUL, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
+
+/** Element-wise operation between two tiles (LHS and RHS)
+ *
+ * @note Performs: LHS OP RHS = DST
+ * @note Both tiles must have same data type
+ *
+ * @param[in] T_ELWISE_OP Elementwise operator to perform
+ * @param[in] DST_DATA_TYPE DST data type
+ * @param[in] M0 Number of LHS rows
+ * @param[in] N0 Number of LHS columns
+ * @param[in] lhs LHS tile
+ * @param[in] rhs RHS tile
+ * @param[out] dst DST tile
+ */
+#define T_ELTWISE(T_ELWISE_OP, DST_DATA_TYPE, M0, N0, lhs, rhs, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
+ dst[_m0].v = T_ELWISE_OP(CONVERT(lhs[_m0].v, VEC_DATA_TYPE(DST_DATA_TYPE, N0)), CONVERT(rhs[_m0].v, VEC_DATA_TYPE(DST_DATA_TYPE, N0))); \
+ }) \
+ })
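+
+// Illustrative usage (hypothetical tile names): add a bias tile broadcast along the X dimension to an
+// M0 x N0 accumulator, then add two full tiles element-wise:
+//
+//   T_ELTWISE_BROADCAST_ADD_X(float, M0, N0, c, bias0, c); // c[m].v += bias0[0].v for every row m
+//   T_ELTWISE_ADD(float, M0, N0, c, d, c);                 // c[m].v += d[m].v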
+
+/** Floor operation on a tile
+ *
+ * @note Performs: floor(SRC) = DST
+ * @note Both tiles must have same data type
+ *
+ * @param[in] DST_DATA_TYPE DST data type
+ * @param[in] M0 Number of SRC rows
+ * @param[in] N0 Number of SRC columns
+ * @param[in] src LHS tile
+ * @param[out] dst DST tile
+ */
+#define T_FLOOR(DST_DATA_TYPE, M0, N0, src, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
+ dst[_m0].v = floor(CONVERT(src[_m0].v, VEC_DATA_TYPE(DST_DATA_TYPE, N0))); \
+ }) \
+ })
+
+/** Matrix multiplication
+ *
+ * @note Performs: LHS X RHS + DST = DST
+ *
+ * @param[in] LHS_DATA_TYPE LHS tile data type
+ * @param[in] RHS_DATA_TYPE RHS tile data type
+ * @param[in] DST_DATA_TYPE DST tile data type
+ * @param[in] M0 Number of LHS rows
+ * @param[in] N0 Number of RHS columns
+ * @param[in] K0 Number of LHS columns
+ * @param[in] LHS_LAYOUT LHS layout (T= transposed, NT= not transposed)
+ * @param[in] RHS_LAYOUT RHS layout (T= transposed, NT= not transposed)
+ * @param[in] lhs LHS tile
+ * @param[in] rhs RHS tile
+ * @param[in, out] dst DST tile
+ *
+ * @note For Int8/UInt8 multiplications, only T_MMUL_NT_T is provided because we need
+ * to multiply the rows of the Lhs and Rhs tensors to utilize the dot product extension.
+ * Adding the other versions would require on-the-fly transposition of these tile elements
+ * and is therefore not favored.
+ */
+#define T_MMUL(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, LHS_LAYOUT, RHS_LAYOUT, lhs, rhs, dst) T_MMUL_##LHS_LAYOUT##_##RHS_LAYOUT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_NT_T(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_##LHS_DATA_TYPE##_##RHS_DATA_TYPE##_##DST_DATA_TYPE(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_NT_T_float_float_float(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_NT_T_half_half_float(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_NT_T_half_half_half(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_NT_T_char_char_int(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_NT_T_uchar_uchar_uint(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_NT_T_uchar_uchar_int(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_NT_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) \
+ { \
+ LOOP_UNROLLING(int, _m, 0, 1, M0, \
+ { \
+ LOOP_UNROLLING(int, _n, 0, 1, N0, \
+ { \
+ LOOP_UNROLLING(int, _k, 0, 1, K0, \
+ { \
+ dst[_m].s[_n] = fma((DST_DATA_TYPE)(lhs[_m].s[_k]), (DST_DATA_TYPE)(rhs[_n].s[_k]), dst[_m].s[_n]); \
+ }) \
+ }) \
+ }) \
+ }
+
+#define T_MMUL_NT_NT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_NT_##LHS_DATA_TYPE##_##RHS_DATA_TYPE##_##DST_DATA_TYPE(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_NT_NT_float_float_float(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_NT_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_NT_NT_half_half_float(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_NT_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_NT_NT_half_half_half(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_NT_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_NT_NT_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) \
+ { \
+ LOOP_UNROLLING(int, _m, 0, 1, M0, \
+ { \
+ LOOP_UNROLLING(int, _k, 0, 1, K0, \
+ { \
+ dst[_m].v = fma((DST_DATA_TYPE)(lhs[_m].s[_k]), (rhs[_k].v), dst[_m].v); \
+ }) \
+ }) \
+ }
+
+#define T_MMUL_T_NT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_T_NT_##LHS_DATA_TYPE##_##RHS_DATA_TYPE##_##DST_DATA_TYPE(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_T_NT_float_float_float(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_T_NT_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_T_NT_half_half_float(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_T_NT_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_T_NT_half_half_half(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_T_NT_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_T_NT_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) \
+ { \
+ LOOP_UNROLLING(int, _m, 0, 1, M0, \
+ { \
+ LOOP_UNROLLING(int, _n, 0, 1, N0, \
+ { \
+ LOOP_UNROLLING(int, _k, 0, 1, K0, \
+ { \
+ dst[_m].s[_n] = fma((DST_DATA_TYPE)(lhs[_k].s[_m]), (DST_DATA_TYPE)(rhs[_k].s[_n]), dst[_m].s[_n]); \
+ }) \
+ }) \
+ }) \
+ }
+
+#define T_MMUL_T_T(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_T_T_##LHS_DATA_TYPE##_##RHS_DATA_TYPE##_##DST_DATA_TYPE(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_T_T_float_float_float(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_T_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_T_T_half_half_float(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_T_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_T_T_half_half_half(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_T_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_T_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) \
+ { \
+ LOOP_UNROLLING(int, _m, 0, 1, M0, \
+ { \
+ LOOP_UNROLLING(int, _n, 0, 1, N0, \
+ { \
+ LOOP_UNROLLING(int, _k, 0, 1, K0, \
+ { \
+ dst[_m].s[_n] = fma((DST_DATA_TYPE)(lhs[_k].s[_m]), (DST_DATA_TYPE)(rhs[_n].s[_k]), dst[_m].s[_n]); \
+ }) \
+ }) \
+ }) \
+ }
+
+#define T_MMUL_NT_T_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _m, 0, 1, M0, \
+ { \
+ LOOP_UNROLLING(int, _n, 0, 1, N0, \
+ { \
+ DOT_PRODUCT_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, K0, (lhs[_m].v), (rhs[_n].v), dst[_m].s[_n]); \
+ }) \
+ }) \
+ })
+
+#endif /* ACL_SRC_CORE_CL_CL_KERNELS_TILE_HELPERS */
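For orientation, the NT/T floating-point variant added above is just a fully unrolled triple FMA loop over an M0 x N0 destination tile, with the 8-bit variants dispatching to DOT_PRODUCT_INTEGER8 instead. The following is a minimal C sketch (not part of the patch) of what that expansion computes, assuming small fixed tile sizes and plain float arrays in place of the TILE() vector structs; all names here are hypothetical.

/* Sketch of the arithmetic behind T_MMUL_NT_T_FLOAT, assuming M0 = N0 = 2, K0 = 4. */
#include <stdio.h>

#define M0 2
#define N0 2
#define K0 4

int main(void)
{
    /* lhs is stored non-transposed (M0 x K0); rhs is stored transposed (N0 x K0). */
    float lhs[M0][K0] = {{1, 2, 3, 4}, {5, 6, 7, 8}};
    float rhs[N0][K0] = {{1, 0, 1, 0}, {0, 1, 0, 1}};
    float dst[M0][N0] = {{0}};

    for (int m = 0; m < M0; ++m)
        for (int n = 0; n < N0; ++n)
            for (int k = 0; k < K0; ++k)
                dst[m][n] += lhs[m][k] * rhs[n][k]; /* fma(lhs, rhs, dst) per element */

    for (int m = 0; m < M0; ++m)
        printf("%.1f %.1f\n", dst[m][0], dst[m][1]);
    return 0;
}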
diff --git a/src/core/CL/cl_kernels/warp_helpers.h b/src/core/CL/cl_kernels/warp_helpers.h
index 64828259d0..6595bd1981 100644
--- a/src/core/CL/cl_kernels/warp_helpers.h
+++ b/src/core/CL/cl_kernels/warp_helpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 Arm Limited.
+ * Copyright (c) 2016, 2017, 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -31,14 +31,15 @@
* @param[in] border_size Border size of the image
*
*/
-inline const float8 clamp_to_border_with_size(float8 coords, const float width, const float height, const float border_size)
+inline const float8
+clamp_to_border_with_size(float8 coords, const float width, const float height, const float border_size)
{
const float4 clamped_x = clamp(coords.even, 0.0f - border_size, width - 1 + border_size);
const float4 clamped_y = clamp(coords.odd, 0.0f - border_size, height - 1 + border_size);
- return (float8)(clamped_x.s0, clamped_y.s0, clamped_x.s1, clamped_y.s1, clamped_x.s2, clamped_y.s2, clamped_x.s3, clamped_y.s3);
+ return (float8)(clamped_x.s0, clamped_y.s0, clamped_x.s1, clamped_y.s1, clamped_x.s2, clamped_y.s2, clamped_x.s3,
+ clamped_y.s3);
}
-/* FIXME(COMPMID-682): Clamp border properly in UNDEFINED border mode in Warp, Scale, Remap */
/** Clamps the given coordinates to the borders.
*
* @param[in] coords Vector of 2D coordinates to clamp. Even positions are X coords, odd positions are Y coords.
@@ -64,12 +65,6 @@ inline const VEC_DATA_TYPE(DATA_TYPE, 4) read_texels4(const Image *in, const int
*((__global DATA_TYPE *)offset(in, coords.s6, coords.s7)));
}
-/** Returns the current thread coordinates. */
-inline const float2 get_current_coords()
-{
- return (float2)(get_global_id(0) * 4, get_global_id(1));
-}
-
/** Given a texel coordinates this function will return the following array of coordinates:
* [ P, right neighbour, below neighbour, below right neighbour ]
*
@@ -81,7 +76,8 @@ inline const float2 get_current_coords()
*/
inline const float8 get_neighbour_coords(const float2 coord)
{
- return (float8)(/*tl*/ coord.s0, coord.s1, /*tr*/ coord.s0 + 1, coord.s1, /*bl*/ coord.s0, coord.s1 + 1, /*br*/ coord.s0 + 1, coord.s1 + 1);
+ return (float8)(/*tl*/ coord.s0, coord.s1, /*tr*/ coord.s0 + 1, coord.s1, /*bl*/ coord.s0, coord.s1 + 1,
+ /*br*/ coord.s0 + 1, coord.s1 + 1);
}
/** Computes the bilinear interpolation for each set of coordinates in the vector coords and returns the values
@@ -92,41 +88,41 @@ inline const float8 get_neighbour_coords(const float2 coord)
* @param[in] height Height of the image
* @param[in] border_size Border size
*/
-inline const VEC_DATA_TYPE(DATA_TYPE, 4) bilinear_interpolate_with_border(const Image *in, const float8 coords, const float width, const float height, const float border_size)
+inline const VEC_DATA_TYPE(DATA_TYPE, 4) bilinear_interpolate_with_border(
+ const Image *in, const float8 coords, const float width, const float height, const float border_size)
{
// If any of the 4 texels is out of the image's boundaries we use the border value (REPLICATE or CONSTANT) for any texel out of the image.
// Sets the 4x4 coordinates for each of the four input texels
const float8 fc = floor(coords);
- const float16 c1 = (float16)(
- clamp_to_border_with_size(get_neighbour_coords((float2)(fc.s0, fc.s1)), width, height, border_size),
- clamp_to_border_with_size(get_neighbour_coords((float2)(fc.s2, fc.s3)), width, height, border_size));
- const float16 c2 = (float16)(
- clamp_to_border_with_size(get_neighbour_coords((float2)(fc.s4, fc.s5)), width, height, border_size),
- clamp_to_border_with_size(get_neighbour_coords((float2)(fc.s6, fc.s7)), width, height, border_size));
+ const float16 c1 =
+ (float16)(clamp_to_border_with_size(get_neighbour_coords((float2)(fc.s0, fc.s1)), width, height, border_size),
+ clamp_to_border_with_size(get_neighbour_coords((float2)(fc.s2, fc.s3)), width, height, border_size));
+ const float16 c2 =
+ (float16)(clamp_to_border_with_size(get_neighbour_coords((float2)(fc.s4, fc.s5)), width, height, border_size),
+ clamp_to_border_with_size(get_neighbour_coords((float2)(fc.s6, fc.s7)), width, height, border_size));
// Loads the values from the input image
const float16 t = (float16)(
- /* tl, tr, bl, br */
- * ((__global DATA_TYPE *)offset(in, c1.s0, c1.s1)), *((__global DATA_TYPE *)offset(in, c1.s2, c1.s3)),
- *((__global DATA_TYPE *)offset(in, c1.s4, c1.s5)), *((__global DATA_TYPE *)offset(in, c1.s6, c1.s7)),
- *((__global DATA_TYPE *)offset(in, c1.s8, c1.s9)), *((__global DATA_TYPE *)offset(in, c1.sa, c1.sb)),
- *((__global DATA_TYPE *)offset(in, c1.sc, c1.sd)), *((__global DATA_TYPE *)offset(in, c1.se, c1.sf)),
- *((__global DATA_TYPE *)offset(in, c2.s0, c2.s1)), *((__global DATA_TYPE *)offset(in, c2.s2, c2.s3)),
- *((__global DATA_TYPE *)offset(in, c2.s4, c2.s5)), *((__global DATA_TYPE *)offset(in, c2.s6, c2.s7)),
- *((__global DATA_TYPE *)offset(in, c2.s8, c2.s9)), *((__global DATA_TYPE *)offset(in, c2.sa, c2.sb)),
- *((__global DATA_TYPE *)offset(in, c2.sc, c2.sd)), *((__global DATA_TYPE *)offset(in, c2.se, c2.sf)));
- const float8 a = coords - fc;
- const float8 b = ((float8)(1.f)) - a;
- const float4 fr = (float4)(
- ((t.s0 * b.s0 * b.s1) + (t.s1 * a.s0 * b.s1) + (t.s2 * b.s0 * a.s1) + (t.s3 * a.s0 * a.s1)),
- ((t.s4 * b.s2 * b.s3) + (t.s5 * a.s2 * b.s3) + (t.s6 * b.s2 * a.s3) + (t.s7 * a.s2 * a.s3)),
- ((t.s8 * b.s4 * b.s5) + (t.s9 * a.s4 * b.s5) + (t.sa * b.s4 * a.s5) + (t.sb * a.s4 * a.s5)),
- ((t.sc * b.s6 * b.s7) + (t.sd * a.s6 * b.s7) + (t.se * b.s6 * a.s7) + (t.sf * a.s6 * a.s7)));
+ /* tl, tr, bl, br */
+ *((__global DATA_TYPE *)offset(in, c1.s0, c1.s1)), *((__global DATA_TYPE *)offset(in, c1.s2, c1.s3)),
+ *((__global DATA_TYPE *)offset(in, c1.s4, c1.s5)), *((__global DATA_TYPE *)offset(in, c1.s6, c1.s7)),
+ *((__global DATA_TYPE *)offset(in, c1.s8, c1.s9)), *((__global DATA_TYPE *)offset(in, c1.sa, c1.sb)),
+ *((__global DATA_TYPE *)offset(in, c1.sc, c1.sd)), *((__global DATA_TYPE *)offset(in, c1.se, c1.sf)),
+ *((__global DATA_TYPE *)offset(in, c2.s0, c2.s1)), *((__global DATA_TYPE *)offset(in, c2.s2, c2.s3)),
+ *((__global DATA_TYPE *)offset(in, c2.s4, c2.s5)), *((__global DATA_TYPE *)offset(in, c2.s6, c2.s7)),
+ *((__global DATA_TYPE *)offset(in, c2.s8, c2.s9)), *((__global DATA_TYPE *)offset(in, c2.sa, c2.sb)),
+ *((__global DATA_TYPE *)offset(in, c2.sc, c2.sd)), *((__global DATA_TYPE *)offset(in, c2.se, c2.sf)));
+ const float8 a = coords - fc;
+ const float8 b = ((float8)(1.f)) - a;
+ const float4 fr =
+ (float4)(((t.s0 * b.s0 * b.s1) + (t.s1 * a.s0 * b.s1) + (t.s2 * b.s0 * a.s1) + (t.s3 * a.s0 * a.s1)),
+ ((t.s4 * b.s2 * b.s3) + (t.s5 * a.s2 * b.s3) + (t.s6 * b.s2 * a.s3) + (t.s7 * a.s2 * a.s3)),
+ ((t.s8 * b.s4 * b.s5) + (t.s9 * a.s4 * b.s5) + (t.sa * b.s4 * a.s5) + (t.sb * a.s4 * a.s5)),
+ ((t.sc * b.s6 * b.s7) + (t.sd * a.s6 * b.s7) + (t.se * b.s6 * a.s7) + (t.sf * a.s6 * a.s7)));
return CONVERT(fr, VEC_DATA_TYPE(DATA_TYPE, 4));
}
-/* FIXME(COMPMID-682): Clamp border properly in UNDEFINED border mode in Warp, Scale, Remap */
/** Computes the bilinear interpolation for each set of coordinates in the vector coords and returns the values
*
* @param[in] in Pointer to the source image.
@@ -134,7 +130,8 @@ inline const VEC_DATA_TYPE(DATA_TYPE, 4) bilinear_interpolate_with_border(const
* @param[in] width Width of the image
* @param[in] height Height of the image
*/
-inline const VEC_DATA_TYPE(DATA_TYPE, 4) bilinear_interpolate(const Image *in, const float8 coords, const float width, const float height)
+inline const VEC_DATA_TYPE(DATA_TYPE, 4)
+ bilinear_interpolate(const Image *in, const float8 coords, const float width, const float height)
{
return bilinear_interpolate_with_border(in, coords, width, height, 1);
}
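The reformatted bilinear_interpolate_with_border() above weights the four clamped texels by the fractional part of the sampling coordinate: with a = coords - floor(coords) and b = 1 - a, each output is tl*bx*by + tr*ax*by + bl*bx*ay + br*ax*ay. A minimal C sketch (not part of the patch) of that per-point arithmetic follows, assuming the texels have already been clamped and fetched; the function name is hypothetical.

/* Per-point bilinear weighting as in bilinear_interpolate_with_border(). */
#include <math.h>
#include <stdio.h>

static float bilinear_one(float tl, float tr, float bl, float br, float x, float y)
{
    const float ax = x - floorf(x); /* a = coords - fc in the kernel */
    const float ay = y - floorf(y);
    const float bx = 1.0f - ax;     /* b = 1 - a */
    const float by = 1.0f - ay;
    return tl * bx * by + tr * ax * by + bl * bx * ay + br * ax * ay;
}

int main(void)
{
    /* Sampling at (0.25, 0.75) inside a 2x2 neighbourhood of values. */
    printf("%f\n", bilinear_one(0.0f, 1.0f, 2.0f, 3.0f, 0.25f, 0.75f));
    return 0;
}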
diff --git a/src/core/CL/cl_kernels/warp_helpers_quantized.h b/src/core/CL/cl_kernels/warp_helpers_quantized.h
deleted file mode 100644
index ca21be6765..0000000000
--- a/src/core/CL/cl_kernels/warp_helpers_quantized.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers_asymm.h"
-
-/** Clamps the given coordinates to the borders according to the border size.
- *
- * @param[in] coords Vector of 2D coordinates to clamp. Even positions are X coords, odd positions are Y coords.
- * @param[in] width Width of the image
- * @param[in] height Height of the image
- * @param[in] border_size Border size of the image
- *
- */
-inline const float8 clamp_to_border_with_size_quantized(float8 coords, const float width, const float height, const float border_size)
-{
- const float4 clamped_x = clamp(coords.even, 0.0f - border_size, width - 1 + border_size);
- const float4 clamped_y = clamp(coords.odd, 0.0f - border_size, height - 1 + border_size);
- return (float8)(clamped_x.s0, clamped_y.s0, clamped_x.s1, clamped_y.s1, clamped_x.s2, clamped_y.s2, clamped_x.s3, clamped_y.s3);
-}
-
-/* FIXME(COMPMID-682): Clamp border properly in UNDEFINED border mode in Warp, Scale, Remap */
-/** Clamps the given coordinates to the borders.
- *
- * @param[in] coords Vector of 2D coordinates to clamp. Even positions are X coords, odd positions are Y coords.
- * @param[in] width Width of the image
- * @param[in] height Height of the image
- *
- */
-inline const float8 clamp_to_border_quantized(float8 coords, const float width, const float height)
-{
- return clamp_to_border_with_size_quantized(coords, width, height, 1);
-}
-
-/** Given a texel coordinates this function will return the following array of coordinates:
- * [ P, right neighbour, below neighbour, below right neighbour ]
- *
- * @note No checks to see if the coordinates are out of the image are done here.
- *
- * @param[in] coord Input coordinates
- *
- * @return vector of 8 floats with the coordinates, even positions are x and odd y.
- */
-inline const float8 get_neighbour_coords_quantized(const float2 coord)
-{
- return (float8)(/*tl*/ coord.s0, coord.s1, /*tr*/ coord.s0 + 1, coord.s1, /*bl*/ coord.s0, coord.s1 + 1, /*br*/ coord.s0 + 1, coord.s1 + 1);
-}
-
-/** Returns the current thread coordinates. */
-inline const float2 get_current_coords_quantized()
-{
- return (float2)(get_global_id(0) * 4, get_global_id(1));
-}
-
-/** Computes the bilinear interpolation for each set of coordinates in the vector coords and returns the values
- *
- * @param[in] in Pointer to the source image.
- * @param[in] coords Vector of four 2D coordinates. Even pos is x and odd y.
- * @param[in] width Width of the image
- * @param[in] height Height of the image
- * @param[in] border_size Border size
- * @param[in] scale Scale value
- * @param[in] offset_qasymm Offset value
- */
-inline const VEC_DATA_TYPE(DATA_TYPE, 4) bilinear_interpolate_with_border_quantized(const Image *in, const float8 coords, const float width, const float height, const float border_size,
- const float scale, const int offset_qasymm)
-{
- // If any of the 4 texels is out of the image's boundaries we use the border value (REPLICATE or CONSTANT) for any texel out of the image.
-
- // Sets the 4x4 coordinates for each of the four input texels
- const float8 fc = floor(coords);
- const float16 c1 = (float16)(
- clamp_to_border_with_size_quantized(get_neighbour_coords_quantized((float2)(fc.s0, fc.s1)), width, height, border_size),
- clamp_to_border_with_size_quantized(get_neighbour_coords_quantized((float2)(fc.s2, fc.s3)), width, height, border_size));
- const float16 c2 = (float16)(
- clamp_to_border_with_size_quantized(get_neighbour_coords_quantized((float2)(fc.s4, fc.s5)), width, height, border_size),
- clamp_to_border_with_size_quantized(get_neighbour_coords_quantized((float2)(fc.s6, fc.s7)), width, height, border_size));
-
- // Loads the values from the input image
- const int16 t = (int16)(
- /* tl, tr, bl, br */
- * ((__global DATA_TYPE *)offset(in, c1.s0, c1.s1)), *((__global DATA_TYPE *)offset(in, c1.s2, c1.s3)),
- *((__global DATA_TYPE *)offset(in, c1.s4, c1.s5)), *((__global DATA_TYPE *)offset(in, c1.s6, c1.s7)),
- *((__global DATA_TYPE *)offset(in, c1.s8, c1.s9)), *((__global DATA_TYPE *)offset(in, c1.sa, c1.sb)),
- *((__global DATA_TYPE *)offset(in, c1.sc, c1.sd)), *((__global DATA_TYPE *)offset(in, c1.se, c1.sf)),
- *((__global DATA_TYPE *)offset(in, c2.s0, c2.s1)), *((__global DATA_TYPE *)offset(in, c2.s2, c2.s3)),
- *((__global DATA_TYPE *)offset(in, c2.s4, c2.s5)), *((__global DATA_TYPE *)offset(in, c2.s6, c2.s7)),
- *((__global DATA_TYPE *)offset(in, c2.s8, c2.s9)), *((__global DATA_TYPE *)offset(in, c2.sa, c2.sb)),
- *((__global DATA_TYPE *)offset(in, c2.sc, c2.sd)), *((__global DATA_TYPE *)offset(in, c2.se, c2.sf)));
-
- const float16 inf32 = convert_float16(t - (int16)offset_qasymm) * (float16)scale;
-
- const float8 a = coords - fc;
- const float8 b = ((float8)(1.f)) - a;
- const float4 fr = (float4)(
- ((inf32.s0 * b.s0 * b.s1) + (inf32.s1 * a.s0 * b.s1) + (inf32.s2 * b.s0 * a.s1) + (inf32.s3 * a.s0 * a.s1)),
- ((inf32.s4 * b.s2 * b.s3) + (inf32.s5 * a.s2 * b.s3) + (inf32.s6 * b.s2 * a.s3) + (inf32.s7 * a.s2 * a.s3)),
- ((inf32.s8 * b.s4 * b.s5) + (inf32.s9 * a.s4 * b.s5) + (inf32.sa * b.s4 * a.s5) + (inf32.sb * a.s4 * a.s5)),
- ((inf32.sc * b.s6 * b.s7) + (inf32.sd * a.s6 * b.s7) + (inf32.se * b.s6 * a.s7) + (inf32.sf * a.s6 * a.s7)));
-
- const VEC_DATA_TYPE(DATA_TYPE, 4) res = CONVERT_SAT(convert_int4_sat_rtp(fr / scale) + offset_qasymm, VEC_DATA_TYPE(DATA_TYPE, 4));
-
- return res;
-}
-
-/* FIXME(COMPMID-682): Clamp border properly in UNDEFINED border mode in Warp, Scale, Remap */
-/** Computes the bilinear interpolation for each set of coordinates in the vector coords and returns the values
- *
- * @param[in] in Pointer to the source image.
- * @param[in] coords Vector of four 2D coordinates. Even pos is x and odd y.
- * @param[in] width Width of the image
- * @param[in] height Height of the image
- * @param[in] scale Scale value
- * @param[in] offset_qasymm Offset value
- */
-inline const VEC_DATA_TYPE(DATA_TYPE, 4) bilinear_interpolate_quantized(const Image *in, const float8 coords, const float width, const float height, const float scale, const int offset_qasymm)
-{
- return bilinear_interpolate_with_border_quantized(in, coords, width, height, 1, scale, offset_qasymm);
-}
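The removed quantized helper above follows a dequantize, interpolate, requantize pattern: inf32 = (t - offset_qasymm) * scale, the usual bilinear weighting in float, then convert_int4_sat_rtp(fr / scale) + offset_qasymm. A minimal C sketch (not part of the patch) of that path for a single point, assuming uint8 storage and hypothetical names:

/* Dequantize four texels, interpolate in float, requantize with
 * round-toward-+infinity and saturation, mirroring the removed helper. */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t requantize(float v, float scale, int offset)
{
    int q = (int)ceilf(v / scale) + offset; /* _rtp == round toward +infinity */
    if (q < 0)   q = 0;                     /* saturate to the uint8 range */
    if (q > 255) q = 255;
    return (uint8_t)q;
}

int main(void)
{
    const float   scale  = 0.5f;
    const int     offset = 10;
    const uint8_t tl = 12, tr = 20, bl = 16, br = 24;
    const float   ax = 0.25f, ay = 0.75f;

    /* Dequantize: inf32 = (t - offset_qasymm) * scale */
    const float ftl = (tl - offset) * scale, ftr = (tr - offset) * scale;
    const float fbl = (bl - offset) * scale, fbr = (br - offset) * scale;

    const float fr = ftl * (1 - ax) * (1 - ay) + ftr * ax * (1 - ay)
                   + fbl * (1 - ax) * ay       + fbr * ax * ay;

    printf("%u\n", requantize(fr, scale, offset));
    return 0;
}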
diff --git a/src/core/CL/cl_kernels/winograd_input_transform.cl b/src/core/CL/cl_kernels/winograd_input_transform.cl
deleted file mode 100644
index 5e5b737785..0000000000
--- a/src/core/CL/cl_kernels/winograd_input_transform.cl
+++ /dev/null
@@ -1,2753 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers.h"
-
-#define FILL_ZERO_OUT_OF_BOUND_6_NHWC_H(datatype, basename, y_cond, z_cond) \
- ({ \
- basename##0 = select((datatype)0, basename##0, (SELECT_DATA_TYPE(datatype))(((y_cond##0).s0) && (z_cond))); \
- basename##1 = select((datatype)0, basename##1, (SELECT_DATA_TYPE(datatype))(((y_cond##0).s1) && (z_cond))); \
- basename##2 = select((datatype)0, basename##2, (SELECT_DATA_TYPE(datatype))(((y_cond##0).s2) && (z_cond))); \
- basename##3 = select((datatype)0, basename##3, (SELECT_DATA_TYPE(datatype))(((y_cond##0).s3) && (z_cond))); \
- basename##4 = select((datatype)0, basename##4, (SELECT_DATA_TYPE(datatype))(((y_cond##1).s0) && (z_cond))); \
- basename##5 = select((datatype)0, basename##5, (SELECT_DATA_TYPE(datatype))(((y_cond##1).s1) && (z_cond))); \
- })
-
-#define FILL_ZERO_OUT_OF_BOUND_6_NHWC_V(datatype, basename, y_cond, z_cond) \
- ({ \
- basename##0 = select((datatype)0, basename##0, (SELECT_DATA_TYPE(datatype))((y_cond) && ((z_cond##0).s0))); \
- basename##1 = select((datatype)0, basename##1, (SELECT_DATA_TYPE(datatype))((y_cond) && ((z_cond##0).s1))); \
- basename##2 = select((datatype)0, basename##2, (SELECT_DATA_TYPE(datatype))((y_cond) && ((z_cond##0).s2))); \
- basename##3 = select((datatype)0, basename##3, (SELECT_DATA_TYPE(datatype))((y_cond) && ((z_cond##0).s3))); \
- basename##4 = select((datatype)0, basename##4, (SELECT_DATA_TYPE(datatype))((y_cond) && ((z_cond##1).s0))); \
- basename##5 = select((datatype)0, basename##5, (SELECT_DATA_TYPE(datatype))((y_cond) && ((z_cond##1).s1))); \
- })
-
-#define FILL_ZERO_OUT_OF_BOUND_8_NHWC_H(datatype, basename, y_cond, z_cond) \
- ({ \
- basename##0 = select((datatype)0, basename##0, (SELECT_DATA_TYPE(datatype))(((y_cond##0).s0) && (z_cond))); \
- basename##1 = select((datatype)0, basename##1, (SELECT_DATA_TYPE(datatype))(((y_cond##0).s1) && (z_cond))); \
- basename##2 = select((datatype)0, basename##2, (SELECT_DATA_TYPE(datatype))(((y_cond##0).s2) && (z_cond))); \
- basename##3 = select((datatype)0, basename##3, (SELECT_DATA_TYPE(datatype))(((y_cond##0).s3) && (z_cond))); \
- basename##4 = select((datatype)0, basename##4, (SELECT_DATA_TYPE(datatype))(((y_cond##0).s4) && (z_cond))); \
- basename##5 = select((datatype)0, basename##5, (SELECT_DATA_TYPE(datatype))(((y_cond##0).s5) && (z_cond))); \
- basename##6 = select((datatype)0, basename##6, (SELECT_DATA_TYPE(datatype))(((y_cond##0).s6) && (z_cond))); \
- basename##7 = select((datatype)0, basename##7, (SELECT_DATA_TYPE(datatype))(((y_cond##0).s7) && (z_cond))); \
- })
-
-#define FILL_ZERO_OUT_OF_BOUND_8_NHWC_V(datatype, basename, y_cond, z_cond) \
- ({ \
- basename##0 = select((datatype)0, basename##0, (SELECT_DATA_TYPE(datatype))((y_cond) && ((z_cond##0).s0))); \
- basename##1 = select((datatype)0, basename##1, (SELECT_DATA_TYPE(datatype))((y_cond) && ((z_cond##0).s1))); \
- basename##2 = select((datatype)0, basename##2, (SELECT_DATA_TYPE(datatype))((y_cond) && ((z_cond##0).s2))); \
- basename##3 = select((datatype)0, basename##3, (SELECT_DATA_TYPE(datatype))((y_cond) && ((z_cond##0).s3))); \
- basename##4 = select((datatype)0, basename##4, (SELECT_DATA_TYPE(datatype))((y_cond) && ((z_cond##0).s4))); \
- basename##5 = select((datatype)0, basename##5, (SELECT_DATA_TYPE(datatype))((y_cond) && ((z_cond##0).s5))); \
- basename##6 = select((datatype)0, basename##6, (SELECT_DATA_TYPE(datatype))((y_cond) && ((z_cond##0).s6))); \
- basename##7 = select((datatype)0, basename##7, (SELECT_DATA_TYPE(datatype))((y_cond) && ((z_cond##0).s7))); \
- })
-
-#define OUTPUT_ROW_4x4_5x5(out, tmp, comm_fact) \
- ({ \
- comm_fact.s0 = tmp.s2 - 4.25f * tmp.s4 + tmp.s6; \
- comm_fact.s1 = tmp.s1 - 4.25f * tmp.s3 + tmp.s5; \
- comm_fact.s2 = 2.5f * tmp.s3; \
- comm_fact.s3 = 0.5f * tmp.s1 + 2.f * tmp.s5 - comm_fact.s2; \
- comm_fact.s4 = 0.25f * tmp.s2 - 1.25f * tmp.s4 + tmp.s6; \
- comm_fact.s5 = 4.f * tmp.s2 + tmp.s6 - 5.f * tmp.s4; \
- comm_fact.s6 = 2.f * tmp.s1 + 0.5f * tmp.s5 - comm_fact.s2; \
- \
- out.s0 = tmp.s0 - tmp.s6 + 5.25f * tmp.s4 - 5.25f * tmp.s2; \
- out.s1 = comm_fact.s0 + comm_fact.s1; \
- out.s2 = comm_fact.s0 - comm_fact.s1; \
- out.s3 = comm_fact.s3 + comm_fact.s4; \
- out.s4 = comm_fact.s4 - comm_fact.s3; \
- out.s5 = comm_fact.s5 + comm_fact.s6; \
- out.s6 = comm_fact.s5 - comm_fact.s6; \
- out.s7 = tmp.s7 - tmp.s1 + 5.25f * tmp.s3 - 5.25f * tmp.s5; \
- })
-
-#define OUTPUT_ROW_2x2_7x7(out, tmp, comm_fact) \
- ({ \
- comm_fact.s0 = 36.0f * tmp.s2 - 13.0f * tmp.s4 + tmp.s6; \
- comm_fact.s1 = 36.0f * tmp.s1 - 13.0f * tmp.s3 + 1.0f * tmp.s5; \
- comm_fact.s2 = 9.0f * tmp.s2 - 10.0f * tmp.s4 + tmp.s6; \
- comm_fact.s3 = 18.0f * tmp.s1 - 20.0f * tmp.s3 + 2.0f * tmp.s5; \
- comm_fact.s4 = 4.0f * tmp.s2 - 5.0f * tmp.s4 + tmp.s6; \
- comm_fact.s5 = 12.0f * tmp.s1 - 15.0f * tmp.s3 + 3.0f * tmp.s5; \
- out.s0 = -36.0f * tmp.s0 + 49.0f * tmp.s2 + -14.0f * tmp.s4 + tmp.s6; \
- out.s1 = comm_fact.s0 - comm_fact.s1; \
- out.s2 = comm_fact.s0 + comm_fact.s1; \
- out.s3 = comm_fact.s2 - comm_fact.s3; \
- out.s4 = comm_fact.s2 + comm_fact.s3; \
- out.s5 = comm_fact.s4 - comm_fact.s5; \
- out.s6 = comm_fact.s4 + comm_fact.s5; \
- out.s7 = -36.0f * tmp.s1 + 0.0f * tmp.s2 + 49.0f * tmp.s3 - 14.0f * tmp.s5 + tmp.s7; \
- })
-
-#if defined(NUM_TILES_X) && defined(PAD_LEFT) && defined(PAD_TOP) && defined(OUTPUT_TILE_W) && defined(OUTPUT_TILE_H)
-/** This OpenCL kernel computes the input transform when the kernel size is 3x3/3x1 or 1x3 and the output tile is 2x2/2x1 or 1x2
- *
- * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (i.e.-DNUM_TILES_X=5).
- * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (i.e.-DPAD_LEFT=1 and -DPAD_TOP=0).
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=2
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=2
- * @note If this kernel is used to perform Winograd input transform 3x1, -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note If this kernel is used to perform Winograd input transform 1x3, -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- */
-__kernel void winograd_input_transform_2x2_3x3_stepz1_nchw(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- const int x = get_global_id(0);
- const int y = get_global_id(1);
-#if defined(SRC_DEPTH)
- const int z = get_global_id(2) % SRC_DEPTH;
- const int b = get_global_id(2) / SRC_DEPTH;
-#else /* defined(SRC_DEPTH) */
- const int z = get_global_id(2);
-#endif /* defined(SRC_DEPTH) */
-
- // Compute input address
-#if defined(SRC_DEPTH)
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * OUTPUT_TILE_W * sizeof(DATA_TYPE) + y * OUTPUT_TILE_H * src_stride_y + z * src_stride_z + b * src_stride_w;
-#else /* defined(SRC_DEPTH) */
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * OUTPUT_TILE_W * sizeof(DATA_TYPE) + y * OUTPUT_TILE_H * src_stride_y + z * src_stride_z;
-#endif /* defined(SRC_DEPTH) */
-
- src_addr = src_addr - ((int)PAD_LEFT * sizeof(DATA_TYPE)) - ((int)PAD_TOP * src_stride_y);
-
-#if defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
- VEC_DATA_TYPE(DATA_TYPE, 4)
- in_row0 = vload4(0, (__global DATA_TYPE *)(src_addr));
-#elif defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL) // !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL)
- VEC_DATA_TYPE(DATA_TYPE, 4)
- in_row0 = (VEC_DATA_TYPE(DATA_TYPE, 4))(*((__global DATA_TYPE *)(src_addr + 0 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 1 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 2 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 3 * src_stride_y)));
-#else // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
- VEC_DATA_TYPE(DATA_TYPE, 4)
- in_row0 = vload4(0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y));
- VEC_DATA_TYPE(DATA_TYPE, 4)
- in_row1 = vload4(0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y));
- VEC_DATA_TYPE(DATA_TYPE, 4)
- in_row2 = vload4(0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y));
- VEC_DATA_TYPE(DATA_TYPE, 4)
- in_row3 = vload4(0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_y));
-#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-
- VEC_DATA_TYPE(DATA_TYPE, 4)
- tmp0 = in_row0;
-
-#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
- tmp0 -= in_row2;
-#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-
- DATA_TYPE out00 = tmp0.s0 - tmp0.s2;
- DATA_TYPE out01 = tmp0.s1 + tmp0.s2;
- DATA_TYPE out02 = tmp0.s2 - tmp0.s1;
- DATA_TYPE out03 = tmp0.s1 - tmp0.s3;
-
-#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
- VEC_DATA_TYPE(DATA_TYPE, 4)
- tmp1 = in_row1 + in_row2;
- VEC_DATA_TYPE(DATA_TYPE, 4)
- tmp2 = in_row2 - in_row1;
- VEC_DATA_TYPE(DATA_TYPE, 4)
- tmp3 = in_row1 - in_row3;
-
- DATA_TYPE out10 = tmp1.s0 - tmp1.s2;
- DATA_TYPE out11 = tmp1.s1 + tmp1.s2;
- DATA_TYPE out12 = tmp1.s2 - tmp1.s1;
- DATA_TYPE out13 = tmp1.s1 - tmp1.s3;
-
- DATA_TYPE out20 = tmp2.s0 - tmp2.s2;
- DATA_TYPE out21 = tmp2.s1 + tmp2.s2;
- DATA_TYPE out22 = tmp2.s2 - tmp2.s1;
- DATA_TYPE out23 = tmp2.s1 - tmp2.s3;
-
- DATA_TYPE out30 = tmp3.s0 - tmp3.s2;
- DATA_TYPE out31 = tmp3.s1 + tmp3.s2;
- DATA_TYPE out32 = tmp3.s2 - tmp3.s1;
- DATA_TYPE out33 = tmp3.s1 - tmp3.s3;
-#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-
-#if defined(SRC_DEPTH)
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + z * sizeof(DATA_TYPE) + (x + y * (int)NUM_TILES_X) * dst_stride_y + b * dst_stride_w;
-#else /* defined(SRC_DEPTH) */
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + z * sizeof(DATA_TYPE) + (x + y * (int)NUM_TILES_X) * dst_stride_y;
-#endif /* defined(SRC_DEPTH) */
-
- *((__global DATA_TYPE *)(dst_addr + 0 * dst_stride_z)) = out00; // in_row0.s0; out00;
- *((__global DATA_TYPE *)(dst_addr + 1 * dst_stride_z)) = out01; // in_row0.s1; out01;
- *((__global DATA_TYPE *)(dst_addr + 2 * dst_stride_z)) = out02; // in_row0.s2; out02;
- *((__global DATA_TYPE *)(dst_addr + 3 * dst_stride_z)) = out03; // in_row0.s3; out03;
-
-#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
- *((__global DATA_TYPE *)(dst_addr + 4 * dst_stride_z)) = out10;
- *((__global DATA_TYPE *)(dst_addr + 5 * dst_stride_z)) = out11;
- *((__global DATA_TYPE *)(dst_addr + 6 * dst_stride_z)) = out12;
- *((__global DATA_TYPE *)(dst_addr + 7 * dst_stride_z)) = out13;
- *((__global DATA_TYPE *)(dst_addr + 8 * dst_stride_z)) = out20;
- *((__global DATA_TYPE *)(dst_addr + 9 * dst_stride_z)) = out21;
- *((__global DATA_TYPE *)(dst_addr + 10 * dst_stride_z)) = out22;
- *((__global DATA_TYPE *)(dst_addr + 11 * dst_stride_z)) = out23;
- *((__global DATA_TYPE *)(dst_addr + 12 * dst_stride_z)) = out30;
- *((__global DATA_TYPE *)(dst_addr + 13 * dst_stride_z)) = out31;
- *((__global DATA_TYPE *)(dst_addr + 14 * dst_stride_z)) = out32;
- *((__global DATA_TYPE *)(dst_addr + 15 * dst_stride_z)) = out33;
-#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-}
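The kernel removed above is the Winograd F(2x2,3x3) input transform: each 4x4 input tile d is mapped to B^T * d * B, where the tmp rows apply B^T down the columns and the out** terms apply it across the rows, with B^T = [1 0 -1 0; 0 1 1 0; 0 -1 1 0; 0 1 0 -1]. A minimal C sketch (not part of the patch, names hypothetical) of the same arithmetic on a plain 4x4 tile:

/* Winograd F(2x2,3x3) input transform: out = B^T * d * B. */
#include <stdio.h>

static void winograd_input_2x2_3x3(const float d[4][4], float out[4][4])
{
    float tmp[4][4];
    for (int c = 0; c < 4; ++c)
    {
        tmp[0][c] = d[0][c] - d[2][c]; /* tmp0 = in_row0 - in_row2 */
        tmp[1][c] = d[1][c] + d[2][c]; /* tmp1 = in_row1 + in_row2 */
        tmp[2][c] = d[2][c] - d[1][c]; /* tmp2 = in_row2 - in_row1 */
        tmp[3][c] = d[1][c] - d[3][c]; /* tmp3 = in_row1 - in_row3 */
    }
    for (int r = 0; r < 4; ++r)
    {
        out[r][0] = tmp[r][0] - tmp[r][2]; /* outX0 */
        out[r][1] = tmp[r][1] + tmp[r][2]; /* outX1 */
        out[r][2] = tmp[r][2] - tmp[r][1]; /* outX2 */
        out[r][3] = tmp[r][1] - tmp[r][3]; /* outX3 */
    }
}

int main(void)
{
    const float d[4][4] = {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}, {13, 14, 15, 16}};
    float out[4][4];
    winograd_input_2x2_3x3(d, out);
    for (int r = 0; r < 4; ++r)
        printf("%6.1f %6.1f %6.1f %6.1f\n", out[r][0], out[r][1], out[r][2], out[r][3]);
    return 0;
}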
-
-/** This OpenCL kernel computes the input transform when the kernel size is 3x3/3x1 or 1x3, the output tile is 2x2/2x1 or 1x2 and the number of channels is multiple of 2
- *
- * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (i.e.-DNUM_TILES_X=5).
- * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (i.e.-DPAD_LEFT=1 and -DPAD_TOP=0).
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=2
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=2
- * @note If this kernel is used to perform Winograd input transform 3x1, -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note If this kernel is used to perform Winograd input transform 1x3, -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- */
-__kernel void winograd_input_transform_2x2_3x3_stepz2_nchw(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- const int x = get_global_id(0);
- const int y = get_global_id(1);
-#if defined(SRC_DEPTH)
- const int z = (get_global_id(2) * 2) % SRC_DEPTH;
- const int b = (get_global_id(2) * 2) / SRC_DEPTH;
-#else /* defined(SRC_DEPTH) */
- const int z = get_global_id(2) * 2;
-#endif /* defined(SRC_DEPTH) */
-
- // Compute input address
-#if defined(SRC_DEPTH)
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * OUTPUT_TILE_W * sizeof(DATA_TYPE) + y * OUTPUT_TILE_H * src_stride_y + z * src_stride_z + b * src_stride_w;
-#else /* defined(SRC_DEPTH) */
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * OUTPUT_TILE_W * sizeof(DATA_TYPE) + y * OUTPUT_TILE_H * src_stride_y + z * src_stride_z;
-#endif /* defined(SRC_DEPTH) */
- src_addr = src_addr - ((int)PAD_LEFT * sizeof(DATA_TYPE)) - ((int)PAD_TOP * src_stride_y);
-
-#if defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
- VEC_DATA_TYPE(DATA_TYPE, 4)
- in_row0 = vload4(0, (__global DATA_TYPE *)(src_addr));
-#elif defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL) // !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL)
- VEC_DATA_TYPE(DATA_TYPE, 4)
- in_row0 = (VEC_DATA_TYPE(DATA_TYPE, 4))(*((__global DATA_TYPE *)(src_addr + 0 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 1 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 2 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 3 * src_stride_y)));
-#else // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
- VEC_DATA_TYPE(DATA_TYPE, 4)
- in_row0 = vload4(0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y));
- VEC_DATA_TYPE(DATA_TYPE, 4)
- in_row1 = vload4(0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y));
- VEC_DATA_TYPE(DATA_TYPE, 4)
- in_row2 = vload4(0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y));
- VEC_DATA_TYPE(DATA_TYPE, 4)
- in_row3 = vload4(0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_y));
-#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-
- src_addr += src_stride_z;
-#if defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
- VEC_DATA_TYPE(DATA_TYPE, 4)
- in_row4 = vload4(0, (__global DATA_TYPE *)(src_addr));
-#elif defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL) // !defined(WINOGRAD_FILTER_TRANSFORM_HORIZONTAL)
- VEC_DATA_TYPE(DATA_TYPE, 4)
- in_row4 = (VEC_DATA_TYPE(DATA_TYPE, 4))(*((__global DATA_TYPE *)(src_addr + 0 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 1 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 2 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 3 * src_stride_y)));
-#else // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
- VEC_DATA_TYPE(DATA_TYPE, 4)
- in_row4 = vload4(0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y));
- VEC_DATA_TYPE(DATA_TYPE, 4)
- in_row5 = vload4(0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y));
- VEC_DATA_TYPE(DATA_TYPE, 4)
- in_row6 = vload4(0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y));
- VEC_DATA_TYPE(DATA_TYPE, 4)
- in_row7 = vload4(0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_y));
-#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-
- VEC_DATA_TYPE(DATA_TYPE, 4)
- tmp0 = in_row0;
- VEC_DATA_TYPE(DATA_TYPE, 4)
- tmp4 = in_row4;
-
-#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
- tmp0 -= in_row2;
- tmp4 -= in_row6;
-#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-
- VEC_DATA_TYPE(DATA_TYPE, 2)
- out00 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp0.s0 - tmp0.s2, tmp4.s0 - tmp4.s2);
- VEC_DATA_TYPE(DATA_TYPE, 2)
- out01 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp0.s1 + tmp0.s2, tmp4.s1 + tmp4.s2);
- VEC_DATA_TYPE(DATA_TYPE, 2)
- out02 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp0.s2 - tmp0.s1, tmp4.s2 - tmp4.s1);
- VEC_DATA_TYPE(DATA_TYPE, 2)
- out03 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp0.s1 - tmp0.s3, tmp4.s1 - tmp4.s3);
-
-#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
- VEC_DATA_TYPE(DATA_TYPE, 4)
- tmp1 = in_row1 + in_row2;
- VEC_DATA_TYPE(DATA_TYPE, 4)
- tmp2 = in_row2 - in_row1;
- VEC_DATA_TYPE(DATA_TYPE, 4)
- tmp3 = in_row1 - in_row3;
-
- VEC_DATA_TYPE(DATA_TYPE, 4)
- tmp5 = in_row5 + in_row6;
- VEC_DATA_TYPE(DATA_TYPE, 4)
- tmp6 = in_row6 - in_row5;
- VEC_DATA_TYPE(DATA_TYPE, 4)
- tmp7 = in_row5 - in_row7;
-
- VEC_DATA_TYPE(DATA_TYPE, 2)
- out10 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp1.s0 - tmp1.s2, tmp5.s0 - tmp5.s2);
- VEC_DATA_TYPE(DATA_TYPE, 2)
- out11 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp1.s1 + tmp1.s2, tmp5.s1 + tmp5.s2);
- VEC_DATA_TYPE(DATA_TYPE, 2)
- out12 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp1.s2 - tmp1.s1, tmp5.s2 - tmp5.s1);
- VEC_DATA_TYPE(DATA_TYPE, 2)
- out13 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp1.s1 - tmp1.s3, tmp5.s1 - tmp5.s3);
-
- VEC_DATA_TYPE(DATA_TYPE, 2)
- out20 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp2.s0 - tmp2.s2, tmp6.s0 - tmp6.s2);
- VEC_DATA_TYPE(DATA_TYPE, 2)
- out21 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp2.s1 + tmp2.s2, tmp6.s1 + tmp6.s2);
- VEC_DATA_TYPE(DATA_TYPE, 2)
- out22 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp2.s2 - tmp2.s1, tmp6.s2 - tmp6.s1);
- VEC_DATA_TYPE(DATA_TYPE, 2)
- out23 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp2.s1 - tmp2.s3, tmp6.s1 - tmp6.s3);
-
- VEC_DATA_TYPE(DATA_TYPE, 2)
- out30 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp3.s0 - tmp3.s2, tmp7.s0 - tmp7.s2);
- VEC_DATA_TYPE(DATA_TYPE, 2)
- out31 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp3.s1 + tmp3.s2, tmp7.s1 + tmp7.s2);
- VEC_DATA_TYPE(DATA_TYPE, 2)
- out32 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp3.s2 - tmp3.s1, tmp7.s2 - tmp7.s1);
- VEC_DATA_TYPE(DATA_TYPE, 2)
- out33 = (VEC_DATA_TYPE(DATA_TYPE, 2))(tmp3.s1 - tmp3.s3, tmp7.s1 - tmp7.s3);
-#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-
-#if defined(SRC_DEPTH)
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + z * sizeof(DATA_TYPE) + (x + y * (int)NUM_TILES_X) * dst_stride_y + b * dst_stride_w;
-#else /* defined(SRC_DEPTH) */
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + z * sizeof(DATA_TYPE) + (x + y * (int)NUM_TILES_X) * dst_stride_y;
-#endif /* defined(SRC_DEPTH) */
-
- vstore2(out00, 0, (__global DATA_TYPE *)(dst_addr + 0 * dst_stride_z));
- vstore2(out01, 0, (__global DATA_TYPE *)(dst_addr + 1 * dst_stride_z));
- vstore2(out02, 0, (__global DATA_TYPE *)(dst_addr + 2 * dst_stride_z));
- vstore2(out03, 0, (__global DATA_TYPE *)(dst_addr + 3 * dst_stride_z));
-
-#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
- vstore2(out10, 0, (__global DATA_TYPE *)(dst_addr + 4 * dst_stride_z));
- vstore2(out11, 0, (__global DATA_TYPE *)(dst_addr + 5 * dst_stride_z));
- vstore2(out12, 0, (__global DATA_TYPE *)(dst_addr + 6 * dst_stride_z));
- vstore2(out13, 0, (__global DATA_TYPE *)(dst_addr + 7 * dst_stride_z));
- vstore2(out20, 0, (__global DATA_TYPE *)(dst_addr + 8 * dst_stride_z));
- vstore2(out21, 0, (__global DATA_TYPE *)(dst_addr + 9 * dst_stride_z));
- vstore2(out22, 0, (__global DATA_TYPE *)(dst_addr + 10 * dst_stride_z));
- vstore2(out23, 0, (__global DATA_TYPE *)(dst_addr + 11 * dst_stride_z));
- vstore2(out30, 0, (__global DATA_TYPE *)(dst_addr + 12 * dst_stride_z));
- vstore2(out31, 0, (__global DATA_TYPE *)(dst_addr + 13 * dst_stride_z));
- vstore2(out32, 0, (__global DATA_TYPE *)(dst_addr + 14 * dst_stride_z));
- vstore2(out33, 0, (__global DATA_TYPE *)(dst_addr + 15 * dst_stride_z));
-#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-}
-
-/** This OpenCL kernel computes the input transform when the output tile is 4x4/4x1 or 1x4, the filter size 3x3/3x1 or 1x3 and the data layout is NCHW
- *
- * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (i.e.-DNUM_TILES_X=5).
- * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (i.e.-DPAD_LEFT=1 and -DPAD_TOP=0).
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=2
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=2
- * @note If this kernel is used to perform Winograd input transform 3x1, -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note If this kernel is used to perform Winograd input transform 1x3, -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- */
-__kernel void winograd_input_transform_4x4_3x3_stepz1_nchw(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- const int x = get_global_id(0);
- const int y = get_global_id(1);
-#if defined(SRC_DEPTH)
- const int z = get_global_id(2) % SRC_DEPTH;
- const int b = get_global_id(2) / SRC_DEPTH;
-#else /* defined(SRC_DEPTH) */
- const int z = get_global_id(2);
-#endif /* defined(SRC_DEPTH) */
-
- // Compute input address
-#if defined(SRC_DEPTH)
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * OUTPUT_TILE_W * sizeof(DATA_TYPE) + y * OUTPUT_TILE_H * src_stride_y + z * src_stride_z + b * src_stride_w;
-#else /* defined(SRC_DEPTH) */
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * OUTPUT_TILE_W * sizeof(DATA_TYPE) + y * OUTPUT_TILE_H * src_stride_y + z * src_stride_z;
-#endif /* defined(SRC_DEPTH) */
-
- src_addr = src_addr - ((int)PAD_LEFT * sizeof(DATA_TYPE)) - ((int)PAD_TOP * src_stride_y);
-
-#if defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
- // Row0
- VEC_DATA_TYPE(DATA_TYPE, 4)
- d00 = (VEC_DATA_TYPE(DATA_TYPE, 4))(*((__global DATA_TYPE *)(src_addr + 0 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 1 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 2 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 3 * src_stride_y)));
- VEC_DATA_TYPE(DATA_TYPE, 2)
- d01 = (VEC_DATA_TYPE(DATA_TYPE, 2))(*((__global DATA_TYPE *)(src_addr + 4 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 5 * src_stride_y)));
-#else // defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
- // Row0
- VEC_DATA_TYPE(DATA_TYPE, 4)
- d00 = vload4(0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y));
- VEC_DATA_TYPE(DATA_TYPE, 2)
- d01 = vload2(2, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y));
-#endif // defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-
- DATA_TYPE out0 = 0.0f;
- DATA_TYPE out1 = 0.0f;
- DATA_TYPE out2 = 0.0f;
- DATA_TYPE out3 = 0.0f;
- DATA_TYPE out4 = 0.0f;
- DATA_TYPE out5 = 0.0f;
-
- // Channels [0, 5]: [out00, out01, out02, out03, out04, out05]
- out0 += 16.0f * d00.s0 - 20.0f * d00.s2 + 4.0f * d01.s0;
- out1 += -16.0f * d00.s1 - 16.0f * d00.s2 + 4.0f * d00.s3 + 4.0f * d01.s0;
- out2 += 16.0f * d00.s1 - 16.0f * d00.s2 - 4.0f * d00.s3 + 4.0f * d01.s0;
- out3 += -8.0f * d00.s1 - 4.0f * d00.s2 + 8.0f * d00.s3 + 4.0f * d01.s0;
- out4 += 8.0f * d00.s1 - 4.0f * d00.s2 - 8.0f * d00.s3 + 4.0f * d01.s0;
- out5 += 16.0f * d00.s1 - 20.0f * d00.s3 + 4.0f * d01.s1;
-
-#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
- // Row4
- VEC_DATA_TYPE(DATA_TYPE, 4)
- d40 = vload4(0, (__global DATA_TYPE *)(src_addr + 4 * src_stride_y));
- VEC_DATA_TYPE(DATA_TYPE, 2)
- d41 = vload2(2, (__global DATA_TYPE *)(src_addr + 4 * src_stride_y));
-
- // k0, k1, k2, k3, k4, k5 are common terms for row0, row1, row2, row3 and row4
- DATA_TYPE k0 = d41.s0;
- DATA_TYPE k1 = d41.s0;
- DATA_TYPE k2 = d41.s0;
- DATA_TYPE k3 = d41.s0;
- DATA_TYPE k4 = d41.s0;
- DATA_TYPE k5 = 0.0f;
-
- k0 += 4.0f * d40.s0 - 5.0f * d40.s2;
- k1 += -4.0f * d40.s1 - 4.0f * d40.s2 + d40.s3;
- k2 += 4.0f * d40.s1 - 4.0f * d40.s2 - d40.s3;
- k3 += -2.0f * d40.s1 + 2.0f * d40.s3 - d40.s2;
- k4 += 2.0f * d40.s1 - 2.0f * d40.s3 - d40.s2;
- k5 += 4.0f * d40.s1 - 5.0f * d40.s3 + d41.s1;
-
- out0 += k0;
- out1 += k1;
- out2 += k2;
- out3 += k3;
- out4 += k4;
- out5 += k5;
-
- // Row2
- VEC_DATA_TYPE(DATA_TYPE, 4)
- d20 = vload4(0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y));
- VEC_DATA_TYPE(DATA_TYPE, 2)
- d21 = vload2(2, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y));
-
- out0 += -20.0f * d20.s0 + 25.0f * d20.s2 - 5.0f * d21.s0;
- out1 += +20.0f * d20.s1 + 20.0f * d20.s2 - 5.0f * d20.s3 - 5.0f * d21.s0;
- out2 += -20.0f * d20.s1 + 20.0f * d20.s2 + 5.0f * d20.s3 - 5.0f * d21.s0;
- out3 += +10.0f * d20.s1 + 5.0f * d20.s2 - 10.0f * d20.s3 - 5.0f * d21.s0;
- out4 += -10.0f * d20.s1 + 5.0f * d20.s2 + 10.0f * d20.s3 - 5.0f * d21.s0;
- out5 += -20.0f * d20.s1 + 25.0f * d20.s3 - 5.0f * d21.s1;
-#endif // #if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-
- // Compute destination address
-#if defined(SRC_DEPTH)
- __global DATA_TYPE *dst_addr = (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + z * sizeof(DATA_TYPE) + (x + y * (int)NUM_TILES_X) * dst_stride_y + b * dst_stride_w);
-#else /* defined(SRC_DEPTH) */
- __global DATA_TYPE *dst_addr = (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + z * sizeof(DATA_TYPE) + (x + y * (int)NUM_TILES_X) * dst_stride_y);
-#endif /* defined(SRC_DEPTH) */
-
- uint dst_plane_stride = dst_stride_z / sizeof(DATA_TYPE);
-
- *(dst_addr) = out0;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out1;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out2;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out3;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out4;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out5;
- dst_addr += dst_plane_stride;
-
-#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
- DATA_TYPE out6 = k0;
- DATA_TYPE out7 = k1;
- DATA_TYPE out8 = k2;
- DATA_TYPE out9 = k3;
- DATA_TYPE out10 = k4;
- DATA_TYPE out11 = k5;
- DATA_TYPE out12 = k0;
- DATA_TYPE out13 = k1;
- DATA_TYPE out14 = k2;
- DATA_TYPE out15 = k3;
- DATA_TYPE out16 = k4;
- DATA_TYPE out17 = k5;
- DATA_TYPE out18 = k0;
- DATA_TYPE out19 = k1;
- DATA_TYPE out20 = k2;
- DATA_TYPE out21 = k3;
- DATA_TYPE out22 = k4;
- DATA_TYPE out23 = k5;
- DATA_TYPE out24 = k0;
- DATA_TYPE out25 = k1;
- DATA_TYPE out26 = k2;
- DATA_TYPE out27 = k3;
- DATA_TYPE out28 = k4;
- DATA_TYPE out29 = k5;
-
- // Row1
- VEC_DATA_TYPE(DATA_TYPE, 4)
- d10 = vload4(0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y));
- VEC_DATA_TYPE(DATA_TYPE, 2)
- d11 = vload2(2, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y));
-
- // Row3
- VEC_DATA_TYPE(DATA_TYPE, 4)
- d30 = vload4(0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_y));
- VEC_DATA_TYPE(DATA_TYPE, 2)
- d31 = vload2(2, (__global DATA_TYPE *)(src_addr + 3 * src_stride_y));
-
- // Compute common parts for the channels between [6, 29]
- // Channels [6, 11]: [out10, out11, out12, out13, out14, out15]
- // Channels [12, 17]: [out20, out21, out22, out23, out24, out25]
- DATA_TYPE part0 = -16.0f * d20.s0 + 20.0f * d20.s2 - 4.0f * d21.s0;
- DATA_TYPE part1 = 16.0f * d10.s0 - 20.0f * d10.s2 + 4.0f * d11.s0 - 4.0f * d30.s0 + 5.0f * d30.s2 - d31.s0;
- DATA_TYPE part2 = 16.0f * d20.s2 - 4.0f * d21.s0;
- DATA_TYPE part3 = 16.0f * d20.s1 - 4.0f * d20.s3;
- DATA_TYPE part4 = 16.0f * d10.s2 - 4.0f * d11.s0 - 4.0f * d30.s2 + d31.s0;
- DATA_TYPE part5 = 16.0f * d10.s1 - 4.0f * d10.s3 - 4.0f * d30.s1 + d30.s3;
- DATA_TYPE part6 = 4.0f * d20.s2 - 4.0f * d21.s0;
- DATA_TYPE part7 = 8.0f * d10.s1 - 8.0f * d10.s3 - 2.0f * d30.s1 + 2.0f * d30.s3;
- DATA_TYPE part8 = 4.0f * d10.s2 - 4.0f * d11.s0 - d30.s2 + d31.s0;
- DATA_TYPE part9 = 8.0f * d20.s1 - 8.0f * d20.s3;
- DATA_TYPE part10 = -16.0f * d20.s1 + 20.0f * d20.s3 - 4.0f * d21.s1;
- DATA_TYPE part11 = -16.0f * d10.s1 + 20.0f * d10.s3 - 4.0f * d11.s1 + 4.0f * d30.s1 - 5.0f * d30.s3 + d31.s1;
-
- // Channels [18, 23]: [out30, out31, out32, out33, out34, out35]
- // Channels [24, 29]: [out40, out41, out42, out43, out44, out45]
- DATA_TYPE part12 = 8.0f * d10.s0 - 10.0f * d10.s2 + 2.0f * d11.s0 - 8.0f * d30.s0 + 10.0f * d30.s2 - 2.0f * d31.s0;
- DATA_TYPE part13 = part0 * 0.25f; // -4.0f * d20.s0 + 5.0f * d20.s2 - d21.s0
- DATA_TYPE part14 = part2 * 0.25f; // 4.0f * d20.s2 - d21.s0
- DATA_TYPE part15 = 8.0f * d10.s1 - 2.0f * d10.s3 - 8.0f * d30.s1 + 2.0f * d30.s3;
- DATA_TYPE part16 = 8.0f * d10.s2 - 2.0f * d11.s0 - 8.0f * d30.s2 + 2.0f * d31.s0;
- DATA_TYPE part17 = part3 * 0.25f; // 4.0f * d20.s1 - d20.s3
- DATA_TYPE part18 = part6 * 0.25f; // d20.s2 - d21.s0
- DATA_TYPE part19 = 4.0f * d10.s1 - 4.0f * d10.s3 - 4.0f * d30.s1 + 4.0f * d30.s3;
- DATA_TYPE part20 = 2.0f * d10.s2 - 2.0f * d11.s0 - 2.0f * d30.s2 + 2.0f * d31.s0;
- DATA_TYPE part21 = part9 * 0.25f; // 2.0f * (d20.s1 - d20.s3)
- DATA_TYPE part22 = part10 * 0.25f; // - 4.0f * d20.s1 + 5.0f * d20.s3 - d21.s1
- DATA_TYPE part23 = part11 * 0.5f + 6.0f * d30.s1 - 7.5f * d30.s3 + 1.5f * d31.s1; // - 8.0f * d10.s1 + 10.0f * d10.s3 - 2.0f * d11.s1 + 8.0f * d30.s1 - 10.0f * d30.s3 + 2.0f * d31.s1;
-
- out6 += part0 - part1;
- out12 += part0 + part1;
- out7 += part2 + part3 + part4 + part5;
- out8 += part2 - part3 + part4 - part5;
- out13 += part2 + part3 - part4 - part5;
- out14 += part2 - part3 - part4 + part5;
- out9 += part6 + part7 + part8 + part9;
- out10 += part6 - part7 + part8 - part9;
- out15 += part6 - part7 - part8 + part9;
- out16 += part6 + part7 - part8 - part9;
- out11 += part10 + part11;
- out17 += part10 - part11;
-
- out18 += part13 - part12;
- out24 += part13 + part12;
- out19 += part14 + part15 + part16 + part17;
- out20 += part14 - part15 + part16 - part17;
- out25 += part14 - part15 - part16 + part17;
- out26 += part14 + part15 - part16 - part17;
- out21 += part18 + part19 + part20 + part21;
- out22 += part18 - part19 + part20 - part21;
- out27 += part18 - part19 - part20 + part21;
- out28 += part18 + part19 - part20 - part21;
- out23 += part22 + part23;
- out29 += part22 - part23;
-
- *(dst_addr) = out6;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out7;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out8;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out9;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out10;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out11;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out12;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out13;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out14;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out15;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out16;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out17;
- dst_addr += dst_plane_stride;
-
- *(dst_addr) = out18;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out19;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out20;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out21;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out22;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out23;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out24;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out25;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out26;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out27;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out28;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out29;
- dst_addr += dst_plane_stride;
-
- // Row5
- VEC_DATA_TYPE(DATA_TYPE, 4)
- d50 = vload4(0, (__global DATA_TYPE *)(src_addr + 5 * src_stride_y));
- VEC_DATA_TYPE(DATA_TYPE, 2)
- d51 = vload2(2, (__global DATA_TYPE *)(src_addr + 5 * src_stride_y));
-
- // Channels [30, 35]
- out0 = 16.0f * d10.s0 - 20.0f * d10.s2 - 20.0f * d30.s0 + 25.0f * d30.s2 + 4.0f * d50.s0 - 5.0f * d50.s2 + d51.s0 + 4.0f * d11.s0 - 5.0f * d31.s0;
- out1 = -16.0f * d10.s1 - 16.0f * d10.s2 + 4.0f * d10.s3 + 20.0f * d30.s1 + 20.0f * d30.s2 - 5.0f * d30.s3 - 4.0f * d50.s1 - 4.0f * d50.s2 + d50.s3 + d51.s0 + 4.0f * d11.s0 - 5.0f * d31.s0;
- out2 = 16.0f * d10.s1 - 16.0f * d10.s2 - 4.0f * d10.s3 - 20.0f * d30.s1 + 20.0f * d30.s2 + 5.0f * d30.s3 + 4.0f * d50.s1 - 4.0f * d50.s2 - d50.s3 + d51.s0 + 4.0f * d11.s0 - 5.0f * d31.s0;
- out3 = -8.0f * d10.s1 - 4.0f * d10.s2 + 8.0f * d10.s3 + 10.0f * d30.s1 - 10.0f * d30.s3 + 5.0f * d30.s2 - 2.0f * d50.s1 + 2.0f * d50.s3 - d50.s2 + d51.s0 + 4.0f * d11.s0 - 5.0f * d31.s0;
- out4 = 8.0f * d10.s1 - 4.0f * d10.s2 - 8.0f * d10.s3 - 10.0f * d30.s1 + 5.0f * d30.s2 + 10.0f * d30.s3 + 2.0f * d50.s1 - 2.0f * d50.s3 - d50.s2 + d51.s0 + 4.0f * d11.s0 - 5.0f * d31.s0;
- out5 = 16.0f * d10.s1 - 20.0f * d10.s3 + 4.0f * d11.s1 - 20.0f * d30.s1 + 25.0f * d30.s3 - 5.0f * d31.s1 + 4.0f * d50.s1 - 5.0f * d50.s3 + d51.s1;
-
- *(dst_addr) = out0;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out1;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out2;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out3;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out4;
- dst_addr += dst_plane_stride;
- *(dst_addr) = out5;
- dst_addr += dst_plane_stride;
-#endif // #if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-}
-
-/** This OpenCL kernel computes the input transform when the kernel size is 5x5/5x1 or 1x5, the output tile is 4x4/4x1 or 1x4 and the data layout is NCHW
- *
- * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (e.g. -DNUM_TILES_X=5).
- * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=1 and -DPAD_TOP=0).
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
- * @note If this kernel is used to perform Winograd input transform 5x1, -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note If this kernel is used to perform Winograd input transform 1x5, -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- */
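-// Illustrative build options for this kernel (hypothetical values, not taken from the library;
-// SRC_DEPTH and the HORIZONTAL/VERTICAL defines are optional, as described in the notes above):
-//   -DDATA_TYPE=float -DNUM_TILES_X=14 -DPAD_LEFT=2 -DPAD_TOP=2
-//   -DOUTPUT_TILE_W=4 -DOUTPUT_TILE_H=4 -DSRC_DEPTH=64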
-__kernel void winograd_input_transform_4x4_5x5_stepz1_nchw(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- const int x = get_global_id(0);
- const int y = get_global_id(1);
-#if defined(SRC_DEPTH)
- const int z = get_global_id(2) % SRC_DEPTH;
- const int b = get_global_id(2) / SRC_DEPTH;
-#else /* defined(SRC_DEPTH) */
- const int z = get_global_id(2);
-#endif /* defined(SRC_DEPTH) */
-
- // Compute input address
-#if defined(SRC_DEPTH)
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * OUTPUT_TILE_W * sizeof(DATA_TYPE) + y * OUTPUT_TILE_H * src_stride_y + z * src_stride_z + b * src_stride_w;
-#else /* defined(SRC_DEPTH) */
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * OUTPUT_TILE_W * sizeof(DATA_TYPE) + y * OUTPUT_TILE_H * src_stride_y + z * src_stride_z;
-#endif /* defined(SRC_DEPTH) */
- src_addr = src_addr - ((int)PAD_LEFT * sizeof(DATA_TYPE)) - ((int)PAD_TOP * src_stride_y);
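- // At this point src_addr points at the top-left element of the input patch for this tile:
- // each work-item transforms one 4x4 output tile, which needs an 8x8 input patch
- // (4 + 5 - 1 = 8), or 8x1/1x8 for the horizontal/vertical 1D variants, shifted left/up by
- // the padding amounts.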
-
- // Load input tile
-#if defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
- const VEC_DATA_TYPE(DATA_TYPE, 8) in_row0 = vload8(0, (__global DATA_TYPE *)(src_addr));
-#elif defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL) // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
- const VEC_DATA_TYPE(DATA_TYPE, 8) in_row0 = (VEC_DATA_TYPE(DATA_TYPE, 8))(*((__global DATA_TYPE *)(src_addr + 0 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 1 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 2 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 3 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 4 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 5 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 6 * src_stride_y)),
- *((__global DATA_TYPE *)(src_addr + 7 * src_stride_y)));
-#else // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
- const VEC_DATA_TYPE(DATA_TYPE, 8) in_row0 = vload8(0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y));
- const VEC_DATA_TYPE(DATA_TYPE, 8) in_row1 = vload8(0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y));
- const VEC_DATA_TYPE(DATA_TYPE, 8) in_row2 = vload8(0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y));
- const VEC_DATA_TYPE(DATA_TYPE, 8) in_row3 = vload8(0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_y));
- const VEC_DATA_TYPE(DATA_TYPE, 8) in_row4 = vload8(0, (__global DATA_TYPE *)(src_addr + 4 * src_stride_y));
- const VEC_DATA_TYPE(DATA_TYPE, 8) in_row5 = vload8(0, (__global DATA_TYPE *)(src_addr + 5 * src_stride_y));
- const VEC_DATA_TYPE(DATA_TYPE, 8) in_row6 = vload8(0, (__global DATA_TYPE *)(src_addr + 6 * src_stride_y));
- const VEC_DATA_TYPE(DATA_TYPE, 8) in_row7 = vload8(0, (__global DATA_TYPE *)(src_addr + 7 * src_stride_y));
-#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-
- // Calculate common factors for intermediate tensor
- VEC_DATA_TYPE(DATA_TYPE, 8)
- tmp0 = in_row0;
- VEC_DATA_TYPE(DATA_TYPE, 8)
- comm_fact0 = 0.0f;
-
-#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
- comm_fact0 += in_row2 + in_row6 - (DATA_TYPE)4.25 * in_row4;
- tmp0 += -in_row6 + (DATA_TYPE)5.25 * in_row4 - (DATA_TYPE)5.25 * in_row2;
-
- VEC_DATA_TYPE(DATA_TYPE, 8)
- comm_fact1 = in_row1 + in_row5 - (DATA_TYPE)4.25 * in_row3;
- VEC_DATA_TYPE(DATA_TYPE, 8)
- comm_fact2 = (DATA_TYPE)0.25 * in_row2 - (DATA_TYPE)1.25 * in_row4 + in_row6;
-
- const VEC_DATA_TYPE(DATA_TYPE, 8) tmp1 = comm_fact0 + comm_fact1;
- const VEC_DATA_TYPE(DATA_TYPE, 8) tmp2 = comm_fact0 - comm_fact1;
-
- comm_fact0 = (DATA_TYPE)2.5 * in_row3;
- comm_fact1 = (DATA_TYPE)0.5 * in_row1 - comm_fact0 + (DATA_TYPE)2.0 * in_row5;
-
- const VEC_DATA_TYPE(DATA_TYPE, 8) tmp3 = comm_fact1 + comm_fact2;
- const VEC_DATA_TYPE(DATA_TYPE, 8) tmp4 = comm_fact2 - comm_fact1;
-
- comm_fact1 = (DATA_TYPE)2.0 * in_row1 - comm_fact0 + (DATA_TYPE)0.5 * in_row5;
- comm_fact2 = (DATA_TYPE)4.0 * in_row2 - (DATA_TYPE)5.0 * in_row4 + in_row6;
-
- const VEC_DATA_TYPE(DATA_TYPE, 8) tmp5 = comm_fact1 + comm_fact2;
- const VEC_DATA_TYPE(DATA_TYPE, 8) tmp6 = comm_fact2 - comm_fact1;
- const VEC_DATA_TYPE(DATA_TYPE, 8) tmp7 = in_row7 - in_row1 + (DATA_TYPE)5.25 * in_row3 - (DATA_TYPE)5.25 * in_row5;
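- // tmp0..tmp7 are the rows of B^T * d for the 4x4/5x5 Winograd input transform, written with
- // shared sub-expressions (comm_fact0..2) to reduce the operation count; OUTPUT_ROW_4x4_5x5
- // below applies the same 1D transform along the other dimension.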
-#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-
- // Calculate output rows (reuse comm_fact0 vector)
- VEC_DATA_TYPE(DATA_TYPE, 8)
- out0;
-
- OUTPUT_ROW_4x4_5x5(out0, tmp0, comm_fact0);
-
-#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
- VEC_DATA_TYPE(DATA_TYPE, 8)
- out1, out2, out3, out4, out5, out6, out7;
-
- OUTPUT_ROW_4x4_5x5(out1, tmp1, comm_fact0);
- OUTPUT_ROW_4x4_5x5(out2, tmp2, comm_fact0);
- OUTPUT_ROW_4x4_5x5(out3, tmp3, comm_fact0);
- OUTPUT_ROW_4x4_5x5(out4, tmp4, comm_fact0);
- OUTPUT_ROW_4x4_5x5(out5, tmp5, comm_fact0);
- OUTPUT_ROW_4x4_5x5(out6, tmp6, comm_fact0);
- OUTPUT_ROW_4x4_5x5(out7, tmp7, comm_fact0);
-#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-
- // Store values across the channels
-#if defined(SRC_DEPTH)
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + z * sizeof(DATA_TYPE) + (x + y * (int)NUM_TILES_X) * dst_stride_y + b * dst_stride_w;
-#else /* defined(SRC_DEPTH) */
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + z * sizeof(DATA_TYPE) + (x + y * (int)NUM_TILES_X) * dst_stride_y;
-#endif /* defined(SRC_DEPTH) */
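- // Each of the 64 transformed values (8 for the 1D variants) is written to a different plane
- // of the destination: the Winograd channel index runs along Z, the tile index
- // (x + y * NUM_TILES_X) along Y and the input channel z along X.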
-
- *((__global DATA_TYPE *)(dst_addr + 0 * dst_stride_z)) = out0.s0;
- *((__global DATA_TYPE *)(dst_addr + 1 * dst_stride_z)) = out0.s1;
- *((__global DATA_TYPE *)(dst_addr + 2 * dst_stride_z)) = out0.s2;
- *((__global DATA_TYPE *)(dst_addr + 3 * dst_stride_z)) = out0.s3;
- *((__global DATA_TYPE *)(dst_addr + 4 * dst_stride_z)) = out0.s4;
- *((__global DATA_TYPE *)(dst_addr + 5 * dst_stride_z)) = out0.s5;
- *((__global DATA_TYPE *)(dst_addr + 6 * dst_stride_z)) = out0.s6;
- *((__global DATA_TYPE *)(dst_addr + 7 * dst_stride_z)) = out0.s7;
-
-#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
- *((__global DATA_TYPE *)(dst_addr + 8 * dst_stride_z)) = out1.s0;
- *((__global DATA_TYPE *)(dst_addr + 9 * dst_stride_z)) = out1.s1;
- *((__global DATA_TYPE *)(dst_addr + 10 * dst_stride_z)) = out1.s2;
- *((__global DATA_TYPE *)(dst_addr + 11 * dst_stride_z)) = out1.s3;
- *((__global DATA_TYPE *)(dst_addr + 12 * dst_stride_z)) = out1.s4;
- *((__global DATA_TYPE *)(dst_addr + 13 * dst_stride_z)) = out1.s5;
- *((__global DATA_TYPE *)(dst_addr + 14 * dst_stride_z)) = out1.s6;
- *((__global DATA_TYPE *)(dst_addr + 15 * dst_stride_z)) = out1.s7;
- *((__global DATA_TYPE *)(dst_addr + 16 * dst_stride_z)) = out2.s0;
- *((__global DATA_TYPE *)(dst_addr + 17 * dst_stride_z)) = out2.s1;
- *((__global DATA_TYPE *)(dst_addr + 18 * dst_stride_z)) = out2.s2;
- *((__global DATA_TYPE *)(dst_addr + 19 * dst_stride_z)) = out2.s3;
- *((__global DATA_TYPE *)(dst_addr + 20 * dst_stride_z)) = out2.s4;
- *((__global DATA_TYPE *)(dst_addr + 21 * dst_stride_z)) = out2.s5;
- *((__global DATA_TYPE *)(dst_addr + 22 * dst_stride_z)) = out2.s6;
- *((__global DATA_TYPE *)(dst_addr + 23 * dst_stride_z)) = out2.s7;
- *((__global DATA_TYPE *)(dst_addr + 24 * dst_stride_z)) = out3.s0;
- *((__global DATA_TYPE *)(dst_addr + 25 * dst_stride_z)) = out3.s1;
- *((__global DATA_TYPE *)(dst_addr + 26 * dst_stride_z)) = out3.s2;
- *((__global DATA_TYPE *)(dst_addr + 27 * dst_stride_z)) = out3.s3;
- *((__global DATA_TYPE *)(dst_addr + 28 * dst_stride_z)) = out3.s4;
- *((__global DATA_TYPE *)(dst_addr + 29 * dst_stride_z)) = out3.s5;
- *((__global DATA_TYPE *)(dst_addr + 30 * dst_stride_z)) = out3.s6;
- *((__global DATA_TYPE *)(dst_addr + 31 * dst_stride_z)) = out3.s7;
- *((__global DATA_TYPE *)(dst_addr + 32 * dst_stride_z)) = out4.s0;
- *((__global DATA_TYPE *)(dst_addr + 33 * dst_stride_z)) = out4.s1;
- *((__global DATA_TYPE *)(dst_addr + 34 * dst_stride_z)) = out4.s2;
- *((__global DATA_TYPE *)(dst_addr + 35 * dst_stride_z)) = out4.s3;
- *((__global DATA_TYPE *)(dst_addr + 36 * dst_stride_z)) = out4.s4;
- *((__global DATA_TYPE *)(dst_addr + 37 * dst_stride_z)) = out4.s5;
- *((__global DATA_TYPE *)(dst_addr + 38 * dst_stride_z)) = out4.s6;
- *((__global DATA_TYPE *)(dst_addr + 39 * dst_stride_z)) = out4.s7;
- *((__global DATA_TYPE *)(dst_addr + 40 * dst_stride_z)) = out5.s0;
- *((__global DATA_TYPE *)(dst_addr + 41 * dst_stride_z)) = out5.s1;
- *((__global DATA_TYPE *)(dst_addr + 42 * dst_stride_z)) = out5.s2;
- *((__global DATA_TYPE *)(dst_addr + 43 * dst_stride_z)) = out5.s3;
- *((__global DATA_TYPE *)(dst_addr + 44 * dst_stride_z)) = out5.s4;
- *((__global DATA_TYPE *)(dst_addr + 45 * dst_stride_z)) = out5.s5;
- *((__global DATA_TYPE *)(dst_addr + 46 * dst_stride_z)) = out5.s6;
- *((__global DATA_TYPE *)(dst_addr + 47 * dst_stride_z)) = out5.s7;
- *((__global DATA_TYPE *)(dst_addr + 48 * dst_stride_z)) = out6.s0;
- *((__global DATA_TYPE *)(dst_addr + 49 * dst_stride_z)) = out6.s1;
- *((__global DATA_TYPE *)(dst_addr + 50 * dst_stride_z)) = out6.s2;
- *((__global DATA_TYPE *)(dst_addr + 51 * dst_stride_z)) = out6.s3;
- *((__global DATA_TYPE *)(dst_addr + 52 * dst_stride_z)) = out6.s4;
- *((__global DATA_TYPE *)(dst_addr + 53 * dst_stride_z)) = out6.s5;
- *((__global DATA_TYPE *)(dst_addr + 54 * dst_stride_z)) = out6.s6;
- *((__global DATA_TYPE *)(dst_addr + 55 * dst_stride_z)) = out6.s7;
- *((__global DATA_TYPE *)(dst_addr + 56 * dst_stride_z)) = out7.s0;
- *((__global DATA_TYPE *)(dst_addr + 57 * dst_stride_z)) = out7.s1;
- *((__global DATA_TYPE *)(dst_addr + 58 * dst_stride_z)) = out7.s2;
- *((__global DATA_TYPE *)(dst_addr + 59 * dst_stride_z)) = out7.s3;
- *((__global DATA_TYPE *)(dst_addr + 60 * dst_stride_z)) = out7.s4;
- *((__global DATA_TYPE *)(dst_addr + 61 * dst_stride_z)) = out7.s5;
- *((__global DATA_TYPE *)(dst_addr + 62 * dst_stride_z)) = out7.s6;
- *((__global DATA_TYPE *)(dst_addr + 63 * dst_stride_z)) = out7.s7;
-#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-}
-
-#if defined(SRC_DIM_1) && defined(SRC_DIM_2)
-/** This OpenCL kernel computes the input transform when the output tile is 4x4, 4x1 or 1x4, the filter size is 3x3, 3x1 or 1x3 and the data layout is NHWC
- *
- * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (e.g. -DNUM_TILES_X=5).
- * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=1 and -DPAD_TOP=0).
- * @note Dimension one of the input tensor (width for NHWC data layout) must be passed at compile time using -DSRC_DIM_1 (e.g. -DSRC_DIM_1=112)
- * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM_2 (e.g. -DSRC_DIM_2=112)
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
- * @note If this kernel is used to perform Winograd input transform 3x1, -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note If this kernel is used to perform Winograd input transform 1x3, -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- */
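-// Illustrative build options for this NHWC variant (hypothetical values, not taken from the
-// library; NUM_TILES_Y and the HORIZONTAL/VERTICAL defines are optional):
-//   -DDATA_TYPE=half -DNUM_TILES_X=28 -DNUM_TILES_Y=28 -DPAD_LEFT=1 -DPAD_TOP=1
-//   -DOUTPUT_TILE_W=4 -DOUTPUT_TILE_H=4 -DSRC_DIM_1=112 -DSRC_DIM_2=112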
-__kernel void winograd_input_transform_4x4_3x3_stepz1_nhwc(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- // Index channel
- const int x = get_global_id(0);
- // Index width
- const int y = get_global_id(1);
-#if defined(NUM_TILES_Y)
- // Index height
- const int z = get_global_id(2) % NUM_TILES_Y;
- // Index batch size
- const int b = get_global_id(2) / NUM_TILES_Y;
-#else // defined(NUM_TILES_Y)
- // Index height
- const int z = get_global_id(2);
-#endif // defined(NUM_TILES_Y)
-
-#if defined(NUM_TILES_Y)
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + b * src_stride_w;
-#else // defined(NUM_TILES_Y)
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(DATA_TYPE);
-#endif // defined(NUM_TILES_Y)
-
- // Origin coordinates for the width (y) and height (z) in the input tensor
- int4 y_coord0 = (int4)(y * OUTPUT_TILE_W) + (int4)(0, 1, 2, 3) - (int4)PAD_LEFT;
- int2 y_coord1 = (int2)(y * OUTPUT_TILE_W) + (int2)(4, 5) - (int2)PAD_LEFT;
- int4 z_coord0 = (int4)(z * OUTPUT_TILE_H) + (int4)(0, 1, 2, 3) - (int4)PAD_TOP;
- int2 z_coord1 = (int2)(z * OUTPUT_TILE_H) + (int2)(4, 5) - (int2)PAD_TOP;
-
- // Coordinates to use to avoid out-of-bound reads
- int4 y_coord_valid0 = clamp(y_coord0, (int4)0, (int4)((int)SRC_DIM_1 - 1));
- int2 y_coord_valid1 = clamp(y_coord1, (int2)0, (int2)((int)SRC_DIM_1 - 1));
- int4 z_coord_valid0 = clamp(z_coord0, (int4)0, (int4)((int)SRC_DIM_2 - 1));
- int2 z_coord_valid1 = clamp(z_coord1, (int2)0, (int2)((int)SRC_DIM_2 - 1));
-
- // Boundary conditions
- int4 y_cond0 = y_coord_valid0 == y_coord0;
- int2 y_cond1 = y_coord_valid1 == y_coord1;
- int4 z_cond0 = z_coord_valid0 == z_coord0;
- int2 z_cond1 = z_coord_valid1 == z_coord1;
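- // Out-of-bound handling: the *_valid coordinates are clamped so that every load below stays
- // inside the tensor, while the *_cond vectors record which original coordinates were in
- // range; the FILL_ZERO_OUT_OF_BOUND_* macros then zero the elements whose coordinates were
- // clamped, which implements the implicit zero padding of the input tile.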
-
-#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-
- DATA_TYPE d40 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid1.s0 * src_stride_z);
- DATA_TYPE d41 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid1.s0 * src_stride_z);
- DATA_TYPE d42 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid1.s0 * src_stride_z);
- DATA_TYPE d43 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid1.s0 * src_stride_z);
- DATA_TYPE d44 = *(__global DATA_TYPE *)(src_addr + y_coord_valid1.s0 * (int)src_stride_y + z_coord_valid1.s0 * src_stride_z);
- DATA_TYPE d45 = *(__global DATA_TYPE *)(src_addr + y_coord_valid1.s1 * (int)src_stride_y + z_coord_valid1.s0 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_6_NHWC_H(DATA_TYPE, d4, y_cond, z_cond1.s0);
-
- DATA_TYPE k0 = d44;
- DATA_TYPE k1 = d44;
- DATA_TYPE k2 = d44;
- DATA_TYPE k3 = d44;
- DATA_TYPE k4 = d44;
- DATA_TYPE k5 = (DATA_TYPE)0.0f;
-
- k0 += 4.0f * d40 - 5.0f * d42;
- k1 += -4.0f * d41 - 4.0f * d42 + d43;
- k2 += 4.0f * d41 - 4.0f * d42 - d43;
- k3 += -2.0f * d41 + 2.0f * d43 - d42;
- k4 += 2.0f * d41 - 2.0f * d43 - d42;
- k5 += 4.0f * d41 - 5.0f * d43 + d45;
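- // k0..k5 hold the 1D transform of input row 4 (d40..d45). In the F(4x4, 3x3) transform,
- // row 4 contributes with weight 1 to output rows 0..4 and not at all to row 5, so these
- // values are added to out0..out5 and used directly to initialise out6..out29 further down.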
-#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-
-#if !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
- DATA_TYPE d00 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- DATA_TYPE d01 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- DATA_TYPE d02 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- DATA_TYPE d03 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- DATA_TYPE d04 = *(__global DATA_TYPE *)(src_addr + y_coord_valid1.s0 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- DATA_TYPE d05 = *(__global DATA_TYPE *)(src_addr + y_coord_valid1.s1 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_6_NHWC_H(DATA_TYPE, d0, y_cond, z_cond0.s0);
-
-#else // !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
- DATA_TYPE d00 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- DATA_TYPE d01 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
- DATA_TYPE d02 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
- DATA_TYPE d03 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
- DATA_TYPE d04 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid1.s0 * src_stride_z);
- DATA_TYPE d05 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid1.s1 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_6_NHWC_V(DATA_TYPE, d0, y_cond0.s0, z_cond);
-#endif // !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-
- DATA_TYPE out0 = 16.0f * d00 - 20.0f * d02 + 4.0f * d04;
- DATA_TYPE out1 = -16.0f * d01 - 16.0f * d02 + 4.0f * d03 + 4.0f * d04;
- DATA_TYPE out2 = 16.0f * d01 - 16.0f * d02 - 4.0f * d03 + 4.0f * d04;
- DATA_TYPE out3 = -8.0f * d01 - 4.0f * d02 + 8.0f * d03 + 4.0f * d04;
- DATA_TYPE out4 = 8.0f * d01 - 4.0f * d02 - 8.0f * d03 + 4.0f * d04;
- DATA_TYPE out5 = 16.0f * d01 - 20.0f * d03 + 4.0f * d05;
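- // out0..out5 are 4x the rows of the standard F(4x4, 3x3) B^T matrix applied to d00..d05. For
- // the full 3x3 case they hold only input row 0's contribution to output channels 0..5 (weight
- // B^T[0][0] = 4); the row 2 and row 4 terms are accumulated inside the #if blocks.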
-
-#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
- DATA_TYPE d20 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
- DATA_TYPE d21 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
- DATA_TYPE d22 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
- DATA_TYPE d23 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
- DATA_TYPE d24 = *(__global DATA_TYPE *)(src_addr + y_coord_valid1.s0 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
- DATA_TYPE d25 = *(__global DATA_TYPE *)(src_addr + y_coord_valid1.s1 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_6_NHWC_H(DATA_TYPE, d2, y_cond, z_cond0.s2);
-
- out0 += k0;
- out1 += k1;
- out2 += k2;
- out3 += k3;
- out4 += k4;
- out5 += k5;
- DATA_TYPE out6 = k0;
- DATA_TYPE out7 = k1;
- DATA_TYPE out8 = k2;
- DATA_TYPE out9 = k3;
- DATA_TYPE out10 = k4;
- DATA_TYPE out11 = k5;
- DATA_TYPE out12 = k0;
- DATA_TYPE out13 = k1;
- DATA_TYPE out14 = k2;
- DATA_TYPE out15 = k3;
- DATA_TYPE out16 = k4;
- DATA_TYPE out17 = k5;
- DATA_TYPE out18 = k0;
- DATA_TYPE out19 = k1;
- DATA_TYPE out20 = k2;
- DATA_TYPE out21 = k3;
- DATA_TYPE out22 = k4;
- DATA_TYPE out23 = k5;
- DATA_TYPE out24 = k0;
- DATA_TYPE out25 = k1;
- DATA_TYPE out26 = k2;
- DATA_TYPE out27 = k3;
- DATA_TYPE out28 = k4;
- DATA_TYPE out29 = k5;
-
- // Channels [0, 5]: [out00, out01, out02, out03, out04, out05]
- out0 += -20.0f * d20 + 25.0f * d22 - 5.0f * d24;
- out1 += 20.0f * d21 + 20.0f * d22 - 5.0f * d23 - 5.0f * d24;
- out2 += -20.0f * d21 + 20.0f * d22 + 5.0f * d23 - 5.0f * d24;
- out3 += 10.0f * d21 + 5.0f * d22 - 10.0f * d23 - 5.0f * d24;
- out4 += -10.0f * d21 + 5.0f * d22 + 10.0f * d23 - 5.0f * d24;
- out5 += -20.0f * d21 + 25.0f * d23 - 5.0f * d25;
-#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-
- // Compute destination address
-#if defined(NUM_TILES_Y)
- __global DATA_TYPE *dst_addr = (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + (y + z * (int)NUM_TILES_X) * dst_stride_y + b * dst_stride_w);
-#else // defined(NUM_TILES_Y)
- __global DATA_TYPE *dst_addr = (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + (y + z * (int)NUM_TILES_X) * dst_stride_y);
-#endif // defined(NUM_TILES_Y)
-
- uint dst_plane_stride = dst_stride_z / sizeof(DATA_TYPE);
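- // dst_addr is typed as a DATA_TYPE pointer, so the Z (per-channel) stride is converted from
- // bytes to elements before being used to step through the 36 Winograd channels (6 for the
- // 1D variants) written below.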
-
- *((__global DATA_TYPE *)dst_addr) = out0;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out1;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out2;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out3;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out4;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out5;
- dst_addr += dst_plane_stride;
-
-#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
- DATA_TYPE d10 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
- DATA_TYPE d11 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
- DATA_TYPE d12 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
- DATA_TYPE d13 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
- DATA_TYPE d14 = *(__global DATA_TYPE *)(src_addr + y_coord_valid1.s0 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
- DATA_TYPE d15 = *(__global DATA_TYPE *)(src_addr + y_coord_valid1.s1 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
-
- DATA_TYPE d30 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
- DATA_TYPE d31 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
- DATA_TYPE d32 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
- DATA_TYPE d33 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
- DATA_TYPE d34 = *(__global DATA_TYPE *)(src_addr + y_coord_valid1.s0 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
- DATA_TYPE d35 = *(__global DATA_TYPE *)(src_addr + y_coord_valid1.s1 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_6_NHWC_H(DATA_TYPE, d1, y_cond, z_cond0.s1);
- FILL_ZERO_OUT_OF_BOUND_6_NHWC_H(DATA_TYPE, d3, y_cond, z_cond0.s3);
-
- // Compute common parts for the channels between [6, 29]
- // Channels [6, 11]: [out10, out11, out12, out13, out14, out15]
- // Channels [12, 17]: [out20, out21, out22, out23, out24, out25]
- DATA_TYPE part0 = -16.0f * d20 + 20.0f * d22 - 4.0f * d24;
- DATA_TYPE part1 = 16.0f * d10 - 20.0f * d12 + 4.0f * d14 - 4.0f * d30 + 5.0f * d32 - d34;
- DATA_TYPE part2 = 16.0f * d22 - 4.0f * d24;
- DATA_TYPE part3 = 16.0f * d21 - 4.0f * d23;
- DATA_TYPE part4 = 16.0f * d12 - 4.0f * d14 - 4.0f * d32 + d34;
- DATA_TYPE part5 = 16.0f * d11 - 4.0f * d13 - 4.0f * d31 + d33;
- DATA_TYPE part6 = 4.0f * d22 - 4.0f * d24;
- DATA_TYPE part7 = 8.0f * d11 - 8.0f * d13 - 2.0f * d31 + 2.0f * d33;
- DATA_TYPE part8 = 4.0f * d12 - 4.0f * d14 - d32 + d34;
- DATA_TYPE part9 = 8.0f * d21 - 8.0f * d23;
- DATA_TYPE part10 = -16.0f * d21 + 20.0f * d23 - 4.0f * d25;
- DATA_TYPE part11 = -16.0f * d11 + 20.0f * d13 - 4.0f * d15 + 4.0f * d31 - 5.0f * d33 + d35;
-
- // Channels [18, 23]: [out30, out31, out32, out33, out34, out35]
- // Channels [24, 29]: [out40, out41, out42, out43, out44, out45]
- DATA_TYPE part12 = 8.0f * d10 - 10.0f * d12 + 2.0f * d14 - 8.0f * d30 + 10.0f * d32 - 2.0f * d34;
- DATA_TYPE part13 = part0 * 0.25f; // -4.0f * d20 + 5.0f * d22 - d24
- DATA_TYPE part14 = part2 * 0.25f; // 4.0f * d22 - d24
- DATA_TYPE part15 = 8.0f * d11 - 2.0f * d13 - 8.0f * d31 + 2.0f * d33;
- DATA_TYPE part16 = 8.0f * d12 - 2.0f * d14 - 8.0f * d32 + 2.0f * d34;
- DATA_TYPE part17 = part3 * 0.25f; // 4.0f * d21 - d23
- DATA_TYPE part18 = part6 * 0.25f; // d22 - d24
- DATA_TYPE part19 = 4.0f * d11 - 4.0f * d13 - 4.0f * d31 + 4.0f * d33;
- DATA_TYPE part20 = 2.0f * d12 - 2.0f * d14 - 2.0f * d32 + 2.0f * d34;
- DATA_TYPE part21 = part9 * 0.25f; // 2.0f * (d21 - d23)
- DATA_TYPE part22 = part10 * 0.25f; // - 4.0f * d21 + 5.0f * d23 - d25
- DATA_TYPE part23 = part11 * 0.5f + 6.0f * d31 - 7.5f * d33 + 1.5f * d35; // - 8.0f * d11 + 10.0f * d13 - 2.0f * d15 + 8.0f * d31 - 10.0f * d33 + 2.0f * d35;
-
- out6 += part0 - part1;
- out12 += part0 + part1;
- out7 += part2 + part3 + part4 + part5;
- out8 += part2 - part3 + part4 - part5;
- out13 += part2 + part3 - part4 - part5;
- out14 += part2 - part3 - part4 + part5;
- out9 += part6 + part7 + part8 + part9;
- out10 += part6 - part7 + part8 - part9;
- out15 += part6 - part7 - part8 + part9;
- out16 += part6 + part7 - part8 - part9;
- out11 += part10 + part11;
- out17 += part10 - part11;
-
- out18 += part13 - part12;
- out24 += part13 + part12;
- out19 += part14 + part15 + part16 + part17;
- out20 += part14 - part15 + part16 - part17;
- out25 += part14 - part15 - part16 + part17;
- out26 += part14 + part15 - part16 - part17;
- out21 += part18 + part19 + part20 + part21;
- out22 += part18 - part19 + part20 - part21;
- out27 += part18 - part19 - part20 + part21;
- out28 += part18 + part19 - part20 - part21;
- out23 += part22 + part23;
- out29 += part22 - part23;
-
- *((__global DATA_TYPE *)dst_addr) = out6;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out7;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out8;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out9;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out10;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out11;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out12;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out13;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out14;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out15;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out16;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out17;
- dst_addr += dst_plane_stride;
-
- *((__global DATA_TYPE *)dst_addr) = out18;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out19;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out20;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out21;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out22;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out23;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out24;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out25;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out26;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out27;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out28;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out29;
- dst_addr += dst_plane_stride;
-
- // Row5
- DATA_TYPE d50 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid1.s1 * src_stride_z);
- DATA_TYPE d51 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid1.s1 * src_stride_z);
- DATA_TYPE d52 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid1.s1 * src_stride_z);
- DATA_TYPE d53 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid1.s1 * src_stride_z);
- DATA_TYPE d54 = *(__global DATA_TYPE *)(src_addr + y_coord_valid1.s0 * (int)src_stride_y + z_coord_valid1.s1 * src_stride_z);
- DATA_TYPE d55 = *(__global DATA_TYPE *)(src_addr + y_coord_valid1.s1 * (int)src_stride_y + z_coord_valid1.s1 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_6_NHWC_H(DATA_TYPE, d5, y_cond, z_cond1.s1);
-
- // Channels [30, 35]
- out0 = 16.0f * d10 - 20.0f * d12 - 20.0f * d30 + 25.0f * d32 + 4.0f * d50 - 5.0f * d52 + d54 + 4.0f * d14 - 5.0f * d34;
- out1 = -16.0f * d11 - 16.0f * d12 + 4.0f * d13 + 20.0f * d31 + 20.0f * d32 - 5.0f * d33 - 4.0f * d51 - 4.0f * d52 + d53 + d54 + 4.0f * d14 - 5.0f * d34;
- out2 = 16.0f * d11 - 16.0f * d12 - 4.0f * d13 - 20.0f * d31 + 20.0f * d32 + 5.0f * d33 + 4.0f * d51 - 4.0f * d52 - d53 + d54 + 4.0f * d14 - 5.0f * d34;
- out3 = -8.0f * d11 - 4.0f * d12 + 8.0f * d13 + 10.0f * d31 - 10.0f * d33 + 5.0f * d32 - 2.0f * d51 + 2.0f * d53 - d52 + d54 + 4.0f * d14 - 5.0f * d34;
- out4 = 8.0f * d11 - 4.0f * d12 - 8.0f * d13 - 10.0f * d31 + 5.0f * d32 + 10.0f * d33 + 2.0f * d51 - 2.0f * d53 - d52 + d54 + 4.0f * d14 - 5.0f * d34;
- out5 = 16.0f * d11 - 20.0f * d13 + 4.0f * d15 - 20.0f * d31 + 25.0f * d33 - 5.0f * d35 + 4.0f * d51 - 5.0f * d53 + d55;
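- // Channels 30..35 form the last output row: with B^T row 5 = [0, 4, 0, -5, 0, 1] they combine
- // the 1D transforms of input rows 1, 3 and 5 with weights 4, -5 and 1, which matches the
- // grouping of the d1*, d3* and d5* terms above.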
-
- *((__global DATA_TYPE *)dst_addr) = out0;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out1;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out2;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out3;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out4;
- dst_addr += dst_plane_stride;
- *((__global DATA_TYPE *)dst_addr) = out5;
- dst_addr += dst_plane_stride;
-#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-}
-
-/** This OpenCL kernel computes the input transform when the kernel size is 5x5/5x1 or 1x5, the output tile is 4x4/4x1 or 1x4 and the data layout is NHWC
- *
- * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (e.g. -DNUM_TILES_X=5).
- * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=1 and -DPAD_TOP=0).
- * @note Dimension one of the input tensor (width for NHWC data layout) must be passed at compile time using -DSRC_DIM_1 (e.g. -DSRC_DIM_1=112)
- * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM_2 (e.g. -DSRC_DIM_2=112)
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
- * @note If this kernel is used to perform Winograd input transform 5x1, -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note If this kernel is used to perform Winograd input transform 1x5, -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- */
-__kernel void winograd_input_transform_4x4_5x5_stepz1_nhwc(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- const int x = get_global_id(0);
- const int y = get_global_id(1);
-#if defined(NUM_TILES_Y)
- const int z = get_global_id(2) % NUM_TILES_Y;
- const int b = get_global_id(2) / NUM_TILES_Y;
-#else // defined(NUM_TILES_Y)
- const int z = get_global_id(2);
-#endif // defined(NUM_TILES_Y)
-
- // Compute input address
-#if defined(NUM_TILES_Y)
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + b * src_stride_w;
-#else // defined(NUM_TILES_Y)
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(DATA_TYPE);
-#endif // defined(NUM_TILES_Y)
-
- // Origin coordinates for the width (y) and height (z) in the input tensor
- int8 y_coord0 = (int8)(y * OUTPUT_TILE_W) + (int8)(0, 1, 2, 3, 4, 5, 6, 7) - (int8)PAD_LEFT;
- int8 z_coord0 = (int8)(z * OUTPUT_TILE_H) + (int8)(0, 1, 2, 3, 4, 5, 6, 7) - (int8)PAD_TOP;
-
- // Coordinates to use to avoid out-of-bound reads
- int8 y_coord_valid0 = clamp(y_coord0, (int8)0, (int8)((int)SRC_DIM_1 - 1));
- int8 z_coord_valid0 = clamp(z_coord0, (int8)0, (int8)((int)SRC_DIM_2 - 1));
-
- // Boundary conditions
- int8 y_cond0 = y_coord_valid0 == y_coord0;
- int8 z_cond0 = z_coord_valid0 == z_coord0;
-
-#if defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
-
- // Load the input tile
- VEC_DATA_TYPE(DATA_TYPE, 8)
- in_row0;
- in_row0.s0 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s1 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s2 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s3 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s4 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s4 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s5 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s5 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s6 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s6 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s7 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s7 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_8_NHWC_H(DATA_TYPE, in_row0.s, y_cond, z_cond0.s0);
-
- // Calculate common factors for intermediate tensor
- VEC_DATA_TYPE(DATA_TYPE, 8)
- comm_fact0 = 0.0f;
- VEC_DATA_TYPE(DATA_TYPE, 8)
- tmp0 = in_row0;
-
- VEC_DATA_TYPE(DATA_TYPE, 8)
- out0 = (VEC_DATA_TYPE(DATA_TYPE, 8))0.0f;
-
- OUTPUT_ROW_4x4_5x5(out0, tmp0, comm_fact0);
-
-#elif defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL) // defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
-
- // Load the input tile
- VEC_DATA_TYPE(DATA_TYPE, 8)
- in_row0;
- in_row0.s0 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s1 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
- in_row0.s2 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
- in_row0.s3 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
- in_row0.s4 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s4 * src_stride_z);
- in_row0.s5 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s5 * src_stride_z);
- in_row0.s6 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s6 * src_stride_z);
- in_row0.s7 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s7 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_8_NHWC_V(DATA_TYPE, in_row0.s, y_cond0.s0, z_cond);
-
- // Calculate common factors for intermediate tensor
- VEC_DATA_TYPE(DATA_TYPE, 8)
- comm_fact0 = 0.0f;
- VEC_DATA_TYPE(DATA_TYPE, 8)
- tmp0 = in_row0;
-
- VEC_DATA_TYPE(DATA_TYPE, 8)
- out0 = (VEC_DATA_TYPE(DATA_TYPE, 8))0.0f;
-
- OUTPUT_ROW_4x4_5x5(out0, tmp0, comm_fact0);
-#else // defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
- VEC_DATA_TYPE(DATA_TYPE, 8)
- in_row0, in_row1, in_row2, in_row3, in_row4, in_row5, in_row6, in_row7;
-
- // Row0
- in_row0.s0 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s1 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s2 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s3 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s4 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s4 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s5 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s5 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s6 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s6 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s7 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s7 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_8_NHWC_H(DATA_TYPE, in_row0.s, y_cond, z_cond0.s0);
-
- // Row1
- in_row1.s0 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
- in_row1.s1 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
- in_row1.s2 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
- in_row1.s3 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
- in_row1.s4 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s4 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
- in_row1.s5 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s5 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
- in_row1.s6 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s6 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
- in_row1.s7 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s7 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_8_NHWC_H(DATA_TYPE, in_row1.s, y_cond, z_cond0.s1);
-
- // Row2
- in_row2.s0 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
- in_row2.s1 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
- in_row2.s2 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
- in_row2.s3 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
- in_row2.s4 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s4 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
- in_row2.s5 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s5 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
- in_row2.s6 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s6 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
- in_row2.s7 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s7 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_8_NHWC_H(DATA_TYPE, in_row2.s, y_cond, z_cond0.s2);
-
- // Row3
- in_row3.s0 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
- in_row3.s1 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
- in_row3.s2 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
- in_row3.s3 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
- in_row3.s4 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s4 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
- in_row3.s5 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s5 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
- in_row3.s6 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s6 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
- in_row3.s7 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s7 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_8_NHWC_H(DATA_TYPE, in_row3.s, y_cond, z_cond0.s3);
-
- // Row4
- in_row4.s0 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s4 * src_stride_z);
- in_row4.s1 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid0.s4 * src_stride_z);
- in_row4.s2 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid0.s4 * src_stride_z);
- in_row4.s3 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid0.s4 * src_stride_z);
- in_row4.s4 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s4 * (int)src_stride_y + z_coord_valid0.s4 * src_stride_z);
- in_row4.s5 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s5 * (int)src_stride_y + z_coord_valid0.s4 * src_stride_z);
- in_row4.s6 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s6 * (int)src_stride_y + z_coord_valid0.s4 * src_stride_z);
- in_row4.s7 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s7 * (int)src_stride_y + z_coord_valid0.s4 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_8_NHWC_H(DATA_TYPE, in_row4.s, y_cond, z_cond0.s4);
-
- // Row5
- in_row5.s0 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s5 * src_stride_z);
- in_row5.s1 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid0.s5 * src_stride_z);
- in_row5.s2 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid0.s5 * src_stride_z);
- in_row5.s3 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid0.s5 * src_stride_z);
- in_row5.s4 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s4 * (int)src_stride_y + z_coord_valid0.s5 * src_stride_z);
- in_row5.s5 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s5 * (int)src_stride_y + z_coord_valid0.s5 * src_stride_z);
- in_row5.s6 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s6 * (int)src_stride_y + z_coord_valid0.s5 * src_stride_z);
- in_row5.s7 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s7 * (int)src_stride_y + z_coord_valid0.s5 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_8_NHWC_H(DATA_TYPE, in_row5.s, y_cond, z_cond0.s5);
-
- // Row6
- in_row6.s0 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s6 * src_stride_z);
- in_row6.s1 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid0.s6 * src_stride_z);
- in_row6.s2 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid0.s6 * src_stride_z);
- in_row6.s3 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid0.s6 * src_stride_z);
- in_row6.s4 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s4 * (int)src_stride_y + z_coord_valid0.s6 * src_stride_z);
- in_row6.s5 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s5 * (int)src_stride_y + z_coord_valid0.s6 * src_stride_z);
- in_row6.s6 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s6 * (int)src_stride_y + z_coord_valid0.s6 * src_stride_z);
- in_row6.s7 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s7 * (int)src_stride_y + z_coord_valid0.s6 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_8_NHWC_H(DATA_TYPE, in_row6.s, y_cond, z_cond0.s6);
-
- // Row7
- in_row7.s0 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s7 * src_stride_z);
- in_row7.s1 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid0.s7 * src_stride_z);
- in_row7.s2 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid0.s7 * src_stride_z);
- in_row7.s3 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid0.s7 * src_stride_z);
- in_row7.s4 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s4 * (int)src_stride_y + z_coord_valid0.s7 * src_stride_z);
- in_row7.s5 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s5 * (int)src_stride_y + z_coord_valid0.s7 * src_stride_z);
- in_row7.s6 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s6 * (int)src_stride_y + z_coord_valid0.s7 * src_stride_z);
- in_row7.s7 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s7 * (int)src_stride_y + z_coord_valid0.s7 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_8_NHWC_H(DATA_TYPE, in_row7.s, y_cond, z_cond0.s7);
-
- VEC_DATA_TYPE(DATA_TYPE, 8)
- comm_fact0 = in_row2 + in_row6 - (DATA_TYPE)4.25f * in_row4;
- VEC_DATA_TYPE(DATA_TYPE, 8)
- comm_fact1 = in_row1 + in_row5 - (DATA_TYPE)4.25f * in_row3;
- VEC_DATA_TYPE(DATA_TYPE, 8)
- comm_fact2 = (DATA_TYPE)0.25f * in_row2 - (DATA_TYPE)1.25f * in_row4 + in_row6;
-
- // Calculate intermediate tensor and reuse common factor vectors
- const VEC_DATA_TYPE(DATA_TYPE, 8) tmp0 = in_row0 - in_row6 + (DATA_TYPE)5.25f * in_row4 - (DATA_TYPE)5.25f * in_row2;
- const VEC_DATA_TYPE(DATA_TYPE, 8) tmp1 = comm_fact0 + comm_fact1;
- const VEC_DATA_TYPE(DATA_TYPE, 8) tmp2 = comm_fact0 - comm_fact1;
-
- comm_fact0 = (DATA_TYPE)2.5f * in_row3;
- comm_fact1 = (DATA_TYPE)0.5f * in_row1 - comm_fact0 + (DATA_TYPE)2.f * in_row5;
-
- const VEC_DATA_TYPE(DATA_TYPE, 8) tmp3 = comm_fact1 + comm_fact2;
- const VEC_DATA_TYPE(DATA_TYPE, 8) tmp4 = comm_fact2 - comm_fact1;
-
- comm_fact1 = (DATA_TYPE)2.f * in_row1 - comm_fact0 + (DATA_TYPE)0.5f * in_row5;
- comm_fact2 = (DATA_TYPE)4.f * in_row2 - (DATA_TYPE)5.f * in_row4 + in_row6;
-
- const VEC_DATA_TYPE(DATA_TYPE, 8) tmp5 = comm_fact1 + comm_fact2;
- const VEC_DATA_TYPE(DATA_TYPE, 8) tmp6 = comm_fact2 - comm_fact1;
- const VEC_DATA_TYPE(DATA_TYPE, 8) tmp7 = in_row7 - in_row1 + (DATA_TYPE)5.25f * in_row3 - (DATA_TYPE)5.25f * in_row5;
-
- // Calculate output rows (reuse comm_fact0 vector)
- VEC_DATA_TYPE(DATA_TYPE, 8)
- out0, out1, out2, out3, out4, out5, out6, out7;
- OUTPUT_ROW_4x4_5x5(out0, tmp0, comm_fact0);
- OUTPUT_ROW_4x4_5x5(out1, tmp1, comm_fact0);
- OUTPUT_ROW_4x4_5x5(out2, tmp2, comm_fact0);
- OUTPUT_ROW_4x4_5x5(out3, tmp3, comm_fact0);
- OUTPUT_ROW_4x4_5x5(out4, tmp4, comm_fact0);
- OUTPUT_ROW_4x4_5x5(out5, tmp5, comm_fact0);
- OUTPUT_ROW_4x4_5x5(out6, tmp6, comm_fact0);
- OUTPUT_ROW_4x4_5x5(out7, tmp7, comm_fact0);
-#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-
- // Store values across the channels
-#if defined(NUM_TILES_Y)
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + (y + z * (int)NUM_TILES_X) * dst_stride_y + b * dst_stride_w;
-#else // defined(NUM_TILES_Y)
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + (y + z * (int)NUM_TILES_X) * dst_stride_y;
-#endif // defined(NUM_TILES_Y)
-
- *((__global DATA_TYPE *)(dst_addr + 0 * dst_stride_z)) = out0.s0;
- *((__global DATA_TYPE *)(dst_addr + 1 * dst_stride_z)) = out0.s1;
- *((__global DATA_TYPE *)(dst_addr + 2 * dst_stride_z)) = out0.s2;
- *((__global DATA_TYPE *)(dst_addr + 3 * dst_stride_z)) = out0.s3;
- *((__global DATA_TYPE *)(dst_addr + 4 * dst_stride_z)) = out0.s4;
- *((__global DATA_TYPE *)(dst_addr + 5 * dst_stride_z)) = out0.s5;
- *((__global DATA_TYPE *)(dst_addr + 6 * dst_stride_z)) = out0.s6;
- *((__global DATA_TYPE *)(dst_addr + 7 * dst_stride_z)) = out0.s7;
-
-#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
- *((__global DATA_TYPE *)(dst_addr + 8 * dst_stride_z)) = out1.s0;
- *((__global DATA_TYPE *)(dst_addr + 9 * dst_stride_z)) = out1.s1;
- *((__global DATA_TYPE *)(dst_addr + 10 * dst_stride_z)) = out1.s2;
- *((__global DATA_TYPE *)(dst_addr + 11 * dst_stride_z)) = out1.s3;
- *((__global DATA_TYPE *)(dst_addr + 12 * dst_stride_z)) = out1.s4;
- *((__global DATA_TYPE *)(dst_addr + 13 * dst_stride_z)) = out1.s5;
- *((__global DATA_TYPE *)(dst_addr + 14 * dst_stride_z)) = out1.s6;
- *((__global DATA_TYPE *)(dst_addr + 15 * dst_stride_z)) = out1.s7;
- *((__global DATA_TYPE *)(dst_addr + 16 * dst_stride_z)) = out2.s0;
- *((__global DATA_TYPE *)(dst_addr + 17 * dst_stride_z)) = out2.s1;
- *((__global DATA_TYPE *)(dst_addr + 18 * dst_stride_z)) = out2.s2;
- *((__global DATA_TYPE *)(dst_addr + 19 * dst_stride_z)) = out2.s3;
- *((__global DATA_TYPE *)(dst_addr + 20 * dst_stride_z)) = out2.s4;
- *((__global DATA_TYPE *)(dst_addr + 21 * dst_stride_z)) = out2.s5;
- *((__global DATA_TYPE *)(dst_addr + 22 * dst_stride_z)) = out2.s6;
- *((__global DATA_TYPE *)(dst_addr + 23 * dst_stride_z)) = out2.s7;
- *((__global DATA_TYPE *)(dst_addr + 24 * dst_stride_z)) = out3.s0;
- *((__global DATA_TYPE *)(dst_addr + 25 * dst_stride_z)) = out3.s1;
- *((__global DATA_TYPE *)(dst_addr + 26 * dst_stride_z)) = out3.s2;
- *((__global DATA_TYPE *)(dst_addr + 27 * dst_stride_z)) = out3.s3;
- *((__global DATA_TYPE *)(dst_addr + 28 * dst_stride_z)) = out3.s4;
- *((__global DATA_TYPE *)(dst_addr + 29 * dst_stride_z)) = out3.s5;
- *((__global DATA_TYPE *)(dst_addr + 30 * dst_stride_z)) = out3.s6;
- *((__global DATA_TYPE *)(dst_addr + 31 * dst_stride_z)) = out3.s7;
- *((__global DATA_TYPE *)(dst_addr + 32 * dst_stride_z)) = out4.s0;
- *((__global DATA_TYPE *)(dst_addr + 33 * dst_stride_z)) = out4.s1;
- *((__global DATA_TYPE *)(dst_addr + 34 * dst_stride_z)) = out4.s2;
- *((__global DATA_TYPE *)(dst_addr + 35 * dst_stride_z)) = out4.s3;
- *((__global DATA_TYPE *)(dst_addr + 36 * dst_stride_z)) = out4.s4;
- *((__global DATA_TYPE *)(dst_addr + 37 * dst_stride_z)) = out4.s5;
- *((__global DATA_TYPE *)(dst_addr + 38 * dst_stride_z)) = out4.s6;
- *((__global DATA_TYPE *)(dst_addr + 39 * dst_stride_z)) = out4.s7;
- *((__global DATA_TYPE *)(dst_addr + 40 * dst_stride_z)) = out5.s0;
- *((__global DATA_TYPE *)(dst_addr + 41 * dst_stride_z)) = out5.s1;
- *((__global DATA_TYPE *)(dst_addr + 42 * dst_stride_z)) = out5.s2;
- *((__global DATA_TYPE *)(dst_addr + 43 * dst_stride_z)) = out5.s3;
- *((__global DATA_TYPE *)(dst_addr + 44 * dst_stride_z)) = out5.s4;
- *((__global DATA_TYPE *)(dst_addr + 45 * dst_stride_z)) = out5.s5;
- *((__global DATA_TYPE *)(dst_addr + 46 * dst_stride_z)) = out5.s6;
- *((__global DATA_TYPE *)(dst_addr + 47 * dst_stride_z)) = out5.s7;
- *((__global DATA_TYPE *)(dst_addr + 48 * dst_stride_z)) = out6.s0;
- *((__global DATA_TYPE *)(dst_addr + 49 * dst_stride_z)) = out6.s1;
- *((__global DATA_TYPE *)(dst_addr + 50 * dst_stride_z)) = out6.s2;
- *((__global DATA_TYPE *)(dst_addr + 51 * dst_stride_z)) = out6.s3;
- *((__global DATA_TYPE *)(dst_addr + 52 * dst_stride_z)) = out6.s4;
- *((__global DATA_TYPE *)(dst_addr + 53 * dst_stride_z)) = out6.s5;
- *((__global DATA_TYPE *)(dst_addr + 54 * dst_stride_z)) = out6.s6;
- *((__global DATA_TYPE *)(dst_addr + 55 * dst_stride_z)) = out6.s7;
- *((__global DATA_TYPE *)(dst_addr + 56 * dst_stride_z)) = out7.s0;
- *((__global DATA_TYPE *)(dst_addr + 57 * dst_stride_z)) = out7.s1;
- *((__global DATA_TYPE *)(dst_addr + 58 * dst_stride_z)) = out7.s2;
- *((__global DATA_TYPE *)(dst_addr + 59 * dst_stride_z)) = out7.s3;
- *((__global DATA_TYPE *)(dst_addr + 60 * dst_stride_z)) = out7.s4;
- *((__global DATA_TYPE *)(dst_addr + 61 * dst_stride_z)) = out7.s5;
- *((__global DATA_TYPE *)(dst_addr + 62 * dst_stride_z)) = out7.s6;
- *((__global DATA_TYPE *)(dst_addr + 63 * dst_stride_z)) = out7.s7;
-#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-}
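Reading the addressing off the stores above: for tile column y, tile row z, channel x and Winograd coefficient k, the destination element sits at

\[
\text{dst\_offset}(x, y, z, k) \;=\; x \cdot \text{sizeof(DATA\_TYPE)} \;+\; \big(y + z \cdot \text{NUM\_TILES\_X}\big)\cdot \text{dst\_stride\_y} \;+\; k \cdot \text{dst\_stride\_z}
\]

i.e. each transformed coefficient occupies its own Z plane of the destination, with channels along X and the flattened tile index along Y.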
-
-/** This OpenCL kernel computes the input transform when the kernel size is 7x7/7x1/1x7 and the output tile is 2x2/2x1/1x2 when the data layout is NHWC
- *
- * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (i.e.-DNUM_TILES_X=7).
- * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (i.e.-DPAD_LEFT=1 and -DPAD_TOP=0).
- * @note Dimension one of the input tensor (width for NHWC data layout) must be passed at compile time using -DSRC_DIM_1 (e.g. -DSRC_DIM_1=112)
- * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM_2 (e.g. -DSRC_DIM_2=112)
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=2
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=2
- * @note If this kernel is used to perform Winograd input transform 7x1, -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note If this kernel is used to perform Winograd input transform 1x7, -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- */
-__kernel void winograd_input_transform_2x2_7x7_stepz1_nhwc(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- const int x = get_global_id(0);
- const int y = get_global_id(1);
-#if defined(NUM_TILES_Y)
- const int z = get_global_id(2) % NUM_TILES_Y;
- const int b = get_global_id(2) / NUM_TILES_Y;
-#else /* defined(NUM_TILES_Y) */
- const int z = get_global_id(2);
-#endif /* defined(NUM_TILES_Y) */
-
- // Compute input address
-#if defined(NUM_TILES_Y)
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + b * src_stride_w;
-#else /* defined(NUM_TILES_Y) */
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(DATA_TYPE);
-#endif /* defined(NUM_TILES_Y) */
-
- // Origin coordinates for the width (y) and height (z) in the input tensor
- int8 y_coord0 = (int8)(y * OUTPUT_TILE_W) + (int8)(0, 1, 2, 3, 4, 5, 6, 7) - (int8)PAD_LEFT;
- int8 z_coord0 = (int8)(z * OUTPUT_TILE_H) + (int8)(0, 1, 2, 3, 4, 5, 6, 7) - (int8)PAD_TOP;
-
- // Coordinates to use to avoid out-of-bound reads
- int8 y_coord_valid0 = clamp(y_coord0, (int8)0, (int8)((int)SRC_DIM_1 - 1));
- int8 z_coord_valid0 = clamp(z_coord0, (int8)0, (int8)((int)SRC_DIM_2 - 1));
-
- // Boundary conditions
- int8 y_cond0 = y_coord_valid0 == y_coord0;
- int8 z_cond0 = z_coord_valid0 == z_coord0;
-
-#if defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
-
- // Load the input tile
- VEC_DATA_TYPE(DATA_TYPE, 8)
- in_row0;
- in_row0.s0 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s1 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s2 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s3 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s4 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s4 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s5 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s5 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s6 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s6 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s7 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s7 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_8_NHWC_H(DATA_TYPE, in_row0.s, y_cond, z_cond0.s0);
-
- VEC_DATA_TYPE(DATA_TYPE, 8)
- out0 = (VEC_DATA_TYPE(DATA_TYPE, 8))0.0f;
-
- VEC_DATA_TYPE(DATA_TYPE, 8)
- tmp0 = ((VEC_DATA_TYPE(DATA_TYPE, 8)) - 36.0f) * in_row0;
-
- VEC_DATA_TYPE(DATA_TYPE, 8)
- comm_fact0 = (VEC_DATA_TYPE(DATA_TYPE, 8))0.0f;
-
- OUTPUT_ROW_2x2_7x7(out0, tmp0, comm_fact0);
-
-#elif defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL) // defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
- // Load the input tile
- VEC_DATA_TYPE(DATA_TYPE, 8)
- in_row0;
- in_row0.s0 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s1 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
- in_row0.s2 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
- in_row0.s3 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
- in_row0.s4 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s4 * src_stride_z);
- in_row0.s5 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s5 * src_stride_z);
- in_row0.s6 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s6 * src_stride_z);
- in_row0.s7 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s7 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_8_NHWC_V(DATA_TYPE, in_row0.s, y_cond0.s0, z_cond);
-
- // Calculate common factors for intermediate tensor
- VEC_DATA_TYPE(DATA_TYPE, 8)
- tmp0 = ((VEC_DATA_TYPE(DATA_TYPE, 8)) - 36.0f) * in_row0;
-
- VEC_DATA_TYPE(DATA_TYPE, 8)
- out0 = (VEC_DATA_TYPE(DATA_TYPE, 8))0.0f;
-
- VEC_DATA_TYPE(DATA_TYPE, 8)
- comm_fact0 = (VEC_DATA_TYPE(DATA_TYPE, 8))0.0f;
-
- OUTPUT_ROW_2x2_7x7(out0, tmp0, comm_fact0);
-#else // defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
- VEC_DATA_TYPE(DATA_TYPE, 8)
- in_row0, in_row1, in_row2, in_row3, in_row4, in_row5, in_row6, in_row7;
-
- // Row0
- in_row0.s0 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s1 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s2 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s3 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s4 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s4 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s5 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s5 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s6 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s6 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
- in_row0.s7 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s7 * (int)src_stride_y + z_coord_valid0.s0 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_8_NHWC_H(DATA_TYPE, in_row0.s, y_cond, z_cond0.s0);
-
- // Row1
- in_row1.s0 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
- in_row1.s1 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
- in_row1.s2 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
- in_row1.s3 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
- in_row1.s4 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s4 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
- in_row1.s5 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s5 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
- in_row1.s6 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s6 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
- in_row1.s7 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s7 * (int)src_stride_y + z_coord_valid0.s1 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_8_NHWC_H(DATA_TYPE, in_row1.s, y_cond, z_cond0.s1);
-
- // Row2
- in_row2.s0 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
- in_row2.s1 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
- in_row2.s2 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
- in_row2.s3 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
- in_row2.s4 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s4 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
- in_row2.s5 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s5 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
- in_row2.s6 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s6 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
- in_row2.s7 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s7 * (int)src_stride_y + z_coord_valid0.s2 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_8_NHWC_H(DATA_TYPE, in_row2.s, y_cond, z_cond0.s2);
-
- // Row3
- in_row3.s0 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
- in_row3.s1 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
- in_row3.s2 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
- in_row3.s3 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
- in_row3.s4 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s4 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
- in_row3.s5 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s5 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
- in_row3.s6 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s6 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
- in_row3.s7 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s7 * (int)src_stride_y + z_coord_valid0.s3 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_8_NHWC_H(DATA_TYPE, in_row3.s, y_cond, z_cond0.s3);
-
- // Row4
- in_row4.s0 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s4 * src_stride_z);
- in_row4.s1 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid0.s4 * src_stride_z);
- in_row4.s2 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid0.s4 * src_stride_z);
- in_row4.s3 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid0.s4 * src_stride_z);
- in_row4.s4 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s4 * (int)src_stride_y + z_coord_valid0.s4 * src_stride_z);
- in_row4.s5 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s5 * (int)src_stride_y + z_coord_valid0.s4 * src_stride_z);
- in_row4.s6 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s6 * (int)src_stride_y + z_coord_valid0.s4 * src_stride_z);
- in_row4.s7 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s7 * (int)src_stride_y + z_coord_valid0.s4 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_8_NHWC_H(DATA_TYPE, in_row4.s, y_cond, z_cond0.s4);
-
- // Row5
- in_row5.s0 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s5 * src_stride_z);
- in_row5.s1 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid0.s5 * src_stride_z);
- in_row5.s2 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid0.s5 * src_stride_z);
- in_row5.s3 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid0.s5 * src_stride_z);
- in_row5.s4 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s4 * (int)src_stride_y + z_coord_valid0.s5 * src_stride_z);
- in_row5.s5 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s5 * (int)src_stride_y + z_coord_valid0.s5 * src_stride_z);
- in_row5.s6 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s6 * (int)src_stride_y + z_coord_valid0.s5 * src_stride_z);
- in_row5.s7 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s7 * (int)src_stride_y + z_coord_valid0.s5 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_8_NHWC_H(DATA_TYPE, in_row5.s, y_cond, z_cond0.s5);
-
- // Row6
- in_row6.s0 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s6 * src_stride_z);
- in_row6.s1 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid0.s6 * src_stride_z);
- in_row6.s2 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid0.s6 * src_stride_z);
- in_row6.s3 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid0.s6 * src_stride_z);
- in_row6.s4 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s4 * (int)src_stride_y + z_coord_valid0.s6 * src_stride_z);
- in_row6.s5 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s5 * (int)src_stride_y + z_coord_valid0.s6 * src_stride_z);
- in_row6.s6 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s6 * (int)src_stride_y + z_coord_valid0.s6 * src_stride_z);
- in_row6.s7 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s7 * (int)src_stride_y + z_coord_valid0.s6 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_8_NHWC_H(DATA_TYPE, in_row6.s, y_cond, z_cond0.s6);
-
- // Row7
- in_row7.s0 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s0 * (int)src_stride_y + z_coord_valid0.s7 * src_stride_z);
- in_row7.s1 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s1 * (int)src_stride_y + z_coord_valid0.s7 * src_stride_z);
- in_row7.s2 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s2 * (int)src_stride_y + z_coord_valid0.s7 * src_stride_z);
- in_row7.s3 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s3 * (int)src_stride_y + z_coord_valid0.s7 * src_stride_z);
- in_row7.s4 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s4 * (int)src_stride_y + z_coord_valid0.s7 * src_stride_z);
- in_row7.s5 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s5 * (int)src_stride_y + z_coord_valid0.s7 * src_stride_z);
- in_row7.s6 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s6 * (int)src_stride_y + z_coord_valid0.s7 * src_stride_z);
- in_row7.s7 = *(__global DATA_TYPE *)(src_addr + y_coord_valid0.s7 * (int)src_stride_y + z_coord_valid0.s7 * src_stride_z);
-
- FILL_ZERO_OUT_OF_BOUND_8_NHWC_H(DATA_TYPE, in_row7.s, y_cond, z_cond0.s7);
-
- VEC_DATA_TYPE(DATA_TYPE, 8)
- comm_fact0 = (DATA_TYPE)36.0f * in_row2 - (DATA_TYPE)13.0f * in_row4 + in_row6;
- VEC_DATA_TYPE(DATA_TYPE, 8)
- comm_fact1 = (DATA_TYPE)36.0f * in_row1 - (DATA_TYPE)13.0f * in_row3 + in_row5;
- VEC_DATA_TYPE(DATA_TYPE, 8)
- comm_fact2 = (DATA_TYPE)9.0f * in_row2 - (DATA_TYPE)10.0f * in_row4 + in_row6;
- VEC_DATA_TYPE(DATA_TYPE, 8)
- comm_fact3 = (DATA_TYPE)18.0f * in_row1 - (DATA_TYPE)20.0f * in_row3 + (DATA_TYPE)2.0f * in_row5;
- VEC_DATA_TYPE(DATA_TYPE, 8)
- comm_fact4 = (DATA_TYPE)4.0f * in_row2 - (DATA_TYPE)5.0f * in_row4 + in_row6;
- VEC_DATA_TYPE(DATA_TYPE, 8)
- comm_fact5 = (DATA_TYPE)12.0f * in_row1 - (DATA_TYPE)15.0f * in_row3 + (DATA_TYPE)3.0f * in_row5;
-
- // Calculate intermediate tensors
- const VEC_DATA_TYPE(DATA_TYPE, 8) tmp0 = -(DATA_TYPE)36.0f * in_row0 + (DATA_TYPE)49.0f * in_row2 - (DATA_TYPE)14.0f * in_row4 + in_row6;
- const VEC_DATA_TYPE(DATA_TYPE, 8) tmp1 = comm_fact0 - comm_fact1;
- const VEC_DATA_TYPE(DATA_TYPE, 8) tmp2 = comm_fact0 + comm_fact1;
- const VEC_DATA_TYPE(DATA_TYPE, 8) tmp3 = comm_fact2 - comm_fact3;
- const VEC_DATA_TYPE(DATA_TYPE, 8) tmp4 = comm_fact2 + comm_fact3;
- const VEC_DATA_TYPE(DATA_TYPE, 8) tmp5 = comm_fact4 - comm_fact5;
- const VEC_DATA_TYPE(DATA_TYPE, 8) tmp6 = comm_fact4 + comm_fact5;
- const VEC_DATA_TYPE(DATA_TYPE, 8) tmp7 = -(DATA_TYPE)36.0f * in_row1 + (DATA_TYPE)49.0f * in_row3 - (DATA_TYPE)14.0f * in_row5 + in_row7;
-
- VEC_DATA_TYPE(DATA_TYPE, 8)
- out0, out1, out2, out3, out4, out5, out6, out7;
-
- OUTPUT_ROW_2x2_7x7(out0, tmp0, comm_fact0);
- OUTPUT_ROW_2x2_7x7(out1, tmp1, comm_fact0);
- OUTPUT_ROW_2x2_7x7(out2, tmp2, comm_fact0);
- OUTPUT_ROW_2x2_7x7(out3, tmp3, comm_fact0);
- OUTPUT_ROW_2x2_7x7(out4, tmp4, comm_fact0);
- OUTPUT_ROW_2x2_7x7(out5, tmp5, comm_fact0);
- OUTPUT_ROW_2x2_7x7(out6, tmp6, comm_fact0);
- OUTPUT_ROW_2x2_7x7(out7, tmp7, comm_fact0);
-
-#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-
- // Store values across the channels
-#if defined(NUM_TILES_Y)
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + (y + z * (int)NUM_TILES_X) * dst_stride_y + b * dst_stride_w;
-#else /* NUM_TILES_Y */
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + (y + z * (int)NUM_TILES_X) * dst_stride_y;
-#endif /* NUM_TILES_Y */
-
- *((__global DATA_TYPE *)(dst_addr + 0 * dst_stride_z)) = out0.s0;
- *((__global DATA_TYPE *)(dst_addr + 1 * dst_stride_z)) = out0.s1;
- *((__global DATA_TYPE *)(dst_addr + 2 * dst_stride_z)) = out0.s2;
- *((__global DATA_TYPE *)(dst_addr + 3 * dst_stride_z)) = out0.s3;
- *((__global DATA_TYPE *)(dst_addr + 4 * dst_stride_z)) = out0.s4;
- *((__global DATA_TYPE *)(dst_addr + 5 * dst_stride_z)) = out0.s5;
- *((__global DATA_TYPE *)(dst_addr + 6 * dst_stride_z)) = out0.s6;
- *((__global DATA_TYPE *)(dst_addr + 7 * dst_stride_z)) = out0.s7;
-
-#if !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
- *((__global DATA_TYPE *)(dst_addr + 8 * dst_stride_z)) = out1.s0;
- *((__global DATA_TYPE *)(dst_addr + 9 * dst_stride_z)) = out1.s1;
- *((__global DATA_TYPE *)(dst_addr + 10 * dst_stride_z)) = out1.s2;
- *((__global DATA_TYPE *)(dst_addr + 11 * dst_stride_z)) = out1.s3;
- *((__global DATA_TYPE *)(dst_addr + 12 * dst_stride_z)) = out1.s4;
- *((__global DATA_TYPE *)(dst_addr + 13 * dst_stride_z)) = out1.s5;
- *((__global DATA_TYPE *)(dst_addr + 14 * dst_stride_z)) = out1.s6;
- *((__global DATA_TYPE *)(dst_addr + 15 * dst_stride_z)) = out1.s7;
- *((__global DATA_TYPE *)(dst_addr + 16 * dst_stride_z)) = out2.s0;
- *((__global DATA_TYPE *)(dst_addr + 17 * dst_stride_z)) = out2.s1;
- *((__global DATA_TYPE *)(dst_addr + 18 * dst_stride_z)) = out2.s2;
- *((__global DATA_TYPE *)(dst_addr + 19 * dst_stride_z)) = out2.s3;
- *((__global DATA_TYPE *)(dst_addr + 20 * dst_stride_z)) = out2.s4;
- *((__global DATA_TYPE *)(dst_addr + 21 * dst_stride_z)) = out2.s5;
- *((__global DATA_TYPE *)(dst_addr + 22 * dst_stride_z)) = out2.s6;
- *((__global DATA_TYPE *)(dst_addr + 23 * dst_stride_z)) = out2.s7;
- *((__global DATA_TYPE *)(dst_addr + 24 * dst_stride_z)) = out3.s0;
- *((__global DATA_TYPE *)(dst_addr + 25 * dst_stride_z)) = out3.s1;
- *((__global DATA_TYPE *)(dst_addr + 26 * dst_stride_z)) = out3.s2;
- *((__global DATA_TYPE *)(dst_addr + 27 * dst_stride_z)) = out3.s3;
- *((__global DATA_TYPE *)(dst_addr + 28 * dst_stride_z)) = out3.s4;
- *((__global DATA_TYPE *)(dst_addr + 29 * dst_stride_z)) = out3.s5;
- *((__global DATA_TYPE *)(dst_addr + 30 * dst_stride_z)) = out3.s6;
- *((__global DATA_TYPE *)(dst_addr + 31 * dst_stride_z)) = out3.s7;
- *((__global DATA_TYPE *)(dst_addr + 32 * dst_stride_z)) = out4.s0;
- *((__global DATA_TYPE *)(dst_addr + 33 * dst_stride_z)) = out4.s1;
- *((__global DATA_TYPE *)(dst_addr + 34 * dst_stride_z)) = out4.s2;
- *((__global DATA_TYPE *)(dst_addr + 35 * dst_stride_z)) = out4.s3;
- *((__global DATA_TYPE *)(dst_addr + 36 * dst_stride_z)) = out4.s4;
- *((__global DATA_TYPE *)(dst_addr + 37 * dst_stride_z)) = out4.s5;
- *((__global DATA_TYPE *)(dst_addr + 38 * dst_stride_z)) = out4.s6;
- *((__global DATA_TYPE *)(dst_addr + 39 * dst_stride_z)) = out4.s7;
- *((__global DATA_TYPE *)(dst_addr + 40 * dst_stride_z)) = out5.s0;
- *((__global DATA_TYPE *)(dst_addr + 41 * dst_stride_z)) = out5.s1;
- *((__global DATA_TYPE *)(dst_addr + 42 * dst_stride_z)) = out5.s2;
- *((__global DATA_TYPE *)(dst_addr + 43 * dst_stride_z)) = out5.s3;
- *((__global DATA_TYPE *)(dst_addr + 44 * dst_stride_z)) = out5.s4;
- *((__global DATA_TYPE *)(dst_addr + 45 * dst_stride_z)) = out5.s5;
- *((__global DATA_TYPE *)(dst_addr + 46 * dst_stride_z)) = out5.s6;
- *((__global DATA_TYPE *)(dst_addr + 47 * dst_stride_z)) = out5.s7;
- *((__global DATA_TYPE *)(dst_addr + 48 * dst_stride_z)) = out6.s0;
- *((__global DATA_TYPE *)(dst_addr + 49 * dst_stride_z)) = out6.s1;
- *((__global DATA_TYPE *)(dst_addr + 50 * dst_stride_z)) = out6.s2;
- *((__global DATA_TYPE *)(dst_addr + 51 * dst_stride_z)) = out6.s3;
- *((__global DATA_TYPE *)(dst_addr + 52 * dst_stride_z)) = out6.s4;
- *((__global DATA_TYPE *)(dst_addr + 53 * dst_stride_z)) = out6.s5;
- *((__global DATA_TYPE *)(dst_addr + 54 * dst_stride_z)) = out6.s6;
- *((__global DATA_TYPE *)(dst_addr + 55 * dst_stride_z)) = out6.s7;
- *((__global DATA_TYPE *)(dst_addr + 56 * dst_stride_z)) = out7.s0;
- *((__global DATA_TYPE *)(dst_addr + 57 * dst_stride_z)) = out7.s1;
- *((__global DATA_TYPE *)(dst_addr + 58 * dst_stride_z)) = out7.s2;
- *((__global DATA_TYPE *)(dst_addr + 59 * dst_stride_z)) = out7.s3;
- *((__global DATA_TYPE *)(dst_addr + 60 * dst_stride_z)) = out7.s4;
- *((__global DATA_TYPE *)(dst_addr + 61 * dst_stride_z)) = out7.s5;
- *((__global DATA_TYPE *)(dst_addr + 62 * dst_stride_z)) = out7.s6;
- *((__global DATA_TYPE *)(dst_addr + 63 * dst_stride_z)) = out7.s7;
-#endif // !defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-}
-#endif // defined(SRC_DIM_1) && defined(SRC_DIM_2)
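The comm_fact*/OUTPUT_ROW_2x2_7x7 arithmetic in the kernel above is an expanded, strength-reduced form of the standard Winograd input transform. As a reference sketch only (general formulation; the exact transform matrix is whatever those macros encode and is not restated here):

\[
V \;=\; B^{\mathsf T}\, d\, B, \qquad d \in \mathbb{R}^{(m+r-1)\times(m+r-1)}
\]

For this kernel the output tile is m = 2 and the kernel size is r = 7, so each work item reads a (2+7-1) x (2+7-1) = 8x8 input tile (the eight in_row vectors) and stores the 64 transformed values across consecutive Z planes of the destination, while the _HORIZONTAL/_VERTICAL variants apply only one side of the product along a single row or column.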
-
-#if defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
-/** This OpenCL kernel computes the input transform when the kernel size is 3x1 and the output tile is 2x1
- *
- * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (i.e.-DNUM_TILES_X=5).
- * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (i.e.-DPAD_LEFT=1 and -DPAD_TOP=0).
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=2
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=1
- * @note -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- */
-__kernel void winograd_input_transform_2x1_3x1_stepz1_nchw(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- winograd_input_transform_2x2_3x3_stepz1_nchw(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_offset_first_element_in_bytes,
- src_stride_w,
- dst_stride_w);
-}
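The @note lines above list the -D options each of these kernels expects at compile time. A minimal host-side sketch of how they could be assembled into an OpenCL build string (the helper and its parameters are illustrative only; just the macro names come from the kernels themselves):

#include <string>

// Illustrative only: concatenate the compile-time defines documented above for
// a Winograd input-transform kernel into an OpenCL build-options string.
static std::string winograd_input_transform_build_options(int num_tiles_x,
                                                          int pad_left,
                                                          int pad_top,
                                                          int output_tile_w,
                                                          int output_tile_h,
                                                          bool horizontal,
                                                          const char *data_type) // "float" or "half"
{
    std::string opts;
    opts += " -DNUM_TILES_X=" + std::to_string(num_tiles_x);
    opts += " -DPAD_LEFT=" + std::to_string(pad_left);
    opts += " -DPAD_TOP=" + std::to_string(pad_top);
    opts += " -DOUTPUT_TILE_W=" + std::to_string(output_tile_w);
    opts += " -DOUTPUT_TILE_H=" + std::to_string(output_tile_h);
    opts += std::string(" -DDATA_TYPE=") + data_type;
    if(horizontal)
    {
        opts += " -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL";
    }
    return opts;
}

// The resulting string would be passed as the `options` argument of
// clBuildProgram() (or through the library's own kernel-compilation path)
// when building the .cl source that contains these kernels.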
-
-/** This OpenCL kernel computes the input transform when the kernel size is 3x1, the output tile is 2x1 and the number of channels is multiple of 2
- *
- * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (i.e.-DNUM_TILES_X=5).
- * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (i.e.-DPAD_LEFT=1 and -DPAD_TOP=0).
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=2
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=1
- * @note -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- */
-__kernel void winograd_input_transform_2x1_3x1_stepz2_nchw(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- winograd_input_transform_2x2_3x3_stepz2_nchw(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_offset_first_element_in_bytes,
- src_stride_w,
- dst_stride_w);
-}
-
-/** This OpenCL kernel computes the input transform when the kernel size is 3x1 and the output tile is 4x1
- *
- * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (i.e.-DNUM_TILES_X=5).
- * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (i.e.-DPAD_LEFT=1 and -DPAD_TOP=0).
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=1
- * @note -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- */
-__kernel void winograd_input_transform_4x1_3x1_stepz1_nchw(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- winograd_input_transform_4x4_3x3_stepz1_nchw(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_offset_first_element_in_bytes,
- src_stride_w,
- dst_stride_w);
-}
-
-/** This OpenCL kernel computes the input transform when the kernel size is 5x1 and the output tile is 4x1 when the data layout is NCHW
- *
- * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (i.e.-DNUM_TILES_X=5).
- * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (i.e.-DPAD_LEFT=1 and -DPAD_TOP=0).
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=1
- * @note -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- */
-__kernel void winograd_input_transform_4x1_5x1_stepz1_nchw(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- winograd_input_transform_4x4_5x5_stepz1_nchw(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_offset_first_element_in_bytes,
- src_stride_w,
- dst_stride_w);
-}
-
-#if defined(SRC_DIM_1) && defined(SRC_DIM_2)
-/** This OpenCL kernel computes the input transform when the kernel size is 3x1 and the output tile is 4x1 for data layout NHWC
- *
- * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (i.e.-DNUM_TILES_X=5).
- * @note Dimension one of the input tensor (width for NHWC data layout) must be passed at compile time using -DSRC_DIM_1 (e.g. -DSRC_DIM_1=112)
- * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM_2 (e.g. -DSRC_DIM_2=112)
- * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (i.e.-DPAD_LEFT=1 and -DPAD_TOP=0).
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=1
- * @note -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- */
-__kernel void winograd_input_transform_4x1_3x1_stepz1_nhwc(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- winograd_input_transform_4x4_3x3_stepz1_nhwc(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_offset_first_element_in_bytes,
- src_stride_w,
- dst_stride_w);
-}
-
-/** This OpenCL kernel computes the input transform when the kernel size is 5x1 and the output tile is 4x1 for data layout NHWC
- *
- * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (i.e.-DNUM_TILES_X=5).
- * @note Dimension one of the input tensor (width for NHWC data layout) must be passed at compile time using -DSRC_DIM_1 (e.g. -DSRC_DIM_1=112)
- * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM_2 (e.g. -DSRC_DIM_2=112)
- * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (i.e.-DPAD_LEFT=1 and -DPAD_TOP=0).
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=1
- * @note -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- */
-__kernel void winograd_input_transform_4x1_5x1_stepz1_nhwc(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- winograd_input_transform_4x4_5x5_stepz1_nhwc(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_offset_first_element_in_bytes,
- src_stride_w,
- dst_stride_w);
-}
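NUM_TILES_X (and NUM_TILES_Y for the batched NHWC variants) count the output tiles along the width and height of the convolution output. Winograd convolution assumes unit stride, so both values follow from the usual convolution output-size formula; a small sketch, with every name below hypothetical:

// Illustrative only: number of Winograd output tiles along one spatial
// dimension, assuming stride 1.
static int num_winograd_tiles(int input_size, int pad_before, int pad_after,
                              int kernel_size, int output_tile_size)
{
    const int output_size = input_size + pad_before + pad_after - kernel_size + 1; // conv output size, stride 1
    return (output_size + output_tile_size - 1) / output_tile_size;                // ceiling division
}

// Example: a 112x112 input, 7x7 kernel, 2x2 output tile and padding 3 on each
// side gives output_size = 112 + 3 + 3 - 7 + 1 = 112, i.e. 56 tiles per axis,
// so the kernel would be built with -DNUM_TILES_X=56 (and -DNUM_TILES_Y=56).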
-
-/** This OpenCL kernel computes the input transform when the kernel size is 7x1 and the output tile is 2x1 for data layout NHWC
- *
- * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (i.e.-DNUM_TILES_X=7).
- * @note Dimension one of the input tensor (width for NHWC data layout) must be passed at compile time using -DSRC_DIM_1 (e.g. -DSRC_DIM_1=112)
- * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM_2 (e.g. -DSRC_DIM_2=112)
- * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (i.e.-DPAD_LEFT=1 and -DPAD_TOP=0).
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=2
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=1
- * @note -DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- */
-__kernel void winograd_input_transform_2x1_7x1_stepz1_nhwc(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- winograd_input_transform_2x2_7x7_stepz1_nhwc(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_offset_first_element_in_bytes,
- src_stride_w,
- dst_stride_w);
-}
-#endif // defined(SRC_DIM_1) && defined(SRC_DIM_2)
-#endif // defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
-
-#if defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-/** This OpenCL kernel computes the input transform when the kernel size is 1x3 and the output tile is 1x2
- *
- * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (i.e.-DNUM_TILES_X=5).
- * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (i.e.-DPAD_LEFT=1 and -DPAD_TOP=0).
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=1
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=2
- * @note -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- */
-__kernel void winograd_input_transform_1x2_1x3_stepz1_nchw(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- winograd_input_transform_2x2_3x3_stepz1_nchw(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_offset_first_element_in_bytes,
- src_stride_w,
- dst_stride_w);
-}
-
-/** This OpenCL kernel computes the input transform when the kernel size is 1x3, the output tile is 1x2 and the number of channels is multiple of 2
- *
- * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (i.e.-DNUM_TILES_X=5).
- * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (i.e.-DPAD_LEFT=1 and -DPAD_TOP=0).
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=1
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=2
- * @note -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- */
-__kernel void winograd_input_transform_1x2_1x3_stepz2_nchw(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- winograd_input_transform_2x2_3x3_stepz2_nchw(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_offset_first_element_in_bytes,
- src_stride_w,
- dst_stride_w);
-}
-
-/** This OpenCL kernel computes the input transform when the kernel size is 1x3 and the output tile is 1x4
- *
- * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (i.e.-DNUM_TILES_X=5).
- * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (i.e.-DPAD_LEFT=1 and -DPAD_TOP=0).
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=1
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
- * @note -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- */
-__kernel void winograd_input_transform_1x4_1x3_stepz1_nchw(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- winograd_input_transform_4x4_3x3_stepz1_nchw(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_offset_first_element_in_bytes,
- src_stride_w,
- dst_stride_w);
-}
-
-/** This OpenCL kernel computes the input transform when the kernel size is 1x5 and the output tile is 1x4
- *
- * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (i.e.-DNUM_TILES_X=5).
- * @note The pad left and pad top must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (i.e.-DPAD_LEFT=1 and -DPAD_TOP=0).
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=1
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
- * @note -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z                           src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z                           dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- */
-__kernel void winograd_input_transform_1x4_1x5_stepz1_nchw(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- winograd_input_transform_4x4_5x5_stepz1_nchw(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_offset_first_element_in_bytes,
- src_stride_w,
- dst_stride_w);
-}
-
-#if defined(SRC_DIM_1) && defined(SRC_DIM_2)
-/** This OpenCL kernel computes the input transform when the kernel size is 1x3 and the output tile is 1x4 for data layout NHWC
- *
- * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (e.g. -DNUM_TILES_X=5).
- * @note Dimension one of the input tensor (width for NHWC data layout) must be passed at compile time using -DSRC_DIM_1 (e.g. -DSRC_DIM_1=112)
- * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM_2 (e.g. -DSRC_DIM_2=112)
- * @note The left padding and top padding must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=1 and -DPAD_TOP=0).
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=1
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
- * @note -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z                           src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z                           dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- */
-__kernel void winograd_input_transform_1x4_1x3_stepz1_nhwc(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- winograd_input_transform_4x4_3x3_stepz1_nhwc(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_offset_first_element_in_bytes,
- src_stride_w,
- dst_stride_w);
-}
-
-/** This OpenCL kernel computes the input transform when the kernel size is 1x5 and the output tile is 1x4 for data layout NHWC
- *
- * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (e.g. -DNUM_TILES_X=5).
- * @note Dimension one of the input tensor (width for NHWC data layout) must be passed at compile time using -DSRC_DIM_1 (e.g. -DSRC_DIM_1=112)
- * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM_2 (e.g. -DSRC_DIM_2=112)
- * @note The left padding and top padding must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=1 and -DPAD_TOP=0).
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=1
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
- * @note -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z                           src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z                           dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- */
-__kernel void winograd_input_transform_1x4_1x5_stepz1_nhwc(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- winograd_input_transform_4x4_5x5_stepz1_nhwc(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_offset_first_element_in_bytes,
- src_stride_w,
- dst_stride_w);
-}
-
-/** This OpenCL kernel computes the input transform when the kernel size is 1x7 and the output tile is 1x2 for data layout NHWC
- *
- * @note The number of tiles in the x axis must be passed at compile time using -DNUM_TILES_X (e.g. -DNUM_TILES_X=7).
- * @note Dimension one of the input tensor (width for NHWC data layout) must be passed at compile time using -DSRC_DIM_1 (e.g. -DSRC_DIM_1=112)
- * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM_2 (e.g. -DSRC_DIM_2=112)
- * @note The left padding and top padding must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=1 and -DPAD_TOP=0).
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=1
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=2
- * @note -DWINOGRAD_INPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z                           src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z                           dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- */
-__kernel void winograd_input_transform_1x2_1x7_stepz1_nhwc(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- uint src_stride_w,
- uint dst_stride_w)
-{
- winograd_input_transform_2x2_7x7_stepz1_nhwc(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_offset_first_element_in_bytes,
- src_stride_w,
- dst_stride_w);
-}
-#endif // defined(SRC_DIM_1) && defined(SRC_DIM_2)
-#endif // defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
-#endif // defined(NUM_TILES_X) && defined(PAD_LEFT) && defined(PAD_TOP) && defined(OUTPUT_TILE_W) && defined(OUTPUT_TILE_H)
diff --git a/src/core/CL/cl_kernels/winograd_output_transform.cl b/src/core/CL/cl_kernels/winograd_output_transform.cl
deleted file mode 100644
index 0a7b5f50b2..0000000000
--- a/src/core/CL/cl_kernels/winograd_output_transform.cl
+++ /dev/null
@@ -1,2266 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers.h"
-
-#include "activation_float_helpers.h"
-
-#if defined(NUM_TILES_X) && defined(OUTPUT_TILE_W) && defined(OUTPUT_TILE_H)
-#if defined(VEC_SIZE) && VEC_SIZE == 2
-/** This OpenCL kernel performs Winograd output transform when the output tile is 2x2/2x1 or 1x2, the filter size 3x3/3x1 or 1x3 and the data layout is NCHW
- *
- * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=2
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=2
- * @note If this kernel is used to perform Winograd output transform 3x1, -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note If this kernel is used to perform Winograd output transform 1x3, -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. Accepted values are -DVEC_SIZE=2 (for output_tile_size 2x2, 2x1, 1x2) and -DVEC_SIZE=4 (for output_tile_size 4x4, 4x1, 1x4)
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  dst_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in]  dst_stride_w                          Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_output_transform_2x2_3x3_nchw(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(bias)
-#endif // defined(HAS_BIAS)
-)
-{
- // Each thread stores a 2x2/2x1 or 1x2 tile according to the filter size
-#if defined(SRC_DEPTH)
- Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DEPTH);
- const __global uchar *src_addr = tensor4D_offset(&src, 0, 0, 0, 0);
-#else /* defined(SRC_DEPTH) */
- Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
- const __global uchar *src_addr = tensor3D_offset(&src, 0, 0, 0);
-#endif /* defined(SRC_DEPTH) */
-
- // Load the values across the 16 or 4 channels to compose the 4x4 or 4x1 tile
- DATA_TYPE d00 = *((__global DATA_TYPE *)(src_addr + 0 * src_stride_z));
- DATA_TYPE d01 = *((__global DATA_TYPE *)(src_addr + 1 * src_stride_z));
- DATA_TYPE d02 = *((__global DATA_TYPE *)(src_addr + 2 * src_stride_z));
- DATA_TYPE d03 = *((__global DATA_TYPE *)(src_addr + 3 * src_stride_z));
-
-#if defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
- // Compute the 2x1 or 1x2 output tile
- // out00 = d00 + d01 + d02
- // out01 = d01 - d02 - d03
-
- float out00 = d00 + d01 + d02;
- float out01 = d01 - d02 - d03;
-#else // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
- DATA_TYPE d10 = *((__global DATA_TYPE *)(src_addr + 4 * src_stride_z));
- DATA_TYPE d11 = *((__global DATA_TYPE *)(src_addr + 5 * src_stride_z));
- DATA_TYPE d12 = *((__global DATA_TYPE *)(src_addr + 6 * src_stride_z));
- DATA_TYPE d13 = *((__global DATA_TYPE *)(src_addr + 7 * src_stride_z));
-
- DATA_TYPE d20 = *((__global DATA_TYPE *)(src_addr + 8 * src_stride_z));
- DATA_TYPE d21 = *((__global DATA_TYPE *)(src_addr + 9 * src_stride_z));
- DATA_TYPE d22 = *((__global DATA_TYPE *)(src_addr + 10 * src_stride_z));
- DATA_TYPE d23 = *((__global DATA_TYPE *)(src_addr + 11 * src_stride_z));
-
- DATA_TYPE d30 = *((__global DATA_TYPE *)(src_addr + 12 * src_stride_z));
- DATA_TYPE d31 = *((__global DATA_TYPE *)(src_addr + 13 * src_stride_z));
- DATA_TYPE d32 = *((__global DATA_TYPE *)(src_addr + 14 * src_stride_z));
- DATA_TYPE d33 = *((__global DATA_TYPE *)(src_addr + 15 * src_stride_z));
-
- // Compute the 2x2 output tile
- float k0 = d01 + d11 + d21;
- float k1 = d02 + d12 + d22;
- float k2 = d11 - d21 - d31;
- float k3 = d12 - d22 - d32;
-
- // out00 = d00 + d10 + d20 + d01 + d11 + d21 + d02 + d12 + d22
- // out01 = d01 + d11 + d21 - (d02 + d12 + d22) - (d03 + d13 + d23)
- // out10 = d10 - d20 - d30 + (d11 - d21 - d31) + (d12 - d22 - d32)
- // out11 = d11 - d21 - d31 - (d12 - d22 - d32) - (d13 - d23 - d33)
-
- float out00 = d10;
- float out01 = -d13;
- float out10 = d10;
- float out11 = -d13;
-
- out00 += d00 + d20 + k0 + k1;
- out01 += k0 - k1 - (d03 + d23);
- out10 += -d20 - d30 + k2 + k3;
- out11 += k2 - k3 + d23 + d33;
-#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
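-
- // Both branches above are the standard Winograd F(2x2, 3x3) inverse transform: Y = A^T * M * A for
- // the 2x2 tile (or Y = A^T * m for the 2x1/1x2 cases), with
- //         | 1  1  1  0 |
- //   A^T = | 0  1 -1 -1 |
- // expanded element-wise into the out00/out01/out10/out11 expressions written out above.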
-
- int y_in = get_global_id(1);
- int x_out = (y_in % NUM_TILES_X) * OUTPUT_TILE_W;
- int y_out = (y_in / NUM_TILES_X) * OUTPUT_TILE_H;
- int z_out = get_global_id(0);
-#if defined(SRC_DEPTH)
- int batch = get_global_id(2) / SRC_DEPTH;
-#endif /* defined(SRC_DEPTH) */
-
-#if defined(HAS_BIAS)
- // Add bias
- Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);
-
- float b = (float) * ((__global DATA_TYPE *)(vector_offset(&bias, z_out)));
-
- out00 += (float)b;
- out01 += (float)b;
-#endif // defined(HAS_BIAS)
-
- // Get output address
-#if defined(SRC_DEPTH)
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_out * sizeof(DATA_TYPE) + y_out * dst_stride_y + z_out * dst_stride_z + batch * dst_stride_w;
-#else /* defined(SRC_DEPTH) */
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_out * sizeof(DATA_TYPE) + y_out * dst_stride_y + z_out * dst_stride_z;
-#endif /* defined(SRC_DEPTH) */
-
- // Store the output tile
-#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
- const VEC_DATA_TYPE(DATA_TYPE, 2)
- out0_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 2))(out00, out01), VEC_DATA_TYPE(DATA_TYPE, 2)), A_VAL, B_VAL);
- *((__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y)) = out0_dt.s0;
- *((__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y)) = out0_dt.s1;
-#else // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
- vstore2(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 2))(out00, out01), VEC_DATA_TYPE(DATA_TYPE, 2)), A_VAL, B_VAL), 0,
- (__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y));
-#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
-#if !defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-#if defined(HAS_BIAS)
- // Add bias
- out10 += (float)b;
- out11 += (float)b;
-#endif // defined(HAS_BIAS)
- vstore2(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 2))(out10, out11), VEC_DATA_TYPE(DATA_TYPE, 2)), A_VAL, B_VAL), 0,
- (__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y));
-#endif // !defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-}
-
-#define COMPUTE_TMP_COL_2x2_7x7(col, d0, d1, d2, d3, d4, d5, d6, d7) \
- ({ \
- col.s0 = d0 + d1 + d2 + d3 + d4 + d5 + d6; \
- col.s1 = -d1 + d2 - 2 * d3 + 2 * d4 - 3 * d5 + 3 * d6 + d7; \
- })
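-
-// The macro above is one column-reduction of the F(2x2, 7x7) inverse transform: each 8-element
-// column of the input tile is multiplied by
-//   A^T = | 1  1  1  1  1  1  1  0 |
-//         | 0 -1  1 -2  2 -3  3  1 |
-// and the same two weight rows are then applied across the resulting columns to obtain the 2x2 tile.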
-
-/** This OpenCL kernel performs Winograd output transform when the output tile is 2x2/2x1 or 1x2, the filter size 7x7/7x1 or 1x7 and the data layout is NHWC
- *
- * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=2
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=2
- * @note The width of the output tensor must be passed at compile time using -DDST_WIDTH: e.g. -DDST_WIDTH=24
- * @note The height of the output tensor must be passed at compile time using -DDST_HEIGHT: e.g. -DDST_HEIGHT=32
- * @note If this kernel is used to perform Winograd output transform 7x1, -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note If this kernel is used to perform Winograd output transform 1x7, -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  dst_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in]  dst_stride_w                          Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in]  dst_offset_first_element_in_bytes     The offset of the first element in the destination tensor
- * @param[in]  dst_size                              Size of the destination tensor, minus the last padding
- */
-__kernel void winograd_output_transform_2x2_7x7_nhwc(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst),
-#if defined(HAS_BIAS)
- VECTOR_DECLARATION(bias),
-#endif // defined(HAS_BIAS)
- int dst_size)
-{
- // Each thread stores a 2x2/2x1 or 1x2 tile
-#if defined(SRC_DEPTH)
- Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DEPTH);
- const __global uchar *src_addr = tensor4D_offset(&src, 0, 0, 0, 0);
-#else /* defined(SRC_DEPTH) */
- Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
- const __global uchar *src_addr = tensor3D_offset(&src, 0, 0, 0);
-#endif /* defined(SRC_DEPTH) */
-
- int y_in = get_global_id(1);
- int x_out = get_global_id(0);
- int y_out = (y_in % NUM_TILES_X) * OUTPUT_TILE_W;
- int z_out = (y_in / NUM_TILES_X) * OUTPUT_TILE_H;
-#if defined(SRC_DEPTH)
- int batch = get_global_id(2) / SRC_DEPTH;
-#endif /* defined(SRC_DEPTH) */
-
- __global unsigned char *dst_base_ptr = dst_ptr + dst_offset_first_element_in_bytes + x_out * sizeof(DATA_TYPE);
-
-#if defined(SRC_DEPTH)
- dst_base_ptr += batch * dst_stride_w;
-#endif // defined(SRC_DEPTH)
-
- // Load the values across the channels to compose the input tile
- DATA_TYPE d00 = *((__global DATA_TYPE *)(src_addr + 0 * src_stride_z));
- DATA_TYPE d01 = *((__global DATA_TYPE *)(src_addr + 1 * src_stride_z));
- DATA_TYPE d02 = *((__global DATA_TYPE *)(src_addr + 2 * src_stride_z));
- DATA_TYPE d03 = *((__global DATA_TYPE *)(src_addr + 3 * src_stride_z));
- DATA_TYPE d04 = *((__global DATA_TYPE *)(src_addr + 4 * src_stride_z));
- DATA_TYPE d05 = *((__global DATA_TYPE *)(src_addr + 5 * src_stride_z));
- DATA_TYPE d06 = *((__global DATA_TYPE *)(src_addr + 6 * src_stride_z));
- DATA_TYPE d07 = *((__global DATA_TYPE *)(src_addr + 7 * src_stride_z));
-
-#if defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
- // Compute out00 and out01
- float out00 = d00 + d01 + d02 + d03 + d04 + d05 + d06;
- float out01 = -d01 + d02 - 2.0f * d03 + 2.0f * d04 - 3.0f * d05 + 3.0f * d06 + d07;
-
-#if defined(HAS_BIAS)
- // Add bias
- Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);
-
- float b = (float) * ((__global DATA_TYPE *)(vector_offset(&bias, x_out)));
-
- out00 += (float)b;
- out01 += (float)b;
-#endif // defined(HAS_BIAS)
-
- // Store the output tile
-#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
- dst_base_ptr += y_out * dst_stride_y;
-
- int2 offset_z = min((int2)z_out + (int2)(0, 1), (int2)((int)DST_HEIGHT - 1)) * (int2)dst_stride_z;
-
- VEC_DATA_TYPE(DATA_TYPE, 2)
- out0_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 2))(out00, out01), VEC_DATA_TYPE(DATA_TYPE, 2)), A_VAL, B_VAL);
-
- // To avoid an out-of-bounds write, we store the elements in reverse order so that the invalid element
- // is overwritten with the valid one
- *(__global DATA_TYPE *)(dst_base_ptr + offset_z.s1) = out0_dt.s1;
- *(__global DATA_TYPE *)(dst_base_ptr + offset_z.s0) = out0_dt.s0;
-#else // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
- dst_base_ptr += z_out * dst_stride_z;
-
- int2 offset_y = min((int2)y_out + (int2)(0, 1), (int2)((int)DST_WIDTH - 1)) * (int2)dst_stride_y;
-
- VEC_DATA_TYPE(DATA_TYPE, 2)
- out0_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 2))(out00, out01), VEC_DATA_TYPE(DATA_TYPE, 2)), A_VAL,
- B_VAL);
-
- // To avoid an out-of-bounds write, we store the elements in reverse order so that the invalid element
- // is overwritten with the valid one
- *(__global DATA_TYPE *)(dst_base_ptr + offset_y.s1) = out0_dt.s1;
- *(__global DATA_TYPE *)(dst_base_ptr + offset_y.s0) = out0_dt.s0;
-#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
-#else // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
- DATA_TYPE d10 = *((__global DATA_TYPE *)(src_addr + 8 * src_stride_z));
- DATA_TYPE d11 = *((__global DATA_TYPE *)(src_addr + 9 * src_stride_z));
- DATA_TYPE d12 = *((__global DATA_TYPE *)(src_addr + 10 * src_stride_z));
- DATA_TYPE d13 = *((__global DATA_TYPE *)(src_addr + 11 * src_stride_z));
- DATA_TYPE d14 = *((__global DATA_TYPE *)(src_addr + 12 * src_stride_z));
- DATA_TYPE d15 = *((__global DATA_TYPE *)(src_addr + 13 * src_stride_z));
- DATA_TYPE d16 = *((__global DATA_TYPE *)(src_addr + 14 * src_stride_z));
- DATA_TYPE d17 = *((__global DATA_TYPE *)(src_addr + 15 * src_stride_z));
-
- DATA_TYPE d20 = *((__global DATA_TYPE *)(src_addr + 16 * src_stride_z));
- DATA_TYPE d21 = *((__global DATA_TYPE *)(src_addr + 17 * src_stride_z));
- DATA_TYPE d22 = *((__global DATA_TYPE *)(src_addr + 18 * src_stride_z));
- DATA_TYPE d23 = *((__global DATA_TYPE *)(src_addr + 19 * src_stride_z));
- DATA_TYPE d24 = *((__global DATA_TYPE *)(src_addr + 20 * src_stride_z));
- DATA_TYPE d25 = *((__global DATA_TYPE *)(src_addr + 21 * src_stride_z));
- DATA_TYPE d26 = *((__global DATA_TYPE *)(src_addr + 22 * src_stride_z));
- DATA_TYPE d27 = *((__global DATA_TYPE *)(src_addr + 23 * src_stride_z));
-
- DATA_TYPE d30 = *((__global DATA_TYPE *)(src_addr + 24 * src_stride_z));
- DATA_TYPE d31 = *((__global DATA_TYPE *)(src_addr + 25 * src_stride_z));
- DATA_TYPE d32 = *((__global DATA_TYPE *)(src_addr + 26 * src_stride_z));
- DATA_TYPE d33 = *((__global DATA_TYPE *)(src_addr + 27 * src_stride_z));
- DATA_TYPE d34 = *((__global DATA_TYPE *)(src_addr + 28 * src_stride_z));
- DATA_TYPE d35 = *((__global DATA_TYPE *)(src_addr + 29 * src_stride_z));
- DATA_TYPE d36 = *((__global DATA_TYPE *)(src_addr + 30 * src_stride_z));
- DATA_TYPE d37 = *((__global DATA_TYPE *)(src_addr + 31 * src_stride_z));
-
- DATA_TYPE d40 = *((__global DATA_TYPE *)(src_addr + 32 * src_stride_z));
- DATA_TYPE d41 = *((__global DATA_TYPE *)(src_addr + 33 * src_stride_z));
- DATA_TYPE d42 = *((__global DATA_TYPE *)(src_addr + 34 * src_stride_z));
- DATA_TYPE d43 = *((__global DATA_TYPE *)(src_addr + 35 * src_stride_z));
- DATA_TYPE d44 = *((__global DATA_TYPE *)(src_addr + 36 * src_stride_z));
- DATA_TYPE d45 = *((__global DATA_TYPE *)(src_addr + 37 * src_stride_z));
- DATA_TYPE d46 = *((__global DATA_TYPE *)(src_addr + 38 * src_stride_z));
- DATA_TYPE d47 = *((__global DATA_TYPE *)(src_addr + 39 * src_stride_z));
-
- DATA_TYPE d50 = *((__global DATA_TYPE *)(src_addr + 40 * src_stride_z));
- DATA_TYPE d51 = *((__global DATA_TYPE *)(src_addr + 41 * src_stride_z));
- DATA_TYPE d52 = *((__global DATA_TYPE *)(src_addr + 42 * src_stride_z));
- DATA_TYPE d53 = *((__global DATA_TYPE *)(src_addr + 43 * src_stride_z));
- DATA_TYPE d54 = *((__global DATA_TYPE *)(src_addr + 44 * src_stride_z));
- DATA_TYPE d55 = *((__global DATA_TYPE *)(src_addr + 45 * src_stride_z));
- DATA_TYPE d56 = *((__global DATA_TYPE *)(src_addr + 46 * src_stride_z));
- DATA_TYPE d57 = *((__global DATA_TYPE *)(src_addr + 47 * src_stride_z));
-
- DATA_TYPE d60 = *((__global DATA_TYPE *)(src_addr + 48 * src_stride_z));
- DATA_TYPE d61 = *((__global DATA_TYPE *)(src_addr + 49 * src_stride_z));
- DATA_TYPE d62 = *((__global DATA_TYPE *)(src_addr + 50 * src_stride_z));
- DATA_TYPE d63 = *((__global DATA_TYPE *)(src_addr + 51 * src_stride_z));
- DATA_TYPE d64 = *((__global DATA_TYPE *)(src_addr + 52 * src_stride_z));
- DATA_TYPE d65 = *((__global DATA_TYPE *)(src_addr + 53 * src_stride_z));
- DATA_TYPE d66 = *((__global DATA_TYPE *)(src_addr + 54 * src_stride_z));
- DATA_TYPE d67 = *((__global DATA_TYPE *)(src_addr + 55 * src_stride_z));
-
- DATA_TYPE d70 = *((__global DATA_TYPE *)(src_addr + 56 * src_stride_z));
- DATA_TYPE d71 = *((__global DATA_TYPE *)(src_addr + 57 * src_stride_z));
- DATA_TYPE d72 = *((__global DATA_TYPE *)(src_addr + 58 * src_stride_z));
- DATA_TYPE d73 = *((__global DATA_TYPE *)(src_addr + 59 * src_stride_z));
- DATA_TYPE d74 = *((__global DATA_TYPE *)(src_addr + 60 * src_stride_z));
- DATA_TYPE d75 = *((__global DATA_TYPE *)(src_addr + 61 * src_stride_z));
- DATA_TYPE d76 = *((__global DATA_TYPE *)(src_addr + 62 * src_stride_z));
- DATA_TYPE d77 = *((__global DATA_TYPE *)(src_addr + 63 * src_stride_z));
-
- // Compute the 8x2 intermediate tensor
- VEC_DATA_TYPE(float, 2)
- tmp_col0, tmp_col1, tmp_col2, tmp_col3, tmp_col4, tmp_col5, tmp_col6, tmp_col7;
-
- COMPUTE_TMP_COL_2x2_7x7(tmp_col0, d00, d10, d20, d30, d40, d50, d60, d70);
- COMPUTE_TMP_COL_2x2_7x7(tmp_col1, d01, d11, d21, d31, d41, d51, d61, d71);
- COMPUTE_TMP_COL_2x2_7x7(tmp_col2, d02, d12, d22, d32, d42, d52, d62, d72);
- COMPUTE_TMP_COL_2x2_7x7(tmp_col3, d03, d13, d23, d33, d43, d53, d63, d73);
- COMPUTE_TMP_COL_2x2_7x7(tmp_col4, d04, d14, d24, d34, d44, d54, d64, d74);
- COMPUTE_TMP_COL_2x2_7x7(tmp_col5, d05, d15, d25, d35, d45, d55, d65, d75);
- COMPUTE_TMP_COL_2x2_7x7(tmp_col6, d06, d16, d26, d36, d46, d56, d66, d76);
- COMPUTE_TMP_COL_2x2_7x7(tmp_col7, d07, d17, d27, d37, d47, d57, d67, d77);
-
- // Compute the 2x2 output tile
- VEC_DATA_TYPE(float, 2)
- out_col0 = tmp_col0 + tmp_col1 + tmp_col2 + tmp_col3 + tmp_col4 + tmp_col5 + tmp_col6;
- VEC_DATA_TYPE(float, 2)
- out_col1 = -tmp_col1 + tmp_col2 - 2 * tmp_col3 + 2 * tmp_col4 - 3 * tmp_col5 + 3 * tmp_col6 + tmp_col7;
-
-#if defined(HAS_BIAS)
- // Add bias
- Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);
-
- float b = (float) * ((__global DATA_TYPE *)(vector_offset(&bias, x_out)));
-
- out_col0 += (VEC_DATA_TYPE(float, 2))b;
- out_col1 += (VEC_DATA_TYPE(float, 2))b;
-
-#endif // defined(HAS_BIAS)
-
- int2 offset_y = min((int2)y_out + (int2)(0, 1), (int2)((int)DST_WIDTH - 1)) * (int2)dst_stride_y;
- int2 offset_z = min((int2)z_out + (int2)(0, 1), (int2)((int)DST_HEIGHT - 1)) * (int2)dst_stride_z;
-
- // Store the output tile
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- out_col0_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT(out_col0, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)), A_VAL, B_VAL);
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- out_col1_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT(out_col1, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)), A_VAL, B_VAL);
-
- // To avoid an out-of-bounds write, we store the elements in reverse order so that the invalid element
- // is overwritten with the valid one
- *(__global DATA_TYPE *)(dst_base_ptr + offset_y.s1 + offset_z.s1) = out_col1_dt.s1;
- *(__global DATA_TYPE *)(dst_base_ptr + offset_y.s1 + offset_z.s0) = out_col1_dt.s0;
- *(__global DATA_TYPE *)(dst_base_ptr + offset_y.s0 + offset_z.s1) = out_col0_dt.s1;
- *(__global DATA_TYPE *)(dst_base_ptr + offset_y.s0 + offset_z.s0) = out_col0_dt.s0;
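-
- // For example, when y_out addresses the last valid column, offset_y.s1 is clamped to the same byte
- // offset as offset_y.s0, so the out-of-range out_col1 values written first are simply overwritten
- // by the valid out_col0 values stored at the same addresses (and likewise for offset_z along the height).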
-
-#endif // !defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-}
-#endif // defined(VEC_SIZE) && VEC_SIZE == 2
-
-#if defined(VEC_SIZE) && VEC_SIZE == 4
-/** This OpenCL kernel performs Winograd output transform when the output tile is 4x4, 4x1 or 1x4, the filter size 3x3, 3x1 or 1x3 and the data layout is NCHW
- *
- * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
- * @note If this kernel is used to perform Winograd output transform 3x1, -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note If this kernel is used to perform Winograd output transform 1x3, -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  dst_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in]  dst_stride_w                          Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_output_transform_4x4_3x3_nchw(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(bias)
-#endif // defined(HAS_BIAS)
-)
-{
- // Each thread stores a 4x4/4x1 or 1x4 tile
-#if defined(SRC_DEPTH)
- Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DEPTH);
- const __global uchar *src_addr = tensor4D_offset(&src, 0, 0, 0, 0);
-#else /* defined(SRC_DEPTH) */
- Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
- const __global uchar *src_addr = tensor3D_offset(&src, 0, 0, 0);
-#endif /* defined(SRC_DEPTH) */
-
- // Load the values across the channels to compose the 6x6 or 6x1 tile
- DATA_TYPE d00 = *((__global DATA_TYPE *)(src_addr + 0 * src_stride_z));
- DATA_TYPE d01 = *((__global DATA_TYPE *)(src_addr + 1 * src_stride_z));
- DATA_TYPE d02 = *((__global DATA_TYPE *)(src_addr + 2 * src_stride_z));
- DATA_TYPE d03 = *((__global DATA_TYPE *)(src_addr + 3 * src_stride_z));
- DATA_TYPE d04 = *((__global DATA_TYPE *)(src_addr + 4 * src_stride_z));
- DATA_TYPE d05 = *((__global DATA_TYPE *)(src_addr + 5 * src_stride_z));
-
-#if defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
- // Compute out00, out01, out02 and out03
- float out00 = d00 + d01 + d02 + d03 + d04;
- float out01 = d01 - d02 + 2.0f * d03 - 2.0f * d04;
- float out02 = d01 + d02 + 4.0f * d03 + 4.0f * d04;
- float out03 = d01 - d02 + 8.0f * d03 - 8.0f * d04 + d05;
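- // These four expressions are the 1D Winograd F(4, 3) inverse transform A^T * d, with
- //         | 1  1  1  1  1  0 |
- //   A^T = | 0  1 -1  2 -2  0 |
- //         | 0  1  1  4  4  0 |
- //         | 0  1 -1  8 -8  1 |
- // applied to the six values d00..d05 loaded above; the branch below applies the same matrix to both
- // the rows and the columns of the full 6x6 tile.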
-#else // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
- DATA_TYPE d10 = *((__global DATA_TYPE *)(src_addr + 6 * src_stride_z));
- DATA_TYPE d11 = *((__global DATA_TYPE *)(src_addr + 7 * src_stride_z));
- DATA_TYPE d12 = *((__global DATA_TYPE *)(src_addr + 8 * src_stride_z));
- DATA_TYPE d13 = *((__global DATA_TYPE *)(src_addr + 9 * src_stride_z));
- DATA_TYPE d14 = *((__global DATA_TYPE *)(src_addr + 10 * src_stride_z));
- DATA_TYPE d15 = *((__global DATA_TYPE *)(src_addr + 11 * src_stride_z));
-
- DATA_TYPE d20 = *((__global DATA_TYPE *)(src_addr + 12 * src_stride_z));
- DATA_TYPE d21 = *((__global DATA_TYPE *)(src_addr + 13 * src_stride_z));
- DATA_TYPE d22 = *((__global DATA_TYPE *)(src_addr + 14 * src_stride_z));
- DATA_TYPE d23 = *((__global DATA_TYPE *)(src_addr + 15 * src_stride_z));
- DATA_TYPE d24 = *((__global DATA_TYPE *)(src_addr + 16 * src_stride_z));
- DATA_TYPE d25 = *((__global DATA_TYPE *)(src_addr + 17 * src_stride_z));
-
- DATA_TYPE d30 = *((__global DATA_TYPE *)(src_addr + 18 * src_stride_z));
- DATA_TYPE d31 = *((__global DATA_TYPE *)(src_addr + 19 * src_stride_z));
- DATA_TYPE d32 = *((__global DATA_TYPE *)(src_addr + 20 * src_stride_z));
- DATA_TYPE d33 = *((__global DATA_TYPE *)(src_addr + 21 * src_stride_z));
- DATA_TYPE d34 = *((__global DATA_TYPE *)(src_addr + 22 * src_stride_z));
- DATA_TYPE d35 = *((__global DATA_TYPE *)(src_addr + 23 * src_stride_z));
-
- DATA_TYPE d40 = *((__global DATA_TYPE *)(src_addr + 24 * src_stride_z));
- DATA_TYPE d41 = *((__global DATA_TYPE *)(src_addr + 25 * src_stride_z));
- DATA_TYPE d42 = *((__global DATA_TYPE *)(src_addr + 26 * src_stride_z));
- DATA_TYPE d43 = *((__global DATA_TYPE *)(src_addr + 27 * src_stride_z));
- DATA_TYPE d44 = *((__global DATA_TYPE *)(src_addr + 28 * src_stride_z));
- DATA_TYPE d45 = *((__global DATA_TYPE *)(src_addr + 29 * src_stride_z));
-
- DATA_TYPE d50 = *((__global DATA_TYPE *)(src_addr + 30 * src_stride_z));
- DATA_TYPE d51 = *((__global DATA_TYPE *)(src_addr + 31 * src_stride_z));
- DATA_TYPE d52 = *((__global DATA_TYPE *)(src_addr + 32 * src_stride_z));
- DATA_TYPE d53 = *((__global DATA_TYPE *)(src_addr + 33 * src_stride_z));
- DATA_TYPE d54 = *((__global DATA_TYPE *)(src_addr + 34 * src_stride_z));
- DATA_TYPE d55 = *((__global DATA_TYPE *)(src_addr + 35 * src_stride_z));
-
- // Compute out00, out01, out02 and out03
- float out00 = (float)d01 + (float)d21 + (float)d41 + (float)d11 + (float)d31;
- float out01 = (float)d01 + (float)d21 + (float)d41 + (float)d11 + (float)d31;
- float out02 = (float)d01 + (float)d21 + (float)d41 + (float)d11 + (float)d31;
- float out03 = (float)d01 + (float)d21 + (float)d41 + (float)d11 + (float)d31;
-
- float k0 = d03 + d04 + d13 + d14 + d23 + d24 + d33 + d34 + d43 + d44;
- float k1 = 2.0f * d03 - 2.0f * d04 + 2.0f * d13 - 2.0f * d14 + 2.0f * d23 - 2.0f * d24 + 2.0f * d33 - 2.0f * d34 + 2.0f * d43 - 2.0f * d44;
-
- out00 += k0 + d00 + d02 + d10 + d12 + d20 + d22 + d30 + d32 + d40 + d42;
- out01 += k1 - d02 - d12 - d22 - d32 - d42;
- out02 += 4.0f * k0 + d02 + d12 + d22 + d32 + d42;
- out03 += 4.0f * k1 - d02 - d12 - d22 - d32 - d42 + d05 + d15 + d25 + d35 + d45;
-
- // Compute out10, out11, out12 and out13
- float out10 = d11 - d21 + 2.0f * d31 - 2.0f * d41;
- float out11 = d11 - d21 + 2.0f * d31 - 2.0f * d41;
- float out12 = d11 - d21 + 2.0f * d31 - 2.0f * d41;
- float out13 = d11 - d21 + 2.0f * d31 - 2.0f * d41;
-
- k0 = d13 + d14 - d23 - d24 + 2.0f * d33 + 2.0f * d34 - 2.0f * d43 - 2.0f * d44;
- k1 = 2.0f * d13 - 2.0f * d14 - 2.0f * d23 + 2.0f * d24 + 4.0f * d33 - 4.0f * d34 - 4.0f * d43 + 4.0f * d44;
-
- out10 += k0 + d10 + d12 - d20 - d22 + 2.0f * d30 + 2.0f * d32 - 2.0f * d40 - 2.0f * d42;
- out11 += k1 - d12 + d22 - 2.0f * d32 + 2.0f * d42;
- out12 += 4.0f * k0 + d12 - d22 + 2.0f * d32 - 2.0f * d42;
- out13 += 4.0f * k1 - d12 + d15 + d22 - d25 - 2.0f * d32 + 2.0f * d35 + 2.0f * d42 - 2.0f * d45;
-
- // Compute out20, out21, out22 and out23
- float out20 = d11 + d21 + 4.0f * d31 + 4.0f * d41;
- float out21 = d11 + d21 + 4.0f * d31 + 4.0f * d41;
- float out22 = d11 + d21 + 4.0f * d31 + 4.0f * d41;
- float out23 = d11 + d21 + 4.0f * d31 + 4.0f * d41;
-
- k0 = d13 + d14 + d23 + d24 + 4.0f * d33 + 4.0f * d34 + 4.0f * d43 + 4.0f * d44;
- k1 = 2.0f * d13 - 2.0f * d14 + 2.0f * d23 - 2.0f * d24 + 8.0f * d33 - 8.0f * d34 + 8.0f * d43 - 8.0f * d44;
-
- out20 += k0 + d10 + d12 + d20 + d22 + 4.0f * d30 + 4.0f * d32 + 4.0f * d40 + 4.0f * d42;
- out21 += k1 - d12 - d22 - 4.0f * d32 - 4.0f * d42;
- out22 += 4.0f * k0 + d12 + d22 + 4.0f * d32 + 4.0f * d42;
- out23 += 4.0f * k1 - d12 + d15 - d22 + d25 - 4.0f * d32 + 4.0f * d35 - 4.0f * d42 + 4.0f * d45;
-
- // Compute out30, out31, out32 and out33
- float out30 = d11 - d21 + 8.0f * d31 - 8.0f * d41 + d51;
- float out31 = d11 - d21 + 8.0f * d31 - 8.0f * d41 + d51;
- float out32 = d11 - d21 + 8.0f * d31 - 8.0f * d41 + d51;
- float out33 = d11 - d21 + 8.0f * d31 - 8.0f * d41 + d51;
-
- k0 = d13 + d14 - d23 - d24 + 8.0f * d33 + 8.0f * d34 - 8.0f * d43 - 8.0f * d44 + d53 + d54;
- k1 = 2.0f * d13 - 2.0f * d14 - 2.0f * d23 + 2.0f * d24 + 16.0f * d33 - 16.0f * d34 - 16.0f * d43 + 16.0f * d44 + 2.0f * d53 - 2.0f * d54;
-
- out30 += k0 + d10 + d12 - d20 - d22 + 8.0f * d30 + 8.0f * d32 - 8.0f * d40 - 8.0f * d42 + d50 + d52;
- out31 += k1 - d12 + d22 - 8.0f * d32 + 8.0f * d42 - d52;
- out32 += 4.0f * k0 + d12 - d22 + 8.0f * d32 - 8.0f * d42 + d52;
- out33 += 4.0f * k1 - d12 + d15 + d22 - d25 - 8.0f * d32 + 8.0f * d35 + 8.0f * d42 - 8.0f * d45 - d52 + d55;
-#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
- int y_in = get_global_id(1);
- int x_out = (y_in % NUM_TILES_X) * OUTPUT_TILE_W;
- int y_out = (y_in / NUM_TILES_X) * OUTPUT_TILE_H;
- int z_out = get_global_id(0);
-#if defined(SRC_DEPTH)
- int batch = get_global_id(2) / SRC_DEPTH;
-#endif /* defined(SRC_DEPTH) */
-
-#if defined(HAS_BIAS)
- // Add bias
- Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);
-
- float b = (float) * ((__global DATA_TYPE *)(vector_offset(&bias, z_out)));
-
- out00 += (float)b;
- out01 += (float)b;
- out02 += (float)b;
- out03 += (float)b;
-#endif // defined(HAS_BIAS)
-
- // Get output address
-#if defined(SRC_DEPTH)
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_out * sizeof(DATA_TYPE) + y_out * dst_stride_y + z_out * dst_stride_z + batch * dst_stride_w;
-#else /* defined(SRC_DEPTH) */
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_out * sizeof(DATA_TYPE) + y_out * dst_stride_y + z_out * dst_stride_z;
-#endif /* defined(SRC_DEPTH) */
-
- // Store the output tile
-#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
- VEC_DATA_TYPE(DATA_TYPE, 4)
- out0_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 4))(out00, out01, out02, out03), VEC_DATA_TYPE(DATA_TYPE, 4)), A_VAL,
- B_VAL);
- *((__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y)) = out0_dt.s0;
- *((__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y)) = out0_dt.s1;
- *((__global DATA_TYPE *)(dst_addr + 2 * dst_stride_y)) = out0_dt.s2;
- *((__global DATA_TYPE *)(dst_addr + 3 * dst_stride_y)) = out0_dt.s3;
-#else // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
- vstore4(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 4))(out00, out01, out02, out03), VEC_DATA_TYPE(DATA_TYPE, 4)), A_VAL, B_VAL), 0,
- (__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y));
-#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
-#if !defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-#if defined(HAS_BIAS)
- // Add bias
- out10 += (float)b;
- out11 += (float)b;
- out12 += (float)b;
- out13 += (float)b;
-
- out20 += (float)b;
- out21 += (float)b;
- out22 += (float)b;
- out23 += (float)b;
-
- out30 += (float)b;
- out31 += (float)b;
- out32 += (float)b;
- out33 += (float)b;
-#endif // defined(HAS_BIAS)
- vstore4(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 4))(out10, out11, out12, out13), VEC_DATA_TYPE(DATA_TYPE, 4)), A_VAL, B_VAL), 0,
- (__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y));
- vstore4(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 4))(out20, out21, out22, out23), VEC_DATA_TYPE(DATA_TYPE, 4)), A_VAL, B_VAL), 0,
- (__global DATA_TYPE *)(dst_addr + 2 * dst_stride_y));
- vstore4(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 4))(out30, out31, out32, out33), VEC_DATA_TYPE(DATA_TYPE, 4)), A_VAL, B_VAL), 0,
- (__global DATA_TYPE *)(dst_addr + 3 * dst_stride_y));
-#endif // !defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-}
-
-/** This OpenCL kernel performs Winograd output transform when the output tile is 4x4, 4x1 or 1x4, the filter size 3x3, 3x1 or 1x3 and the data layout is NHWC
- *
- * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
- * @note The width of the output tensor must be passed at compile time using -DDST_WIDTH: e.g. -DDST_WIDTH=24
- * @note The height of the output tensor must be passed at compile time using -DDST_HEIGHT: e.g. -DDST_HEIGHT=32
- * @note If this kernel is used to perform Winograd output transform 3x1, -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note If this kernel is used to perform Winograd output transform 1x3, -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  dst_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in]  dst_stride_w                          Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] dst_size Size of the destination tensor, minus the last padding
- */
-__kernel void winograd_output_transform_4x4_3x3_nhwc(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst),
-#if defined(HAS_BIAS)
- VECTOR_DECLARATION(bias),
-#endif // defined(HAS_BIAS)
- int dst_size)
-{
- // Each thread stores a 4x4/4x1 or 1x4 tile
-#if defined(SRC_DEPTH)
- Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DEPTH);
- const __global uchar *src_addr = tensor4D_offset(&src, 0, 0, 0, 0);
-#else /* defined(SRC_DEPTH) */
- Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
- const __global uchar *src_addr = tensor3D_offset(&src, 0, 0, 0);
-#endif /* defined(SRC_DEPTH) */
-
- // Load the values across the 36 channels to compose the 6x6 or 6x1 tile
- DATA_TYPE d00 = *((__global DATA_TYPE *)(src_addr + 0 * src_stride_z));
- DATA_TYPE d01 = *((__global DATA_TYPE *)(src_addr + 1 * src_stride_z));
- DATA_TYPE d02 = *((__global DATA_TYPE *)(src_addr + 2 * src_stride_z));
- DATA_TYPE d03 = *((__global DATA_TYPE *)(src_addr + 3 * src_stride_z));
- DATA_TYPE d04 = *((__global DATA_TYPE *)(src_addr + 4 * src_stride_z));
- DATA_TYPE d05 = *((__global DATA_TYPE *)(src_addr + 5 * src_stride_z));
-
-#if defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
- // Compute out00, out01, out02 and out03
- float out00 = d00 + d01 + d02 + d03 + d04;
- float out01 = d01 - d02 + 2.0f * d03 - 2.0f * d04;
- float out02 = d01 + d02 + 4.0f * d03 + 4.0f * d04;
- float out03 = d01 - d02 + 8.0f * d03 - 8.0f * d04 + d05;
-#else // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
- DATA_TYPE d10 = *((__global DATA_TYPE *)(src_addr + 6 * src_stride_z));
- DATA_TYPE d11 = *((__global DATA_TYPE *)(src_addr + 7 * src_stride_z));
- DATA_TYPE d12 = *((__global DATA_TYPE *)(src_addr + 8 * src_stride_z));
- DATA_TYPE d13 = *((__global DATA_TYPE *)(src_addr + 9 * src_stride_z));
- DATA_TYPE d14 = *((__global DATA_TYPE *)(src_addr + 10 * src_stride_z));
- DATA_TYPE d15 = *((__global DATA_TYPE *)(src_addr + 11 * src_stride_z));
-
- DATA_TYPE d20 = *((__global DATA_TYPE *)(src_addr + 12 * src_stride_z));
- DATA_TYPE d21 = *((__global DATA_TYPE *)(src_addr + 13 * src_stride_z));
- DATA_TYPE d22 = *((__global DATA_TYPE *)(src_addr + 14 * src_stride_z));
- DATA_TYPE d23 = *((__global DATA_TYPE *)(src_addr + 15 * src_stride_z));
- DATA_TYPE d24 = *((__global DATA_TYPE *)(src_addr + 16 * src_stride_z));
- DATA_TYPE d25 = *((__global DATA_TYPE *)(src_addr + 17 * src_stride_z));
-
- DATA_TYPE d30 = *((__global DATA_TYPE *)(src_addr + 18 * src_stride_z));
- DATA_TYPE d31 = *((__global DATA_TYPE *)(src_addr + 19 * src_stride_z));
- DATA_TYPE d32 = *((__global DATA_TYPE *)(src_addr + 20 * src_stride_z));
- DATA_TYPE d33 = *((__global DATA_TYPE *)(src_addr + 21 * src_stride_z));
- DATA_TYPE d34 = *((__global DATA_TYPE *)(src_addr + 22 * src_stride_z));
- DATA_TYPE d35 = *((__global DATA_TYPE *)(src_addr + 23 * src_stride_z));
-
- DATA_TYPE d40 = *((__global DATA_TYPE *)(src_addr + 24 * src_stride_z));
- DATA_TYPE d41 = *((__global DATA_TYPE *)(src_addr + 25 * src_stride_z));
- DATA_TYPE d42 = *((__global DATA_TYPE *)(src_addr + 26 * src_stride_z));
- DATA_TYPE d43 = *((__global DATA_TYPE *)(src_addr + 27 * src_stride_z));
- DATA_TYPE d44 = *((__global DATA_TYPE *)(src_addr + 28 * src_stride_z));
- DATA_TYPE d45 = *((__global DATA_TYPE *)(src_addr + 29 * src_stride_z));
-
- DATA_TYPE d50 = *((__global DATA_TYPE *)(src_addr + 30 * src_stride_z));
- DATA_TYPE d51 = *((__global DATA_TYPE *)(src_addr + 31 * src_stride_z));
- DATA_TYPE d52 = *((__global DATA_TYPE *)(src_addr + 32 * src_stride_z));
- DATA_TYPE d53 = *((__global DATA_TYPE *)(src_addr + 33 * src_stride_z));
- DATA_TYPE d54 = *((__global DATA_TYPE *)(src_addr + 34 * src_stride_z));
- DATA_TYPE d55 = *((__global DATA_TYPE *)(src_addr + 35 * src_stride_z));
-
- // Compute out00, out01, out02 and out03
- float out00 = d01 + d21 + d41 + d11 + d31;
- float out01 = d01 + d21 + d41 + d11 + d31;
- float out02 = d01 + d21 + d41 + d11 + d31;
- float out03 = d01 + d21 + d41 + d11 + d31;
-
- float k0 = d03 + d04 + d13 + d14 + d23 + d24 + d33 + d34 + d43 + d44;
- float k1 = 2.0f * d03 - 2.0f * d04 + 2.0f * d13 - 2.0f * d14 + 2.0f * d23 - 2.0f * d24 + 2.0f * d33 - 2.0f * d34 + 2.0f * d43 - 2.0f * d44;
-
- out00 += k0 + d00 + d02 + d10 + d12 + d20 + d22 + d30 + d32 + d40 + d42;
- out01 += k1 - d02 - d12 - d22 - d32 - d42;
- out02 += 4.0f * k0 + d02 + d12 + d22 + d32 + d42;
- out03 += 4.0f * k1 - d02 - d12 - d22 - d32 - d42 + d05 + d15 + d25 + d35 + d45;
-
- // Compute out10, out11, out12 and out13
- float out10 = d11 - d21 + 2.0f * d31 - 2.0f * d41;
- float out11 = d11 - d21 + 2.0f * d31 - 2.0f * d41;
- float out12 = d11 - d21 + 2.0f * d31 - 2.0f * d41;
- float out13 = d11 - d21 + 2.0f * d31 - 2.0f * d41;
-
- k0 = d13 + d14 - d23 - d24 + 2.0f * d33 + 2.0f * d34 - 2.0f * d43 - 2.0f * d44;
- k1 = 2.0f * d13 - 2.0f * d14 - 2.0f * d23 + 2.0f * d24 + 4.0f * d33 - 4.0f * d34 - 4.0f * d43 + 4.0f * d44;
-
- out10 += k0 + d10 + d12 - d20 - d22 + 2.0f * d30 + 2.0f * d32 - 2.0f * d40 - 2.0f * d42;
- out11 += k1 - d12 + d22 - 2.0f * d32 + 2.0f * d42;
- out12 += 4.0f * k0 + d12 - d22 + 2.0f * d32 - 2.0f * d42;
- out13 += 4.0f * k1 - d12 + d15 + d22 - d25 - 2.0f * d32 + 2.0f * d35 + 2.0f * d42 - 2.0f * d45;
-
- // Compute out20, out21, out22 and out23
- float out20 = d11 + d21 + 4.0f * d31 + 4.0f * d41;
- float out21 = d11 + d21 + 4.0f * d31 + 4.0f * d41;
- float out22 = d11 + d21 + 4.0f * d31 + 4.0f * d41;
- float out23 = d11 + d21 + 4.0f * d31 + 4.0f * d41;
-
- k0 = d13 + d14 + d23 + d24 + 4.0f * d33 + 4.0f * d34 + 4.0f * d43 + 4.0f * d44;
- k1 = 2.0f * d13 - 2.0f * d14 + 2.0f * d23 - 2.0f * d24 + 8.0f * d33 - 8.0f * d34 + 8.0f * d43 - 8.0f * d44;
-
- out20 += k0 + d10 + d12 + d20 + d22 + 4.0f * d30 + 4.0f * d32 + 4.0f * d40 + 4.0f * d42;
- out21 += k1 - d12 - d22 - 4.0f * d32 - 4.0f * d42;
- out22 += 4.0f * k0 + d12 + d22 + 4.0f * d32 + 4.0f * d42;
- out23 += 4.0f * k1 - d12 + d15 - d22 + d25 - 4.0f * d32 + 4.0f * d35 - 4.0f * d42 + 4.0f * d45;
-
- // Compute out30, out31, out32 and out33
- float out30 = d11 - d21 + 8.0f * d31 - 8.0f * d41 + d51;
- float out31 = d11 - d21 + 8.0f * d31 - 8.0f * d41 + d51;
- float out32 = d11 - d21 + 8.0f * d31 - 8.0f * d41 + d51;
- float out33 = d11 - d21 + 8.0f * d31 - 8.0f * d41 + d51;
-
- k0 = d13 + d14 - d23 - d24 + 8.0f * d33 + 8.0f * d34 - 8.0f * d43 - 8.0f * d44 + d53 + d54;
- k1 = 2.0f * d13 - 2.0f * d14 - 2.0f * d23 + 2.0f * d24 + 16.0f * d33 - 16.0f * d34 - 16.0f * d43 + 16.0f * d44 + 2.0f * d53 - 2.0f * d54;
-
- out30 += k0 + d10 + d12 - d20 - d22 + 8.0f * d30 + 8.0f * d32 - 8.0f * d40 - 8.0f * d42 + d50 + d52;
- out31 += k1 - d12 + d22 - 8.0f * d32 + 8.0f * d42 - d52;
- out32 += 4.0f * k0 + d12 - d22 + 8.0f * d32 - 8.0f * d42 + d52;
- out33 += 4.0f * k1 - d12 + d15 + d22 - d25 - 8.0f * d32 + 8.0f * d35 + 8.0f * d42 - 8.0f * d45 - d52 + d55;
-#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
- int y_in = get_global_id(1);
- int x_out = get_global_id(0);
- int y_out = (y_in % NUM_TILES_X) * OUTPUT_TILE_W;
- int z_out = (y_in / NUM_TILES_X) * OUTPUT_TILE_H;
-#if defined(SRC_DEPTH)
- int batch = get_global_id(2) / SRC_DEPTH;
-#endif /* defined(SRC_DEPTH) */
-
-#if defined(HAS_BIAS)
- // Add bias
- Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);
-
- DATA_TYPE b = (DATA_TYPE) * ((__global DATA_TYPE *)(vector_offset(&bias, x_out)));
-
- out00 += (DATA_TYPE)b;
- out01 += (DATA_TYPE)b;
- out02 += (DATA_TYPE)b;
- out03 += (DATA_TYPE)b;
-#if !defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
- out10 += (DATA_TYPE)b;
- out11 += (DATA_TYPE)b;
- out12 += (DATA_TYPE)b;
- out13 += (DATA_TYPE)b;
-
- out20 += (DATA_TYPE)b;
- out21 += (DATA_TYPE)b;
- out22 += (DATA_TYPE)b;
- out23 += (DATA_TYPE)b;
-
- out30 += (DATA_TYPE)b;
- out31 += (DATA_TYPE)b;
- out32 += (DATA_TYPE)b;
- out33 += (DATA_TYPE)b;
-#endif // !defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
-#endif // defined(HAS_BIAS)
-
- __global unsigned char *dst_base_ptr = dst_ptr + dst_offset_first_element_in_bytes + x_out * sizeof(DATA_TYPE);
-
-#if defined(SRC_DEPTH)
- dst_base_ptr += batch * dst_stride_w;
-#endif // defined(SRC_DEPTH)
-
-#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
- dst_base_ptr += y_out * dst_stride_y;
-
- int4 offset_z = min((int4)z_out + (int4)(0, 1, 2, 3), (int4)((int)DST_HEIGHT - 1)) * (int4)dst_stride_z;
-
- // Store the 1x4 output tile
- VEC_DATA_TYPE(DATA_TYPE, 4)
- out0_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 4))(out00, out01, out02, out03), VEC_DATA_TYPE(DATA_TYPE, 4)), A_VAL,
- B_VAL);
-
- // To avoid the out-of-bound write, we store the elements in reverse order so the invalid element
- // is overwritten with the valid one
- *((__global DATA_TYPE *)(dst_base_ptr + offset_z.s3)) = out0_dt.s3;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_z.s2)) = out0_dt.s2;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_z.s1)) = out0_dt.s1;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_z.s0)) = out0_dt.s0;
-
-#elif defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL)
-
- dst_base_ptr += z_out * dst_stride_z;
-
- int4 offset_y = min((int4)y_out + (int4)(0, 1, 2, 3), (int4)((int)DST_WIDTH - 1)) * (int4)dst_stride_y;
-
- VEC_DATA_TYPE(DATA_TYPE, 4)
- out0_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 4))(out00, out01, out02, out03), VEC_DATA_TYPE(DATA_TYPE, 4)),
- A_VAL, B_VAL);
-
- // To avoid the out-of-bound write, we store the elements in reverse order so the invalid element
- // is overwritten with the valid one
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s3)) = out0_dt.s3;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s2)) = out0_dt.s2;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s1)) = out0_dt.s1;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s0)) = out0_dt.s0;
-
-#else // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL)
-
- int4 offset_y = min((int4)y_out + (int4)(0, 1, 2, 3), (int4)((int)DST_WIDTH - 1)) * (int4)dst_stride_y;
- int4 offset_z = min((int4)z_out + (int4)(0, 1, 2, 3), (int4)((int)DST_HEIGHT - 1)) * (int4)dst_stride_z;
-
- // Store the 4x4 output tile
- VEC_DATA_TYPE(DATA_TYPE, 4)
- out0_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 4))(out00, out01, out02, out03), VEC_DATA_TYPE(DATA_TYPE, 4)), A_VAL, B_VAL);
- VEC_DATA_TYPE(DATA_TYPE, 4)
- out1_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 4))(out10, out11, out12, out13), VEC_DATA_TYPE(DATA_TYPE, 4)), A_VAL, B_VAL);
- VEC_DATA_TYPE(DATA_TYPE, 4)
- out2_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 4))(out20, out21, out22, out23), VEC_DATA_TYPE(DATA_TYPE, 4)), A_VAL, B_VAL);
- VEC_DATA_TYPE(DATA_TYPE, 4)
- out3_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 4))(out30, out31, out32, out33),
- VEC_DATA_TYPE(DATA_TYPE, 4)),
- A_VAL, B_VAL);
-
- // To avoid the out-of-bound write, we store the elements in reverse order so the invalid element
- // is overwritten with the valid one
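- // Illustrative example (added note, values hypothetical): with DST_WIDTH = 6 and y_out = 4,
- // offset_y clamps the column indices (4, 5, 6, 7) to (4, 5, 5, 5). Elements .s2 and .s3 of
- // each row would fall outside the tensor; because .s3 is written first and .s1 last among the
- // writes that land on column 5, the value left there is the in-range one.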
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s3 + offset_z.s3)) = out3_dt.s3;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s2 + offset_z.s3)) = out3_dt.s2;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s1 + offset_z.s3)) = out3_dt.s1;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s0 + offset_z.s3)) = out3_dt.s0;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s3 + offset_z.s2)) = out2_dt.s3;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s2 + offset_z.s2)) = out2_dt.s2;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s1 + offset_z.s2)) = out2_dt.s1;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s0 + offset_z.s2)) = out2_dt.s0;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s3 + offset_z.s1)) = out1_dt.s3;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s2 + offset_z.s1)) = out1_dt.s2;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s1 + offset_z.s1)) = out1_dt.s1;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s0 + offset_z.s1)) = out1_dt.s0;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s3 + offset_z.s0)) = out0_dt.s3;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s2 + offset_z.s0)) = out0_dt.s2;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s1 + offset_z.s0)) = out0_dt.s1;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s0 + offset_z.s0)) = out0_dt.s0;
-#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL)
-}
-
-#define COMPUTE_TMP_COL(col, d0, d1, d2, d3, d4, d5, d6, d7, comm_fact) \
- ({ \
- comm_fact.s0 = d1 + d2; \
- comm_fact.s1 = d3 + d4; \
- comm_fact.s2 = d5 + d6; \
- \
- col.s0 = comm_fact.s0 + comm_fact.s1 + 8.f * comm_fact.s2 + d0; \
- col.s2 = comm_fact.s0 + 4.f * comm_fact.s1 + 2.f * comm_fact.s2; \
- \
- comm_fact.s0 = d1 - d2; \
- comm_fact.s1 = d3 - d4; \
- comm_fact.s2 = d5 - d6; \
- \
- col.s1 = comm_fact.s0 + 2.f * comm_fact.s1 + 4.f * comm_fact.s2; \
- col.s3 = comm_fact.s0 + 8.f * comm_fact.s1 + comm_fact.s2 + d7; \
- })
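-
-/* Note added for clarity (not part of the original kernel source): COMPUTE_TMP_COL reduces the
- * eight values d0..d7 of one column of the 8x8 workspace tile to the four output-space values
- * col.s0..col.s3, reusing the pairwise sums/differences kept in comm_fact. Read off the macro,
- * the effective 4x8 transform is
- *
- *         [ 1  1  1  1  1  8  8  0 ]
- *     M = [ 0  1 -1  2 -2  4 -4  0 ]
- *         [ 0  1  1  4  4  2  2  0 ]
- *         [ 0  1 -1  8 -8  1 -1  1 ]
- *
- * i.e. the same coefficients used for out00..out03 in the 1D branches of the 5x5 kernels below.
- */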
-
-/** This OpenCL kernel performs Winograd output transform when the output tile is 4x4/4x1 or 1x4, the filter size 5x5/5x1 or 1x5 and the data layout is NCHW
- *
- * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
- * @note If this kernel is used to perform Winograd output transform 5x1, -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note If this kernel is used to perform Winograd output transform 1x5, -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_output_transform_4x4_5x5_nchw(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(bias)
-#endif // defined(HAS_BIAS)
-)
-{
- // Each thread stores a 4x4/4x1 or 1x4 tile
-#if defined(SRC_DEPTH)
- Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DEPTH);
- const __global uchar *src_addr = tensor4D_offset(&src, 0, 0, 0, 0);
-#else /* defined(SRC_DEPTH) */
-
- Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
- const __global uchar *src_addr = tensor3D_offset(&src, 0, 0, 0);
-#endif /* defined(SRC_DEPTH) */
-
- // Compute output address
- int y_in = get_global_id(1);
- int x_out = (y_in % NUM_TILES_X) * OUTPUT_TILE_W;
- int y_out = (y_in / NUM_TILES_X) * OUTPUT_TILE_H;
- int z_out = get_global_id(0);
-#if defined(SRC_DEPTH)
- int batch = get_global_id(2) / SRC_DEPTH;
-#endif /* defined(SRC_DEPTH) */
-
-#if defined(SRC_DEPTH)
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_out * sizeof(DATA_TYPE) + y_out * dst_stride_y + z_out * dst_stride_z + batch * dst_stride_w;
-#else /* defined(SRC_DEPTH) */
-
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_out * sizeof(DATA_TYPE) + y_out * dst_stride_y + z_out * dst_stride_z;
-#endif /* defined(SRC_DEPTH) */
-
- // Load the values across the channels to compose the input tile
- DATA_TYPE d00 = *((__global DATA_TYPE *)(src_addr + 0 * src_stride_z));
- DATA_TYPE d01 = *((__global DATA_TYPE *)(src_addr + 1 * src_stride_z));
- DATA_TYPE d02 = *((__global DATA_TYPE *)(src_addr + 2 * src_stride_z));
- DATA_TYPE d03 = *((__global DATA_TYPE *)(src_addr + 3 * src_stride_z));
- DATA_TYPE d04 = *((__global DATA_TYPE *)(src_addr + 4 * src_stride_z));
- DATA_TYPE d05 = *((__global DATA_TYPE *)(src_addr + 5 * src_stride_z));
- DATA_TYPE d06 = *((__global DATA_TYPE *)(src_addr + 6 * src_stride_z));
- DATA_TYPE d07 = *((__global DATA_TYPE *)(src_addr + 7 * src_stride_z));
-
-#if defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
- // Compute out00, out01, out02 and out03
- float out00 = d00 + d01 + d02 + d03 + d04 + 8.0f * d05 + 8.0f * d06;
- float out01 = d01 - d02 + 2.0f * d03 - 2.0f * d04 + 4.0f * d05 - 4.0f * d06;
- float out02 = d01 + d02 + 4.0f * d03 + 4.0f * d04 + 2.0f * d05 + 2.0f * d06;
- float out03 = d01 - d02 + 8.0f * d03 - 8.0f * d04 + d05 - d06 + d07;
-
-#if defined(HAS_BIAS)
- // Add bias
- Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);
-
- float b = (float) * ((__global DATA_TYPE *)(vector_offset(&bias, z_out)));
-
- out00 += (DATA_TYPE)b;
- out01 += (DATA_TYPE)b;
- out02 += (DATA_TYPE)b;
- out03 += (DATA_TYPE)b;
-#endif // defined(HAS_BIAS)
-
- // Store the output tile
-#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
- VEC_DATA_TYPE(DATA_TYPE, 4)
- out0_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 4))(out00, out01, out02, out03), VEC_DATA_TYPE(DATA_TYPE, 4)), A_VAL,
- B_VAL);
- *((__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y)) = out0_dt.s0;
- *((__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y)) = out0_dt.s1;
- *((__global DATA_TYPE *)(dst_addr + 2 * dst_stride_y)) = out0_dt.s2;
- *((__global DATA_TYPE *)(dst_addr + 3 * dst_stride_y)) = out0_dt.s3;
-#else // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
- vstore4(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 4))(out00, out01, out02, out03), VEC_DATA_TYPE(DATA_TYPE, 4)), A_VAL, B_VAL), 0,
- (__global DATA_TYPE *)(dst_addr));
-#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
-#else // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
- DATA_TYPE d10 = *((__global DATA_TYPE *)(src_addr + 8 * src_stride_z));
- DATA_TYPE d11 = *((__global DATA_TYPE *)(src_addr + 9 * src_stride_z));
- DATA_TYPE d12 = *((__global DATA_TYPE *)(src_addr + 10 * src_stride_z));
- DATA_TYPE d13 = *((__global DATA_TYPE *)(src_addr + 11 * src_stride_z));
- DATA_TYPE d14 = *((__global DATA_TYPE *)(src_addr + 12 * src_stride_z));
- DATA_TYPE d15 = *((__global DATA_TYPE *)(src_addr + 13 * src_stride_z));
- DATA_TYPE d16 = *((__global DATA_TYPE *)(src_addr + 14 * src_stride_z));
- DATA_TYPE d17 = *((__global DATA_TYPE *)(src_addr + 15 * src_stride_z));
-
- DATA_TYPE d20 = *((__global DATA_TYPE *)(src_addr + 16 * src_stride_z));
- DATA_TYPE d21 = *((__global DATA_TYPE *)(src_addr + 17 * src_stride_z));
- DATA_TYPE d22 = *((__global DATA_TYPE *)(src_addr + 18 * src_stride_z));
- DATA_TYPE d23 = *((__global DATA_TYPE *)(src_addr + 19 * src_stride_z));
- DATA_TYPE d24 = *((__global DATA_TYPE *)(src_addr + 20 * src_stride_z));
- DATA_TYPE d25 = *((__global DATA_TYPE *)(src_addr + 21 * src_stride_z));
- DATA_TYPE d26 = *((__global DATA_TYPE *)(src_addr + 22 * src_stride_z));
- DATA_TYPE d27 = *((__global DATA_TYPE *)(src_addr + 23 * src_stride_z));
-
- DATA_TYPE d30 = *((__global DATA_TYPE *)(src_addr + 24 * src_stride_z));
- DATA_TYPE d31 = *((__global DATA_TYPE *)(src_addr + 25 * src_stride_z));
- DATA_TYPE d32 = *((__global DATA_TYPE *)(src_addr + 26 * src_stride_z));
- DATA_TYPE d33 = *((__global DATA_TYPE *)(src_addr + 27 * src_stride_z));
- DATA_TYPE d34 = *((__global DATA_TYPE *)(src_addr + 28 * src_stride_z));
- DATA_TYPE d35 = *((__global DATA_TYPE *)(src_addr + 29 * src_stride_z));
- DATA_TYPE d36 = *((__global DATA_TYPE *)(src_addr + 30 * src_stride_z));
- DATA_TYPE d37 = *((__global DATA_TYPE *)(src_addr + 31 * src_stride_z));
-
- DATA_TYPE d40 = *((__global DATA_TYPE *)(src_addr + 32 * src_stride_z));
- DATA_TYPE d41 = *((__global DATA_TYPE *)(src_addr + 33 * src_stride_z));
- DATA_TYPE d42 = *((__global DATA_TYPE *)(src_addr + 34 * src_stride_z));
- DATA_TYPE d43 = *((__global DATA_TYPE *)(src_addr + 35 * src_stride_z));
- DATA_TYPE d44 = *((__global DATA_TYPE *)(src_addr + 36 * src_stride_z));
- DATA_TYPE d45 = *((__global DATA_TYPE *)(src_addr + 37 * src_stride_z));
- DATA_TYPE d46 = *((__global DATA_TYPE *)(src_addr + 38 * src_stride_z));
- DATA_TYPE d47 = *((__global DATA_TYPE *)(src_addr + 39 * src_stride_z));
-
- DATA_TYPE d50 = *((__global DATA_TYPE *)(src_addr + 40 * src_stride_z));
- DATA_TYPE d51 = *((__global DATA_TYPE *)(src_addr + 41 * src_stride_z));
- DATA_TYPE d52 = *((__global DATA_TYPE *)(src_addr + 42 * src_stride_z));
- DATA_TYPE d53 = *((__global DATA_TYPE *)(src_addr + 43 * src_stride_z));
- DATA_TYPE d54 = *((__global DATA_TYPE *)(src_addr + 44 * src_stride_z));
- DATA_TYPE d55 = *((__global DATA_TYPE *)(src_addr + 45 * src_stride_z));
- DATA_TYPE d56 = *((__global DATA_TYPE *)(src_addr + 46 * src_stride_z));
- DATA_TYPE d57 = *((__global DATA_TYPE *)(src_addr + 47 * src_stride_z));
-
- DATA_TYPE d60 = *((__global DATA_TYPE *)(src_addr + 48 * src_stride_z));
- DATA_TYPE d61 = *((__global DATA_TYPE *)(src_addr + 49 * src_stride_z));
- DATA_TYPE d62 = *((__global DATA_TYPE *)(src_addr + 50 * src_stride_z));
- DATA_TYPE d63 = *((__global DATA_TYPE *)(src_addr + 51 * src_stride_z));
- DATA_TYPE d64 = *((__global DATA_TYPE *)(src_addr + 52 * src_stride_z));
- DATA_TYPE d65 = *((__global DATA_TYPE *)(src_addr + 53 * src_stride_z));
- DATA_TYPE d66 = *((__global DATA_TYPE *)(src_addr + 54 * src_stride_z));
- DATA_TYPE d67 = *((__global DATA_TYPE *)(src_addr + 55 * src_stride_z));
-
- DATA_TYPE d70 = *((__global DATA_TYPE *)(src_addr + 56 * src_stride_z));
- DATA_TYPE d71 = *((__global DATA_TYPE *)(src_addr + 57 * src_stride_z));
- DATA_TYPE d72 = *((__global DATA_TYPE *)(src_addr + 58 * src_stride_z));
- DATA_TYPE d73 = *((__global DATA_TYPE *)(src_addr + 59 * src_stride_z));
- DATA_TYPE d74 = *((__global DATA_TYPE *)(src_addr + 60 * src_stride_z));
- DATA_TYPE d75 = *((__global DATA_TYPE *)(src_addr + 61 * src_stride_z));
- DATA_TYPE d76 = *((__global DATA_TYPE *)(src_addr + 62 * src_stride_z));
- DATA_TYPE d77 = *((__global DATA_TYPE *)(src_addr + 63 * src_stride_z));
-
- // Compute the 8x4 intermediate tensor
- VEC_DATA_TYPE(float, 4)
- comm_fact0, comm_fact1, comm_fact2;
- VEC_DATA_TYPE(float, 4)
- tmp_col0, tmp_col1, tmp_col2, tmp_col3, tmp_col4, tmp_col5, tmp_col6, tmp_col7;
-
- COMPUTE_TMP_COL(tmp_col0, d00, d10, d20, d30, d40, d50, d60, d70, comm_fact0);
- COMPUTE_TMP_COL(tmp_col1, d01, d11, d21, d31, d41, d51, d61, d71, comm_fact0);
- COMPUTE_TMP_COL(tmp_col2, d02, d12, d22, d32, d42, d52, d62, d72, comm_fact0);
- COMPUTE_TMP_COL(tmp_col3, d03, d13, d23, d33, d43, d53, d63, d73, comm_fact0);
- COMPUTE_TMP_COL(tmp_col4, d04, d14, d24, d34, d44, d54, d64, d74, comm_fact0);
- COMPUTE_TMP_COL(tmp_col5, d05, d15, d25, d35, d45, d55, d65, d75, comm_fact0);
- COMPUTE_TMP_COL(tmp_col6, d06, d16, d26, d36, d46, d56, d66, d76, comm_fact0);
- COMPUTE_TMP_COL(tmp_col7, d07, d17, d27, d37, d47, d57, d67, d77, comm_fact0);
-
- // Compute the 4x4 output tile
- comm_fact0 = tmp_col1 + tmp_col2;
- comm_fact1 = tmp_col3 + tmp_col4;
- comm_fact2 = tmp_col5 + tmp_col6;
-
- VEC_DATA_TYPE(float, 4)
- out_col0 = comm_fact0 + comm_fact1 + (float)8.f * comm_fact2 + tmp_col0;
- VEC_DATA_TYPE(float, 4)
- out_col2 = comm_fact0 + (float)4.f * comm_fact1 + (float)2.f * comm_fact2;
-
- comm_fact0 = tmp_col1 - tmp_col2;
- comm_fact1 = tmp_col3 - tmp_col4;
- comm_fact2 = tmp_col5 - tmp_col6;
-
- VEC_DATA_TYPE(float, 4)
- out_col1 = comm_fact0 + (float)2.f * comm_fact1 + (float)4.f * comm_fact2;
- VEC_DATA_TYPE(float, 4)
- out_col3 = comm_fact0 + (float)8.f * comm_fact1 + comm_fact2 + tmp_col7;
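- // (Note added for clarity) This second pass applies the same 8-to-4 reduction as
- // COMPUTE_TMP_COL, but across the eight tmp_col vectors, so each out_colN ends up holding
- // column N of the 4x4 output tile; the rows are re-assembled in the vstore4 calls below.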
-
-#if defined(HAS_BIAS)
- // Add bias
- Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);
-
- float b = (float) * ((__global DATA_TYPE *)(vector_offset(&bias, z_out)));
-
- out_col0 += (VEC_DATA_TYPE(float, 4))b;
- out_col1 += (VEC_DATA_TYPE(float, 4))b;
- out_col2 += (VEC_DATA_TYPE(float, 4))b;
- out_col3 += (VEC_DATA_TYPE(float, 4))b;
-#endif // defined(HAS_BIAS)
-
- // Store the output tile
- vstore4(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, (VEC_DATA_TYPE(DATA_TYPE, 4))(out_col0.s0, out_col1.s0, out_col2.s0, out_col3.s0), A_VAL, B_VAL), 0,
- (__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y));
- vstore4(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, (VEC_DATA_TYPE(DATA_TYPE, 4))(out_col0.s1, out_col1.s1, out_col2.s1, out_col3.s1), A_VAL, B_VAL), 0,
- (__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y));
- vstore4(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, (VEC_DATA_TYPE(DATA_TYPE, 4))(out_col0.s2, out_col1.s2, out_col2.s2, out_col3.s2), A_VAL, B_VAL), 0,
- (__global DATA_TYPE *)(dst_addr + 2 * dst_stride_y));
- vstore4(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, (VEC_DATA_TYPE(DATA_TYPE, 4))(out_col0.s3, out_col1.s3, out_col2.s3, out_col3.s3), A_VAL, B_VAL), 0,
- (__global DATA_TYPE *)(dst_addr + 3 * dst_stride_y));
-#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-}
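-
-/* Illustrative build options (added note; the values are hypothetical, not taken from the
- * host-side configuration): the 4x4/5x5 NCHW output transform above could, for example, be
- * compiled for an F32 tensor with
- *
- *     -DDATA_TYPE=float -DVEC_SIZE=4 -DNUM_TILES_X=16 -DOUTPUT_TILE_W=4 -DOUTPUT_TILE_H=4
- *     -DSRC_DEPTH=64 -DHAS_BIAS -DACTIVATION_TYPE=relu -DA_VAL=0.0f -DB_VAL=0.0f
- *
- * In practice the host code derives these defines from the tensor shapes and the layer
- * configuration.
- */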
-
-/** This OpenCL kernel performs Winograd output transform when the output tile is 4x4/4x1 or 1x4, the filter size 5x5/5x1 or 1x5 and the data layout is NHWC
- *
- * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
- * @note The width of the output tensor must be passed at compile time using -DDST_WIDTH: e.g. -DDST_WIDTH=24
- * @note The height of the output tensor must be passed at compile time using -DDST_HEIGHT: e.g. -DDST_HEIGHT=32
- * @note If this kernel is used to perform Winograd output transform 5x1, -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note If this kernel is used to perform Winograd output transform 1x5, -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_output_transform_4x4_5x5_nhwc(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst),
-#if defined(HAS_BIAS)
- VECTOR_DECLARATION(bias),
-#endif // defined(HAS_BIAS)
- int dst_size)
-{
- // Each thread stores a 4x4/4x1 or 1x4 tile
-#if defined(SRC_DEPTH)
- Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DEPTH);
- const __global uchar *src_addr = tensor4D_offset(&src, 0, 0, 0, 0);
-#else /* defined(SRC_DEPTH) */
- Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
- const __global uchar *src_addr = tensor3D_offset(&src, 0, 0, 0);
-#endif /* defined(SRC_DEPTH) */
-
- int y_in = get_global_id(1);
- int x_out = get_global_id(0);
- int y_out = (y_in % NUM_TILES_X) * OUTPUT_TILE_W;
- int z_out = (y_in / NUM_TILES_X) * OUTPUT_TILE_H;
-#if defined(SRC_DEPTH)
- int batch = get_global_id(2) / SRC_DEPTH;
-#endif /* defined(SRC_DEPTH) */
-
- __global unsigned char *dst_base_ptr = dst_ptr + dst_offset_first_element_in_bytes + x_out * sizeof(DATA_TYPE);
-
-#if defined(SRC_DEPTH)
- dst_base_ptr += batch * dst_stride_w;
-#endif // defined(SRC_DEPTH)
-
- // Load the values across the channels to compose the input tile
- DATA_TYPE d00 = *((__global DATA_TYPE *)(src_addr + 0 * src_stride_z));
- DATA_TYPE d01 = *((__global DATA_TYPE *)(src_addr + 1 * src_stride_z));
- DATA_TYPE d02 = *((__global DATA_TYPE *)(src_addr + 2 * src_stride_z));
- DATA_TYPE d03 = *((__global DATA_TYPE *)(src_addr + 3 * src_stride_z));
- DATA_TYPE d04 = *((__global DATA_TYPE *)(src_addr + 4 * src_stride_z));
- DATA_TYPE d05 = *((__global DATA_TYPE *)(src_addr + 5 * src_stride_z));
- DATA_TYPE d06 = *((__global DATA_TYPE *)(src_addr + 6 * src_stride_z));
- DATA_TYPE d07 = *((__global DATA_TYPE *)(src_addr + 7 * src_stride_z));
-
-#if defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
- // Compute out00, out01, out02 and out03
- float out00 = d00 + d01 + d02 + d03 + d04 + 8.0f * d05 + 8.0f * d06;
- float out01 = d01 - d02 + 2.0f * d03 - 2.0f * d04 + 4.0f * d05 - 4.0f * d06;
- float out02 = d01 + d02 + 4.0f * d03 + 4.0f * d04 + 2.0f * d05 + 2.0f * d06;
- float out03 = d01 - d02 + 8.0f * d03 - 8.0f * d04 + d05 - d06 + d07;
-
-#if defined(HAS_BIAS)
- // Add bias
- Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);
-
- float b = (float) * ((__global DATA_TYPE *)(vector_offset(&bias, x_out)));
-
- out00 += (float)b;
- out01 += (float)b;
- out02 += (float)b;
- out03 += (float)b;
-#endif // defined(HAS_BIAS)
-
- // Store the output tile
-#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
- dst_base_ptr += y_out * dst_stride_y;
-
- int4 offset_z = min((int4)z_out + (int4)(0, 1, 2, 3), (int4)((int)DST_HEIGHT - 1)) * (int4)dst_stride_z;
-
- VEC_DATA_TYPE(DATA_TYPE, 4)
- out0_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 4))(out00, out01, out02, out03), VEC_DATA_TYPE(DATA_TYPE, 4)), A_VAL,
- B_VAL);
-
- // To avoid the out-of-bound write, we store the elements in reverse order so the invalid element
- // is overwritten with the valid one
- *((__global DATA_TYPE *)(dst_base_ptr + offset_z.s3)) = out0_dt.s3;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_z.s2)) = out0_dt.s2;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_z.s1)) = out0_dt.s1;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_z.s0)) = out0_dt.s0;
-#else // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
- dst_base_ptr += z_out * dst_stride_z;
-
- int4 offset_y = min((int4)y_out + (int4)(0, 1, 2, 3), (int4)((int)DST_WIDTH - 1)) * (int4)dst_stride_y;
-
- VEC_DATA_TYPE(DATA_TYPE, 4)
- out0_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 4))(out00, out01, out02, out03), VEC_DATA_TYPE(DATA_TYPE, 4)), A_VAL,
- B_VAL);
-
- // To avoid the out-of-bound write, we store the elements in reverse order so the invalid element
- // is overwritten with the valid one
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s3)) = out0_dt.s3;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s2)) = out0_dt.s2;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s1)) = out0_dt.s1;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s0)) = out0_dt.s0;
-#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
-#else // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
- DATA_TYPE d10 = *((__global DATA_TYPE *)(src_addr + 8 * src_stride_z));
- DATA_TYPE d11 = *((__global DATA_TYPE *)(src_addr + 9 * src_stride_z));
- DATA_TYPE d12 = *((__global DATA_TYPE *)(src_addr + 10 * src_stride_z));
- DATA_TYPE d13 = *((__global DATA_TYPE *)(src_addr + 11 * src_stride_z));
- DATA_TYPE d14 = *((__global DATA_TYPE *)(src_addr + 12 * src_stride_z));
- DATA_TYPE d15 = *((__global DATA_TYPE *)(src_addr + 13 * src_stride_z));
- DATA_TYPE d16 = *((__global DATA_TYPE *)(src_addr + 14 * src_stride_z));
- DATA_TYPE d17 = *((__global DATA_TYPE *)(src_addr + 15 * src_stride_z));
-
- DATA_TYPE d20 = *((__global DATA_TYPE *)(src_addr + 16 * src_stride_z));
- DATA_TYPE d21 = *((__global DATA_TYPE *)(src_addr + 17 * src_stride_z));
- DATA_TYPE d22 = *((__global DATA_TYPE *)(src_addr + 18 * src_stride_z));
- DATA_TYPE d23 = *((__global DATA_TYPE *)(src_addr + 19 * src_stride_z));
- DATA_TYPE d24 = *((__global DATA_TYPE *)(src_addr + 20 * src_stride_z));
- DATA_TYPE d25 = *((__global DATA_TYPE *)(src_addr + 21 * src_stride_z));
- DATA_TYPE d26 = *((__global DATA_TYPE *)(src_addr + 22 * src_stride_z));
- DATA_TYPE d27 = *((__global DATA_TYPE *)(src_addr + 23 * src_stride_z));
-
- DATA_TYPE d30 = *((__global DATA_TYPE *)(src_addr + 24 * src_stride_z));
- DATA_TYPE d31 = *((__global DATA_TYPE *)(src_addr + 25 * src_stride_z));
- DATA_TYPE d32 = *((__global DATA_TYPE *)(src_addr + 26 * src_stride_z));
- DATA_TYPE d33 = *((__global DATA_TYPE *)(src_addr + 27 * src_stride_z));
- DATA_TYPE d34 = *((__global DATA_TYPE *)(src_addr + 28 * src_stride_z));
- DATA_TYPE d35 = *((__global DATA_TYPE *)(src_addr + 29 * src_stride_z));
- DATA_TYPE d36 = *((__global DATA_TYPE *)(src_addr + 30 * src_stride_z));
- DATA_TYPE d37 = *((__global DATA_TYPE *)(src_addr + 31 * src_stride_z));
-
- DATA_TYPE d40 = *((__global DATA_TYPE *)(src_addr + 32 * src_stride_z));
- DATA_TYPE d41 = *((__global DATA_TYPE *)(src_addr + 33 * src_stride_z));
- DATA_TYPE d42 = *((__global DATA_TYPE *)(src_addr + 34 * src_stride_z));
- DATA_TYPE d43 = *((__global DATA_TYPE *)(src_addr + 35 * src_stride_z));
- DATA_TYPE d44 = *((__global DATA_TYPE *)(src_addr + 36 * src_stride_z));
- DATA_TYPE d45 = *((__global DATA_TYPE *)(src_addr + 37 * src_stride_z));
- DATA_TYPE d46 = *((__global DATA_TYPE *)(src_addr + 38 * src_stride_z));
- DATA_TYPE d47 = *((__global DATA_TYPE *)(src_addr + 39 * src_stride_z));
-
- DATA_TYPE d50 = *((__global DATA_TYPE *)(src_addr + 40 * src_stride_z));
- DATA_TYPE d51 = *((__global DATA_TYPE *)(src_addr + 41 * src_stride_z));
- DATA_TYPE d52 = *((__global DATA_TYPE *)(src_addr + 42 * src_stride_z));
- DATA_TYPE d53 = *((__global DATA_TYPE *)(src_addr + 43 * src_stride_z));
- DATA_TYPE d54 = *((__global DATA_TYPE *)(src_addr + 44 * src_stride_z));
- DATA_TYPE d55 = *((__global DATA_TYPE *)(src_addr + 45 * src_stride_z));
- DATA_TYPE d56 = *((__global DATA_TYPE *)(src_addr + 46 * src_stride_z));
- DATA_TYPE d57 = *((__global DATA_TYPE *)(src_addr + 47 * src_stride_z));
-
- DATA_TYPE d60 = *((__global DATA_TYPE *)(src_addr + 48 * src_stride_z));
- DATA_TYPE d61 = *((__global DATA_TYPE *)(src_addr + 49 * src_stride_z));
- DATA_TYPE d62 = *((__global DATA_TYPE *)(src_addr + 50 * src_stride_z));
- DATA_TYPE d63 = *((__global DATA_TYPE *)(src_addr + 51 * src_stride_z));
- DATA_TYPE d64 = *((__global DATA_TYPE *)(src_addr + 52 * src_stride_z));
- DATA_TYPE d65 = *((__global DATA_TYPE *)(src_addr + 53 * src_stride_z));
- DATA_TYPE d66 = *((__global DATA_TYPE *)(src_addr + 54 * src_stride_z));
- DATA_TYPE d67 = *((__global DATA_TYPE *)(src_addr + 55 * src_stride_z));
-
- DATA_TYPE d70 = *((__global DATA_TYPE *)(src_addr + 56 * src_stride_z));
- DATA_TYPE d71 = *((__global DATA_TYPE *)(src_addr + 57 * src_stride_z));
- DATA_TYPE d72 = *((__global DATA_TYPE *)(src_addr + 58 * src_stride_z));
- DATA_TYPE d73 = *((__global DATA_TYPE *)(src_addr + 59 * src_stride_z));
- DATA_TYPE d74 = *((__global DATA_TYPE *)(src_addr + 60 * src_stride_z));
- DATA_TYPE d75 = *((__global DATA_TYPE *)(src_addr + 61 * src_stride_z));
- DATA_TYPE d76 = *((__global DATA_TYPE *)(src_addr + 62 * src_stride_z));
- DATA_TYPE d77 = *((__global DATA_TYPE *)(src_addr + 63 * src_stride_z));
-
- // Compute the 8x4 intermediate tensor
- VEC_DATA_TYPE(float, 4)
- comm_fact0, comm_fact1, comm_fact2;
- VEC_DATA_TYPE(float, 4)
- tmp_col0, tmp_col1, tmp_col2, tmp_col3, tmp_col4, tmp_col5, tmp_col6, tmp_col7;
-
- COMPUTE_TMP_COL(tmp_col0, d00, d10, d20, d30, d40, d50, d60, d70, comm_fact0);
- COMPUTE_TMP_COL(tmp_col1, d01, d11, d21, d31, d41, d51, d61, d71, comm_fact0);
- COMPUTE_TMP_COL(tmp_col2, d02, d12, d22, d32, d42, d52, d62, d72, comm_fact0);
- COMPUTE_TMP_COL(tmp_col3, d03, d13, d23, d33, d43, d53, d63, d73, comm_fact0);
- COMPUTE_TMP_COL(tmp_col4, d04, d14, d24, d34, d44, d54, d64, d74, comm_fact0);
- COMPUTE_TMP_COL(tmp_col5, d05, d15, d25, d35, d45, d55, d65, d75, comm_fact0);
- COMPUTE_TMP_COL(tmp_col6, d06, d16, d26, d36, d46, d56, d66, d76, comm_fact0);
- COMPUTE_TMP_COL(tmp_col7, d07, d17, d27, d37, d47, d57, d67, d77, comm_fact0);
-
- // Compute the output tile
- comm_fact0 = tmp_col1 + tmp_col2;
- comm_fact1 = tmp_col3 + tmp_col4;
- comm_fact2 = tmp_col5 + tmp_col6;
-
- VEC_DATA_TYPE(float, 4)
- out_col0 = comm_fact0 + comm_fact1 + 8.f * comm_fact2 + tmp_col0;
- VEC_DATA_TYPE(float, 4)
- out_col2 = comm_fact0 + 4.f * comm_fact1 + 2.f * comm_fact2;
-
- comm_fact0 = tmp_col1 - tmp_col2;
- comm_fact1 = tmp_col3 - tmp_col4;
- comm_fact2 = tmp_col5 - tmp_col6;
-
- VEC_DATA_TYPE(float, 4)
- out_col1 = comm_fact0 + 2.f * comm_fact1 + 4.f * comm_fact2;
- VEC_DATA_TYPE(float, 4)
- out_col3 = comm_fact0 + 8.f * comm_fact1 + comm_fact2 + tmp_col7;
-
-#if defined(HAS_BIAS)
- // Add bias
- Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);
-
- float b = (float) * ((__global DATA_TYPE *)(vector_offset(&bias, x_out)));
-
- out_col0 += (VEC_DATA_TYPE(float, 4))b;
- out_col1 += (VEC_DATA_TYPE(float, 4))b;
- out_col2 += (VEC_DATA_TYPE(float, 4))b;
- out_col3 += (VEC_DATA_TYPE(float, 4))b;
-#endif // defined(HAS_BIAS)
-
- int4 offset_y = min((int4)y_out + (int4)(0, 1, 2, 3), (int4)((int)DST_WIDTH - 1)) * (int4)dst_stride_y;
- int4 offset_z = min((int4)z_out + (int4)(0, 1, 2, 3), (int4)((int)DST_HEIGHT - 1)) * (int4)dst_stride_z;
-
- // Store the output tile
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- out_col0_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT(out_col0, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)), A_VAL, B_VAL);
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- out_col1_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT(out_col1, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)), A_VAL, B_VAL);
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- out_col2_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT(out_col2, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)), A_VAL, B_VAL);
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- out_col3_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT(out_col3, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)), A_VAL, B_VAL);
-
- // To avoid the out-of-bound write, we store the elements in reverse order so the invalid element
- // is overwritten with the valid one
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s3 + offset_z.s3)) = out_col3_dt.s3;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s2 + offset_z.s3)) = out_col2_dt.s3;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s1 + offset_z.s3)) = out_col1_dt.s3;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s0 + offset_z.s3)) = out_col0_dt.s3;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s3 + offset_z.s2)) = out_col3_dt.s2;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s2 + offset_z.s2)) = out_col2_dt.s2;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s1 + offset_z.s2)) = out_col1_dt.s2;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s0 + offset_z.s2)) = out_col0_dt.s2;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s3 + offset_z.s1)) = out_col3_dt.s1;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s2 + offset_z.s1)) = out_col2_dt.s1;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s1 + offset_z.s1)) = out_col1_dt.s1;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s0 + offset_z.s1)) = out_col0_dt.s1;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s3 + offset_z.s0)) = out_col3_dt.s0;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s2 + offset_z.s0)) = out_col2_dt.s0;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s1 + offset_z.s0)) = out_col1_dt.s0;
- *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s0 + offset_z.s0)) = out_col0_dt.s0;
-#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-}
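-
-/* Added note: the NHWC variant above additionally needs the output extents at compile time,
- * e.g. -DDST_WIDTH=24 -DDST_HEIGHT=32 (hypothetical values); they feed the min() clamping used
- * when the partial tiles at the right/bottom borders are stored.
- */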
-#endif // defined(VEC_SIZE) && VEC_SIZE == 4
-
-#if defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL)
-#if defined(VEC_SIZE) && VEC_SIZE == 2
-/** This OpenCL kernel performs Winograd output transform when the output tile is 2x1, the filter size 3x1 and the data layout is NCHW
- *
- * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=2
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=1
- * @note -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_output_transform_2x1_3x1_nchw(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(bias)
-#endif // defined(HAS_BIAS)
-)
-{
- winograd_output_transform_2x2_3x3_nchw(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_stride_w,
- src_step_w,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_stride_w,
- dst_step_w,
- dst_offset_first_element_in_bytes
-#if defined(HAS_BIAS)
- ,
- bias_ptr,
- bias_stride_x,
- bias_step_x,
- bias_offset_first_element_in_bytes
-#endif // defined(HAS_BIAS)
- );
-}
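-
-/* Added note: the 1D wrappers in this section forward every argument unchanged to the
- * corresponding 2D kernel; since -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL (or _VERTICAL) is
- * defined when they are compiled, the 2D kernel body reduces to the 1D transform path.
- */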
-
-/** This OpenCL kernel performs Winograd output transform when the output tile is 2x1, the filter size 7x1 and the data layout is NHWC
- *
- * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=2
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=1
- * @note The width of the output tensor must be passed at compile time using -DDST_WIDTH: e.g. -DDST_WIDTH=24
- * @note The height of the output tensor must be passed at compile time using -DDST_HEIGHT: e.g. -DDST_HEIGHT=32
- * @note -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_output_transform_2x1_7x1_nhwc(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst),
-#if defined(HAS_BIAS)
- VECTOR_DECLARATION(bias),
-#endif // defined(HAS_BIAS)
- int dst_size)
-{
- winograd_output_transform_2x2_7x7_nhwc(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_stride_w,
- src_step_w,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_stride_w,
- dst_step_w,
- dst_offset_first_element_in_bytes,
-#if defined(HAS_BIAS)
- bias_ptr,
- bias_stride_x,
- bias_step_x,
- bias_offset_first_element_in_bytes,
-#endif // defined(HAS_BIAS)
- dst_size);
-}
-#endif // defined(VEC_SIZE) && VEC_SIZE == 2
-
-#if defined(VEC_SIZE) && VEC_SIZE == 4
-/** This OpenCL kernel performs Winograd output transform when the output tile is 4x1, the filter size 3x1 and the data layout is NCHW
- *
- * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=1
- * @note -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_output_transform_4x1_3x1_nchw(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(bias)
-#endif // defined(HAS_BIAS)
-)
-{
- winograd_output_transform_4x4_3x3_nchw(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_stride_w,
- src_step_w,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_stride_w,
- dst_step_w,
- dst_offset_first_element_in_bytes
-#if defined(HAS_BIAS)
- ,
- bias_ptr,
- bias_stride_x,
- bias_step_x,
- bias_offset_first_element_in_bytes
-#endif // defined(HAS_BIAS)
- );
-}
-
-/** This OpenCL kernel performs Winograd output transform when the output tile is 4x1, the filter size 5x1 and the data layout is NCHW
- *
- * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=1
- * @note -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_output_transform_4x1_5x1_nchw(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(bias)
-#endif // defined(HAS_BIAS)
-)
-{
- winograd_output_transform_4x4_5x5_nchw(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_stride_w,
- src_step_w,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_stride_w,
- dst_step_w,
- dst_offset_first_element_in_bytes
-#if defined(HAS_BIAS)
- ,
- bias_ptr,
- bias_stride_x,
- bias_step_x,
- bias_offset_first_element_in_bytes
-#endif // defined(HAS_BIAS)
- );
-}
-
-/** This OpenCL kernel performs Winograd output transform when the output tile is 4x1, the filter size 3x1 and the data layout is NHWC
- *
- * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=1
- * @note The width of the output tensor must be passed at compile time using -DDST_WIDTH: e.g. -DDST_WIDTH=24
- * @note The height of the output tensor must be passed at compile time using -DDST_HEIGHT: e.g. -DDST_HEIGHT=32
- * @note -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_output_transform_4x1_3x1_nhwc(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst),
-#if defined(HAS_BIAS)
- VECTOR_DECLARATION(bias),
-#endif // defined(HAS_BIAS)
- int dst_size)
-{
- winograd_output_transform_4x4_3x3_nhwc(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_stride_w,
- src_step_w,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_stride_w,
- dst_step_w,
- dst_offset_first_element_in_bytes,
-#if defined(HAS_BIAS)
- bias_ptr,
- bias_stride_x,
- bias_step_x,
- bias_offset_first_element_in_bytes,
-#endif // defined(HAS_BIAS)
- dst_size);
-}
-
-/** This OpenCL kernel performs Winograd output transform when the output tile is 4x1, the filter size 5x1 and the data layout is NHWC
- *
- * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=1
- * @note The width of the output tensor must be passed at compile time using -DDST_WIDTH: e.g. -DDST_WIDTH=24
- * @note The height of the output tensor must be passed at compile time using -DDST_HEIGHT: e.g. -DDST_HEIGHT=32
- * @note -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_output_transform_4x1_5x1_nhwc(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst),
-#if defined(HAS_BIAS)
- VECTOR_DECLARATION(bias),
-#endif // defined(HAS_BIAS)
- int dst_size)
-{
- winograd_output_transform_4x4_5x5_nhwc(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_stride_w,
- src_step_w,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_stride_w,
- dst_step_w,
- dst_offset_first_element_in_bytes,
-#if defined(HAS_BIAS)
- bias_ptr,
- bias_stride_x,
- bias_step_x,
- bias_offset_first_element_in_bytes,
-#endif // defined(HAS_BIAS)
- dst_size);
-}
-#endif // defined(VEC_SIZE) && VEC_SIZE == 4
-#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL)
-
-#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-#if defined(VEC_SIZE) && VEC_SIZE == 2
-/** This OpenCL kernel performs Winograd output transform when the output tile is 1x2, the filter size 1x3 and the data layout is NCHW
- *
- * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=1
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=2
- * @note -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_output_transform_1x2_1x3_nchw(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(bias)
-#endif // defined(HAS_BIAS)
-)
-{
- winograd_output_transform_2x2_3x3_nchw(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_stride_w,
- src_step_w,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_stride_w,
- dst_step_w,
- dst_offset_first_element_in_bytes
-#if defined(HAS_BIAS)
- ,
- bias_ptr,
- bias_stride_x,
- bias_step_x,
- bias_offset_first_element_in_bytes
-#endif // defined(HAS_BIAS)
- );
-}
-
-/** This OpenCL kernel performs Winograd output transform when the output tile is 1x2, the filter size 1x7 and the data layout is NHWC
- *
- * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=1
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=2
- * @note The width of the output tensor must be passed at compile time using -DDST_WIDTH: e.g. -DDST_WIDTH=24
- * @note The height of the output tensor must be passed at compile time using -DDST_HEIGHT: e.g. -DDST_HEIGHT=32
- * @note -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_output_transform_1x2_1x7_nhwc(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst),
-#if defined(HAS_BIAS)
- VECTOR_DECLARATION(bias),
-#endif // defined(HAS_BIAS)
- int dst_size)
-{
- winograd_output_transform_2x2_7x7_nhwc(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_stride_w,
- src_step_w,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_stride_w,
- dst_step_w,
- dst_offset_first_element_in_bytes,
-#if defined(HAS_BIAS)
- bias_ptr,
- bias_stride_x,
- bias_step_x,
- bias_offset_first_element_in_bytes,
-#endif // defined(HAS_BIAS)
- dst_size);
-}
-#endif // defined(VEC_SIZE) && VEC_SIZE == 2
-
-#if defined(VEC_SIZE) && VEC_SIZE == 4
-/** This OpenCL kernel performs Winograd output transform when the output tile is 1x4, the filter size 1x3 and the data layout is NCHW
- *
- * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=1
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
- * @note -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_output_transform_1x4_1x3_nchw(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(bias)
-#endif // defined(HAS_BIAS)
-)
-{
- winograd_output_transform_4x4_3x3_nchw(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_stride_w,
- src_step_w,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_stride_w,
- dst_step_w,
- dst_offset_first_element_in_bytes
-#if defined(HAS_BIAS)
- ,
- bias_ptr,
- bias_stride_x,
- bias_step_x,
- bias_offset_first_element_in_bytes
-#endif // defined(HAS_BIAS)
- );
-}
-
-/** This OpenCL kernel performs Winograd output transform when the output tile is 1x4, the filter size 1x5 and the data layout is NCHW
- *
- * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=1
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
- * @note -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_output_transform_1x4_1x5_nchw(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(bias)
-#endif // defined(HAS_BIAS)
-)
-{
- winograd_output_transform_4x4_5x5_nchw(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_stride_w,
- src_step_w,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_stride_w,
- dst_step_w,
- dst_offset_first_element_in_bytes
-#if defined(HAS_BIAS)
- ,
- bias_ptr,
- bias_stride_x,
- bias_step_x,
- bias_offset_first_element_in_bytes
-#endif // defined(HAS_BIAS)
- );
-}
-
-/** This OpenCL kernel performs Winograd output transform when the output tile is 1x4, the filter size 1x3 and the data layout is NHWC
- *
- * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=1
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
- * @note The width of the output tensor must be passed at compile time using -DDST_WIDTH: e.g. -DDST_WIDTH=24
- * @note The height of the output tensor must be passed at compile time using -DDST_HEIGHT: e.g. -DDST_HEIGHT=32
- * @note -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_output_transform_1x4_1x3_nhwc(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst),
-#if defined(HAS_BIAS)
- VECTOR_DECLARATION(bias),
-#endif // defined(HAS_BIAS)
- int dst_size)
-{
- winograd_output_transform_4x4_3x3_nhwc(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_stride_w,
- src_step_w,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_stride_w,
- dst_step_w,
- dst_offset_first_element_in_bytes,
-#if defined(HAS_BIAS)
- bias_ptr,
- bias_stride_x,
- bias_step_x,
- bias_offset_first_element_in_bytes,
-#endif // defined(HAS_BIAS)
- dst_size);
-}
-
-/** This OpenCL kernel performs Winograd output transform when the output tile is 1x4, the filter size 1x5 and the data layout is NHWC
- *
- * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
- * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=1
- * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
- * @note The width of the output tensor must be passed at compile time using -DDST_WIDTH: e.g. -DDST_WIDTH=24
- * @note The height of the output tensor must be passed at compile time using -DDST_HEIGHT: e.g. -DDST_HEIGHT=32
- * @note -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
- * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void winograd_output_transform_1x4_1x5_nhwc(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst),
-#if defined(HAS_BIAS)
- VECTOR_DECLARATION(bias),
-#endif // defined(HAS_BIAS)
- int dst_size)
-{
- winograd_output_transform_4x4_5x5_nhwc(src_ptr,
- src_stride_x,
- src_step_x,
- src_stride_y,
- src_step_y,
- src_stride_z,
- src_step_z,
- src_stride_w,
- src_step_w,
- src_offset_first_element_in_bytes,
- dst_ptr,
- dst_stride_x,
- dst_step_x,
- dst_stride_y,
- dst_step_y,
- dst_stride_z,
- dst_step_z,
- dst_stride_w,
- dst_step_w,
- dst_offset_first_element_in_bytes,
-#if defined(HAS_BIAS)
- bias_ptr,
- bias_stride_x,
- bias_step_x,
- bias_offset_first_element_in_bytes,
-#endif // defined(HAS_BIAS)
- dst_size);
-}
-#endif // defined(VEC_SIZE) && VEC_SIZE == 4
-#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-#endif // defined(NUM_TILES_X) && defined(OUTPUT_TILE_W) && defined(OUTPUT_TILE_H)
diff --git a/src/core/CL/cl_kernels/yolo_layer.cl b/src/core/CL/cl_kernels/yolo_layer.cl
deleted file mode 100644
index 9601dddf67..0000000000
--- a/src/core/CL/cl_kernels/yolo_layer.cl
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#if defined(DATA_TYPE) && defined(ACTIVATION_TYPE) && defined(NUM_CLASSES) && defined(VEC_SIZE)
-
-#include "activation_float_helpers.h"
-
-#define SELECT_TYPE SELECT_VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
-
-#if VEC_SIZE != 1
-#define TYPE VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
-
-/** This performs a YOLO partial activation function for NCHW data layout
- *
- * @note In order to perform the activation function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
- *
- * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
- * @note Activation function should be given as a preprocessor argument using -DACTIVATION_TYPE=name. e.g. -DACTIVATION_TYPE=TANH
- * @note The number of classes should be given as a preprocessor argument using -DNUM_CLASSES=num. e.g. -DNUM_CLASSES=80
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
- *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr (Optional) Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x (Optional) Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y (Optional) Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z (Optional) Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination tensor
- */
-__kernel void yolo_layer_nchw(
- TENSOR3D_DECLARATION(input)
-#ifndef IN_PLACE
- ,
- TENSOR3D_DECLARATION(output)
-#endif /* not IN_PLACE */
-)
-{
- // Get pixels pointer
- Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
-#ifdef IN_PLACE
- Tensor3D output = input;
-#else /* IN_PLACE */
- Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
-#endif /* IN_PLACE */
-
- const int box_ch_id = get_global_id(2) % (NUM_CLASSES + 5);
- const bool activate = box_ch_id != 2 && box_ch_id != 3;
-
- if(activate)
- {
- // Load data
- TYPE data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input.ptr);
- data = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, data, A_VAL, B_VAL); // select(1.0f, ACTIVATION_OP(ACTIVATION_TYPE, data), (SELECT_TYPE)activate);
-
- // Store result
- VSTORE(VEC_SIZE)
- (data, 0, (__global DATA_TYPE *)output.ptr);
- }
-#ifndef IN_PLACE
- else
- {
- // Load data
- TYPE data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input.ptr);
-
- // Store result
- VSTORE(VEC_SIZE)
- (data, 0, (__global DATA_TYPE *)output.ptr);
- }
-#endif // IN_PLACE
-}
-
-#else // VEC_SIZE != 1
-
-/** This performs a YOLO partial activation function for NCHW data layout
- *
- * @note In order to perform the activation function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
- *
- * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=1
- * @note Activation function should be given as a preprocessor argument using -DACTIVATION_TYPE=name. e.g. -DACTIVATION_TYPE=TANH
- * @note The number of classes should be given as a preprocessor argument using -DNUM_CLASSES=num. e.g. -DNUM_CLASSES=80
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
- *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr (Optional) Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x (Optional) Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y (Optional) Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z (Optional) Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination tensor
- */
-__kernel void yolo_layer_nhwc(
- TENSOR3D_DECLARATION(input)
-#ifndef IN_PLACE
- ,
- TENSOR3D_DECLARATION(output)
-#endif /* not IN_PLACE */
-)
-{
- // Get pixels pointer
- Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
-#ifdef IN_PLACE
- Tensor3D output = input;
-#else /* IN_PLACE */
- Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
-#endif /* IN_PLACE */
-
- const int box_ch_id = get_global_id(0) % (NUM_CLASSES + 5);
- const bool activate = box_ch_id != 2 && box_ch_id != 3;
-
- if(activate)
- {
- // Load data
- DATA_TYPE data = *((__global DATA_TYPE *)input.ptr);
- data = select(data, ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, data, A_VAL, B_VAL), (SELECT_TYPE)activate);
-
- // Store result
- *((__global DATA_TYPE *)output.ptr) = data;
- }
-#ifndef IN_PLACE
- else
- {
- // Load data
- DATA_TYPE data = *((__global DATA_TYPE *)input.ptr);
-
- // Store result
- *((__global DATA_TYPE *)output.ptr) = data;
- }
-#endif // IN_PLACE
-}
-
-#endif // VEC_SIZE != 1
-#endif // defined(DATA_TYPE) && defined(ACTIVATION_TYPE) && defined(NUM_CLASSES) && defined(VEC_SIZE)
diff --git a/src/core/CL/gemm/CLGEMMHelpers.cpp b/src/core/CL/gemm/CLGEMMHelpers.cpp
deleted file mode 100644
index 61aa962198..0000000000
--- a/src/core/CL/gemm/CLGEMMHelpers.cpp
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/gemm/CLGEMMHelpers.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/ITensorInfo.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-
-#include <utility>
-
-namespace arm_compute
-{
-namespace cl_gemm
-{
-using namespace arm_compute::misc::shape_calculator;
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_lhs_rhs_info(unsigned int m, unsigned int n, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0,
- bool lhs_interleave, bool rhs_interleave, bool lhs_transpose, bool rhs_transpose, bool export_to_cl_image)
-{
- ARM_COMPUTE_ERROR_ON(m0 == 0 || n0 == 0);
- v0 = std::max(std::min(static_cast<int>(m / m0), static_cast<int>(v0)), static_cast<int>(1));
- h0 = std::max(std::min(static_cast<int>(n / n0), static_cast<int>(h0)), static_cast<int>(1));
-
- const GEMMLHSMatrixInfo lhs_info(m0, k0, v0, lhs_transpose, lhs_interleave);
- const GEMMRHSMatrixInfo rhs_info(n0, k0, h0, rhs_transpose, rhs_interleave, export_to_cl_image);
-
- return std::make_pair(lhs_info, rhs_info);
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> select_lhs_rhs_info(std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> info_img,
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> info_buf,
- unsigned int n, unsigned int k, unsigned int b, DataType data_type)
-{
- const TensorInfo tensor_rhs_info(TensorShape(n, k, b), 1, data_type);
- const TensorShape shape = compute_rhs_reshaped_shape(tensor_rhs_info, info_img.second);
- const TensorInfo tensor_reshaped_info(shape, 1, data_type);
-
- if(bool(validate_image2d_support_on_rhs(tensor_reshaped_info, info_img.second)))
- {
- return info_img;
- }
- else
- {
- return info_buf;
- }
-}
-
-void update_padding_for_cl_image(ITensorInfo *tensor)
-{
- constexpr unsigned int num_floats_per_pixel = 4;
-
- const unsigned int stride_y_in_elements = tensor->strides_in_bytes()[1] / tensor->element_size();
- const unsigned int pixel_alignment = get_cl_image_pitch_alignment(CLKernelLibrary::get().get_device());
-
- ARM_COMPUTE_ERROR_ON_MSG(pixel_alignment == 0, "Cannot retrieve cl_image pitch alignment");
- if(pixel_alignment == 0)
- {
- return;
- }
-
- const unsigned int row_pitch_alignment = pixel_alignment * num_floats_per_pixel;
- const unsigned int round_up_width = ((stride_y_in_elements + row_pitch_alignment - 1) / row_pitch_alignment) * row_pitch_alignment;
- const unsigned int padding = round_up_width - stride_y_in_elements;
-
- tensor->extend_padding(PaddingSize(0, padding, 0, 0));
-}
-
-Status validate_image2d_support_on_rhs(const ITensorInfo &tensor_reshaped_info, const GEMMRHSMatrixInfo &rhs_info)
-{
- if(rhs_info.export_to_cl_image)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((rhs_info.n0 == 2) || (rhs_info.n0 == 3), "Export to cl_image only supported with n0 = 4, 8 or 16");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((rhs_info.k0 == 2) || (rhs_info.k0 == 3), "Export to cl_image only supported with k0 = 4, 8 or 16");
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(&tensor_reshaped_info, DataType::F32, DataType::F16);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(!image2d_from_buffer_supported(CLKernelLibrary::get().get_device()), "The extension cl_khr_image2d_from_buffer is not supported on the target platform");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(get_cl_image_pitch_alignment(CLKernelLibrary::get().get_device()) == 0, "Impossible to retrieve the cl_image pitch alignment");
-
- // Check the width and height of the output tensor.
- // Since we cannot create a 3d image from a buffer, the third dimension is collapsed on the second dimension
- const size_t max_image_w = CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_IMAGE2D_MAX_WIDTH>();
- const size_t max_image_h = CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_IMAGE2D_MAX_HEIGHT>();
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(tensor_reshaped_info.tensor_shape()[0] > max_image_w * 4, "Not supported width for cl_image");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(tensor_reshaped_info.tensor_shape()[1] * tensor_reshaped_info.tensor_shape()[2] > max_image_h, "Not supported height for cl_image");
- }
-
- return Status{};
-}
-} // namespace cl_gemm
-} // namespace arm_compute
diff --git a/src/core/CL/gemm/CLGEMMHelpers.h b/src/core/CL/gemm/CLGEMMHelpers.h
deleted file mode 100644
index 57624673c0..0000000000
--- a/src/core/CL/gemm/CLGEMMHelpers.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMHELPERS_H
-#define ARM_COMPUTE_CLGEMMHELPERS_H
-
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Types.h"
-
-namespace arm_compute
-{
-class ITensorInfo;
-struct GEMMRHSMatrixInfo;
-
-namespace cl_gemm
-{
-/** Configure @ref GEMMLHSMatrixInfo and @ref GEMMRHSMatrixInfo
- *
- * @param[in] m Number of rows (M) in the LHS matrix not reshaped
- * @param[in] n Number of columns (N) in the RHS matrix not reshaped
- * @param[in] m0 Number of rows processed by each thread/work-item
- * @param[in] n0 Number of columns processed by each thread/work-item
- * @param[in] k0 Number of inner accumulation performed by each thread/work-item
- * @param[in] v0 Number of vertical blocks of size (m0xk0) stored on the same output row
- * @param[in] h0 Number of horizontal blocks of size (k0xn0) stored on the same output row
- * @param[in] lhs_interleave True if the v0 (m0xk0) blocks have to be interleaved in the output row
- * @param[in] rhs_interleave True if the h0 (k0xn0) blocks have to be interleaved in the output row
- * @param[in] lhs_transpose True if the (m0xk0) block has to be transposed before been stored
- * @param[in] rhs_transpose True if the (k0xn0) block has to be transposed before been stored
- * @param[in] export_to_cl_image (Optional) True if the RHS reshaped matrix has to be exported to cl_image
- *
- * @return @ref GEMMLHSMatrixInfo and @ref GEMMRHSMatrixInfo
- */
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_lhs_rhs_info(unsigned int m, unsigned int n, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0,
- bool lhs_interleave, bool rhs_interleave, bool lhs_transpose, bool rhs_transpose, bool export_to_cl_image = false);
-
-/** Select @ref GEMMLHSMatrixInfo and @ref GEMMRHSMatrixInfo
- *
- * This function accepts two pairs of GEMMLHSMatrixInfo/GEMMRHSMatrixInfo where only the first is with cl_image2d support,
- * and selects the valid one validating the GEMMRHSMatrixInfo. If the validation passes, the functions will return
- * the first GEMMLHSMatrixInfo/GEMMRHSMatrixInfo pair with cl_image2d support.
- *
- * @param[in] info_img GEMMLHSMatrixInfo/GEMMRHSMatrixInfo with cl_image2d support
- * @param[in] info_buf GEMMLHSMatrixInfo/GEMMRHSMatrixInfo to fall-back if cl_image2d cannot be used
- * @param[in] n Number of columns (N) in the RHS matrix not reshaped
- * @param[in] k Number of rows (K) in the RHS matrix not reshaped
- * @param[in] b Batch size
- * @param[in] data_type Data type
- *
- * @return @ref GEMMLHSMatrixInfo and @ref GEMMRHSMatrixInfo
- */
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> select_lhs_rhs_info(std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> info_img,
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> info_buf,
- unsigned int n, unsigned int k, unsigned int b, DataType data_type);
-
-/** Update padding required to export the OpenCL buffer to OpenCL image2d
- *
- * @param[in,out] tensor ITensorInfo of the tensor required to be exported to OpenCL image2d
- */
-void update_padding_for_cl_image(ITensorInfo *tensor);
-
-/** Utility function to validate the image2d OpenCL object support on the RHS reshaped matrix
- *
- * @param[in] tensor_reshaped_info TensorInfo for the RHS reshaped matrix
- * @param[in] rhs_info @ref GEMMRHSMatrixInfo
- *
- * @return Status reporting if we can use the image2d OpenCL object on the RHS reshaped matrix
- */
-Status validate_image2d_support_on_rhs(const ITensorInfo &tensor_reshaped_info, const GEMMRHSMatrixInfo &rhs_info);
-} // namespace cl_gemm
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLGEMMHELPERS_H */
diff --git a/src/core/CL/gemm/native/CLGEMMDefaultConfigNativeBifrost.cpp b/src/core/CL/gemm/native/CLGEMMDefaultConfigNativeBifrost.cpp
deleted file mode 100644
index b769802663..0000000000
--- a/src/core/CL/gemm/native/CLGEMMDefaultConfigNativeBifrost.cpp
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/gemm/native/CLGEMMDefaultConfigNativeBifrost.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/GPUTarget.h"
-#include "src/core/CL/gemm/CLGEMMHelpers.h"
-
-#include <map>
-#include <utility>
-
-namespace arm_compute
-{
-namespace cl_gemm
-{
-CLGEMMDefaultConfigNativeBifrost::CLGEMMDefaultConfigNativeBifrost(GPUTarget gpu)
- : ICLGEMMKernelConfiguration(gpu)
-{
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigNativeBifrost::configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type)
-{
- using ConfigurationFunctionExecutorPtr = std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> (CLGEMMDefaultConfigNativeBifrost::*)(unsigned int m, unsigned int n, unsigned int k,
- unsigned int b);
-
- // Configurations for Mali-G71
- static std::map<DataType, ConfigurationFunctionExecutorPtr> gemm_configs_G71 =
- {
- { DataType::F32, &CLGEMMDefaultConfigNativeBifrost::configure_G71_f32 },
- { DataType::QASYMM8, &CLGEMMDefaultConfigNativeBifrost::configure_G71_u8 },
- { DataType::QSYMM8, &CLGEMMDefaultConfigNativeBifrost::configure_G71_u8 },
- { DataType::QASYMM8_SIGNED, &CLGEMMDefaultConfigNativeBifrost::configure_G71_u8 },
- { DataType::QSYMM8_PER_CHANNEL, &CLGEMMDefaultConfigNativeBifrost::configure_G71_u8 }
- };
-
- // Configurations for Mali-G76
- static std::map<DataType, ConfigurationFunctionExecutorPtr> gemm_configs_G76 =
- {
- { DataType::F32, &CLGEMMDefaultConfigNativeBifrost::configure_G76_f32 },
- { DataType::QASYMM8, &CLGEMMDefaultConfigNativeBifrost::configure_G76_u8 },
- { DataType::QSYMM8, &CLGEMMDefaultConfigNativeBifrost::configure_G76_u8 },
- { DataType::QASYMM8_SIGNED, &CLGEMMDefaultConfigNativeBifrost::configure_G76_u8 },
- { DataType::QSYMM8_PER_CHANNEL, &CLGEMMDefaultConfigNativeBifrost::configure_G76_u8 }
- };
-
- // Default configurations
- static std::map<DataType, ConfigurationFunctionExecutorPtr> gemm_configs_default =
- {
- { DataType::F32, &CLGEMMDefaultConfigNativeBifrost::configure_default_f32 },
- { DataType::QASYMM8, &CLGEMMDefaultConfigNativeBifrost::configure_default_u8 },
- { DataType::QSYMM8, &CLGEMMDefaultConfigNativeBifrost::configure_default_u8 },
- { DataType::QASYMM8_SIGNED, &CLGEMMDefaultConfigNativeBifrost::configure_default_u8 },
- { DataType::QSYMM8_PER_CHANNEL, &CLGEMMDefaultConfigNativeBifrost::configure_default_u8 }
- };
-
- switch(_target)
- {
- case GPUTarget::G71:
- if(gemm_configs_G71.find(data_type) != gemm_configs_G71.end())
- {
- return (this->*gemm_configs_G71[data_type])(m, n, k, b);
- }
- else
- {
- ARM_COMPUTE_ERROR("Not supported data type");
- }
- case GPUTarget::G76:
- if(gemm_configs_G76.find(data_type) != gemm_configs_G76.end())
- {
- return (this->*gemm_configs_G76[data_type])(m, n, k, b);
- }
- else
- {
- ARM_COMPUTE_ERROR("Not supported data type");
- }
- default:
- if(gemm_configs_default.find(data_type) != gemm_configs_default.end())
- {
- return (this->*gemm_configs_default[data_type])(m, n, k, b);
- }
- else
- {
- ARM_COMPUTE_ERROR("Not supported data type");
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigNativeBifrost::configure_G71_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- if(n < 2048)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 4, 1, 1, false, false, false, false);
- }
- else if(n >= 2048 && n < 8192)
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 4, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 8, 4, 1, 1, false, false, false, false);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 5, 4, 2, 1, 1, false, false, false, false);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigNativeBifrost::configure_G71_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(dot8_supported(CLKernelLibrary::get().get_device()))
- {
- if(m == 1)
- {
- if(n < 2048)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 1, false, false, false, false);
- }
- else if(n >= 2048 && n < 16384)
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 8, 16, 1, 1, false, false, false, false);
- }
- }
- else
- {
- if(m < 64)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 16, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 5, 2, 16, 1, 1, false, false, false, false);
- }
- }
- }
- else
- {
- if(m == 1)
- {
- if(n < 8192)
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 8, 16, 1, 1, false, false, false, false);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 8, 16, 1, 1, false, false, false, false);
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigNativeBifrost::configure_G76_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- if(n > 4196)
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 2, 1, 1, false, false, false, false);
- }
- else
- {
- if(k < 2048)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 2, 1, 1, false, false, false, false);
- }
- else if(k >= 2048 && k < 16384)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 4, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 8, 1, 1, false, false, false, false);
- }
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 8, 2, 1, 1, false, false, false, false);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigNativeBifrost::configure_G76_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- if(n < 2048)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 1, false, false, false, false);
- }
- else if(n >= 2048 && n < 16384)
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 8, 16, 1, 1, false, false, false, false);
- }
- }
- else
- {
- if(m < 64)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 16, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 5, 2, 16, 1, 1, false, false, false, false);
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigNativeBifrost::configure_default_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- return configure_lhs_rhs_info(m, n, 5, 4, 4, 1, 1, false, false, false, false);
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigNativeBifrost::configure_default_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- return configure_lhs_rhs_info(m, n, 5, 2, 16, 1, 1, false, false, false, false);
-}
-} // namespace cl_gemm
-} // namespace arm_compute \ No newline at end of file
diff --git a/src/core/CL/gemm/native/CLGEMMDefaultConfigNativeBifrost.h b/src/core/CL/gemm/native/CLGEMMDefaultConfigNativeBifrost.h
deleted file mode 100644
index 78d47a8195..0000000000
--- a/src/core/CL/gemm/native/CLGEMMDefaultConfigNativeBifrost.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMDEFAULTCONFIGNATIVEBIFROST_H
-#define ARM_COMPUTE_CLGEMMDEFAULTCONFIGNATIVEBIFROST_H
-
-#include "src/core/CL/ICLGEMMKernelConfiguration.h"
-
-namespace arm_compute
-{
-namespace cl_gemm
-{
-/** Bifrost based OpenCL GEMMNative configuration */
-class CLGEMMDefaultConfigNativeBifrost final : public ICLGEMMKernelConfiguration
-{
-public:
- /** Constructor
- *
- * @param[in] gpu GPU target
- */
- CLGEMMDefaultConfigNativeBifrost(GPUTarget gpu);
-
- // Inherited overridden method
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type) override;
-
-private:
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G71_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G71_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G76_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G76_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_default_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_default_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
-};
-} // namespace cl_gemm
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLGEMMDEFAULTCONFIGNATIVEBIFROST_H */
diff --git a/src/core/CL/gemm/native/CLGEMMDefaultConfigNativeMidgard.cpp b/src/core/CL/gemm/native/CLGEMMDefaultConfigNativeMidgard.cpp
deleted file mode 100644
index 18a899047a..0000000000
--- a/src/core/CL/gemm/native/CLGEMMDefaultConfigNativeMidgard.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/gemm/native/CLGEMMDefaultConfigNativeMidgard.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/GPUTarget.h"
-#include "src/core/CL/gemm/CLGEMMHelpers.h"
-
-#include <map>
-#include <utility>
-
-namespace arm_compute
-{
-namespace cl_gemm
-{
-CLGEMMDefaultConfigNativeMidgard::CLGEMMDefaultConfigNativeMidgard(GPUTarget gpu)
- : ICLGEMMKernelConfiguration(gpu)
-{
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigNativeMidgard::configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type)
-{
- using ConfigurationFunctionExecutorPtr = std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> (CLGEMMDefaultConfigNativeMidgard::*)(unsigned int m, unsigned int n, unsigned int k,
- unsigned int b);
-
- // Configurations for Midgard architectures
- static std::map<DataType, ConfigurationFunctionExecutorPtr> default_configs =
- {
- { DataType::QASYMM8, &CLGEMMDefaultConfigNativeMidgard::default_q8 },
- { DataType::QASYMM8_SIGNED, &CLGEMMDefaultConfigNativeMidgard::default_q8 },
- { DataType::QSYMM8, &CLGEMMDefaultConfigNativeMidgard::default_q8 },
- { DataType::QSYMM8_PER_CHANNEL, &CLGEMMDefaultConfigNativeMidgard::default_q8 }
- };
-
- if(default_configs.find(data_type) != default_configs.end())
- {
- return (this->*default_configs[data_type])(m, n, k, b);
- }
- ARM_COMPUTE_ERROR("Not supported data type");
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigNativeMidgard::default_q8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- const unsigned int m0 = std::min(m, static_cast<unsigned int>(4));
- const unsigned int n0 = std::min(n, static_cast<unsigned int>(4));
-
- return configure_lhs_rhs_info(m, n, m0, n0, 2, 1, 1, false, false, false, false);
-}
-} // namespace cl_gemm
-} // namespace arm_compute
\ No newline at end of file
diff --git a/src/core/CL/gemm/native/CLGEMMDefaultConfigNativeMidgard.h b/src/core/CL/gemm/native/CLGEMMDefaultConfigNativeMidgard.h
deleted file mode 100644
index 40c91d42b1..0000000000
--- a/src/core/CL/gemm/native/CLGEMMDefaultConfigNativeMidgard.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMDEFAULTCONFIGNATIVEMIDGARD_H
-#define ARM_COMPUTE_CLGEMMDEFAULTCONFIGNATIVEMIDGARD_H
-
-#include "src/core/CL/ICLGEMMKernelConfiguration.h"
-
-namespace arm_compute
-{
-namespace cl_gemm
-{
-/** Midgard based OpenCL GEMMNative configuration */
-class CLGEMMDefaultConfigNativeMidgard final : public ICLGEMMKernelConfiguration
-{
-public:
- /** Constructor
- *
- * @param[in] gpu GPU target
- */
- CLGEMMDefaultConfigNativeMidgard(GPUTarget gpu);
-
- // Inherited overridden method
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type) override;
-
-private:
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> default_q8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
-};
-} // namespace cl_gemm
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLGEMMDEFAULTCONFIGNATIVEMIDGARD_H */
diff --git a/src/core/CL/gemm/native/CLGEMMDefaultConfigNativeValhall.cpp b/src/core/CL/gemm/native/CLGEMMDefaultConfigNativeValhall.cpp
deleted file mode 100644
index ab5aa11bf8..0000000000
--- a/src/core/CL/gemm/native/CLGEMMDefaultConfigNativeValhall.cpp
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Copyright (c) 2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/gemm/native/CLGEMMDefaultConfigNativeValhall.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/GPUTarget.h"
-#include "src/core/CL/gemm/CLGEMMHelpers.h"
-
-#include <map>
-#include <utility>
-
-namespace arm_compute
-{
-namespace cl_gemm
-{
-CLGEMMDefaultConfigNativeValhall::CLGEMMDefaultConfigNativeValhall(GPUTarget gpu)
- : ICLGEMMKernelConfiguration(gpu)
-{
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigNativeValhall::configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type)
-{
- using ConfigurationFunctionExecutorPtr = std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> (CLGEMMDefaultConfigNativeValhall::*)(unsigned int m, unsigned int n, unsigned int k,
- unsigned int b);
-
- // Configurations for Mali-G77
- static std::map<DataType, ConfigurationFunctionExecutorPtr> gemm_configs_G77 =
- {
- { DataType::F32, &CLGEMMDefaultConfigNativeValhall::configure_G77_f32 },
- { DataType::F16, &CLGEMMDefaultConfigNativeValhall::configure_G77_f16 },
- { DataType::QASYMM8, &CLGEMMDefaultConfigNativeValhall::configure_G77_u8 },
- { DataType::QSYMM8, &CLGEMMDefaultConfigNativeValhall::configure_G77_u8 },
- { DataType::QASYMM8_SIGNED, &CLGEMMDefaultConfigNativeValhall::configure_G77_u8 },
- { DataType::QSYMM8_PER_CHANNEL, &CLGEMMDefaultConfigNativeValhall::configure_G77_u8 }
- };
-
- switch(_target)
- {
- case GPUTarget::G77:
- default:
- if(gemm_configs_G77.find(data_type) != gemm_configs_G77.end())
- {
- return (this->*gemm_configs_G77[data_type])(m, n, k, b);
- }
- else
- {
- ARM_COMPUTE_ERROR("Not supported data type");
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigNativeValhall::configure_G77_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- if(n < 2048)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 4, 1, 1, false, false, false, false);
- }
- else if(n >= 2048 && n < 8192)
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 4, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 8, 4, 1, 1, false, false, false, false);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 5, 4, 2, 1, 1, false, false, false, false);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigNativeValhall::configure_G77_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- if(n < 2048)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 4, 1, 1, false, false, false, false);
- }
- else if(n >= 2048 && n < 8192)
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 4, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 8, 4, 1, 1, false, false, false, false);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 8, 2, 1, 1, false, false, false, false);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigNativeValhall::configure_G77_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(dot8_supported(CLKernelLibrary::get().get_device()))
- {
- if(m == 1)
- {
- if(n < 2048)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 1, false, false, false, false);
- }
- else if(n >= 2048 && n < 16384)
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 8, 16, 1, 1, false, false, false, false);
- }
- }
- else
- {
- if(m < 64)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 16, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 5, 2, 16, 1, 1, false, false, false, false);
- }
- }
- }
- else
- {
- if(m == 1)
- {
- if(n < 8192)
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 8, 16, 1, 1, false, false, false, false);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 8, 16, 1, 1, false, false, false, false);
- }
- }
-}
-} // namespace cl_gemm
-} // namespace arm_compute
\ No newline at end of file
diff --git a/src/core/CL/gemm/native/CLGEMMDefaultConfigNativeValhall.h b/src/core/CL/gemm/native/CLGEMMDefaultConfigNativeValhall.h
deleted file mode 100644
index 08d2d57a3e..0000000000
--- a/src/core/CL/gemm/native/CLGEMMDefaultConfigNativeValhall.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMDEFAULTCONFIGNATIVEVALHALL_H
-#define ARM_COMPUTE_CLGEMMDEFAULTCONFIGNATIVEVALHALL_H
-
-#include "src/core/CL/ICLGEMMKernelConfiguration.h"
-
-namespace arm_compute
-{
-namespace cl_gemm
-{
-/** Valhall based OpenCL GEMMNative configuration */
-class CLGEMMDefaultConfigNativeValhall final : public ICLGEMMKernelConfiguration
-{
-public:
- /** Constructor
- *
- * @param[in] gpu GPU target
- */
- CLGEMMDefaultConfigNativeValhall(GPUTarget gpu);
-
- // Inherited overridden method
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type) override;
-
-private:
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G77_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G77_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G77_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
-};
-} // namespace cl_gemm
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLGEMMDEFAULTCONFIGNATIVEVALHALL_H */
diff --git a/src/core/CL/gemm/native/CLGEMMNativeKernelConfiguration.h b/src/core/CL/gemm/native/CLGEMMNativeKernelConfiguration.h
deleted file mode 100644
index 39a534e817..0000000000
--- a/src/core/CL/gemm/native/CLGEMMNativeKernelConfiguration.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMNATIVEKERNELCONFIGURATION_H
-#define ARM_COMPUTE_CLGEMMNATIVEKERNELCONFIGURATION_H
-
-#include "src/core/CL/ICLGEMMKernelConfiguration.h"
-#include "src/core/CL/gemm/native/CLGEMMDefaultConfigNativeBifrost.h"
-#include "src/core/CL/gemm/native/CLGEMMDefaultConfigNativeMidgard.h"
-#include "src/core/CL/gemm/native/CLGEMMDefaultConfigNativeValhall.h"
-
-#include <memory>
-
-namespace arm_compute
-{
-namespace cl_gemm
-{
-/** CLGEMMNative factory class */
-class CLGEMMNativeKernelConfigurationFactory final
-{
-public:
- /** Static method to construct CLGEMMNative kernel object accordingly with the GPU target
- *
- * @param[in] gpu GPU target
- *
- * @return CLGEMMNative kernel configuration class
- */
- static std::unique_ptr<ICLGEMMKernelConfiguration> create(GPUTarget gpu)
- {
- switch(get_arch_from_target(gpu))
- {
- case GPUTarget::MIDGARD:
- return std::make_unique<CLGEMMDefaultConfigNativeMidgard>(gpu);
- case GPUTarget::BIFROST:
- return std::make_unique<CLGEMMDefaultConfigNativeBifrost>(gpu);
- case GPUTarget::VALHALL:
- return std::make_unique<CLGEMMDefaultConfigNativeValhall>(gpu);
- default:
- ARM_COMPUTE_ERROR("Not supported GPU target");
- }
- }
-};
-} // namespace cl_gemm
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLGEMMNATIVEKERNELCONFIGURATION_H */
diff --git a/src/core/CL/gemm/reshaped/CLGEMMDefaultConfigReshapedBifrost.cpp b/src/core/CL/gemm/reshaped/CLGEMMDefaultConfigReshapedBifrost.cpp
deleted file mode 100644
index 749d125c01..0000000000
--- a/src/core/CL/gemm/reshaped/CLGEMMDefaultConfigReshapedBifrost.cpp
+++ /dev/null
@@ -1,376 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/gemm/reshaped/CLGEMMDefaultConfigReshapedBifrost.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/GPUTarget.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/CL/gemm/CLGEMMHelpers.h"
-
-#include <map>
-#include <utility>
-
-namespace arm_compute
-{
-namespace cl_gemm
-{
-using namespace arm_compute::misc::shape_calculator;
-
-CLGEMMDefaultConfigReshapedBifrost::CLGEMMDefaultConfigReshapedBifrost(GPUTarget gpu)
- : ICLGEMMKernelConfiguration(gpu)
-{
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedBifrost::configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type)
-{
- using ConfigurationFunctionExecutorPtr = std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> (CLGEMMDefaultConfigReshapedBifrost::*)(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
-
- // Configurations for Mali-G76
- static std::map<DataType, ConfigurationFunctionExecutorPtr> gemm_configs_G76 =
- {
- { DataType::F32, &CLGEMMDefaultConfigReshapedBifrost::configure_G76_f32 },
- { DataType::F16, &CLGEMMDefaultConfigReshapedBifrost::configure_G76_f16 },
- { DataType::QASYMM8, &CLGEMMDefaultConfigReshapedBifrost::configure_G76_u8 },
- { DataType::QSYMM8, &CLGEMMDefaultConfigReshapedBifrost::configure_G76_u8 },
- { DataType::QASYMM8_SIGNED, &CLGEMMDefaultConfigReshapedBifrost::configure_G76_u8 },
- { DataType::QSYMM8_PER_CHANNEL, &CLGEMMDefaultConfigReshapedBifrost::configure_G76_u8 }
- };
-
- // Configurations for Mali-G52
- static std::map<DataType, ConfigurationFunctionExecutorPtr> gemm_configs_G52 =
- {
- { DataType::F32, &CLGEMMDefaultConfigReshapedBifrost::configure_G52_f32 },
- { DataType::F16, &CLGEMMDefaultConfigReshapedBifrost::configure_G52_f16 },
- { DataType::QASYMM8, &CLGEMMDefaultConfigReshapedBifrost::configure_G7x_u8 },
- { DataType::QSYMM8, &CLGEMMDefaultConfigReshapedBifrost::configure_G7x_u8 },
- { DataType::QASYMM8_SIGNED, &CLGEMMDefaultConfigReshapedBifrost::configure_G7x_u8 },
- { DataType::QSYMM8_PER_CHANNEL, &CLGEMMDefaultConfigReshapedBifrost::configure_G7x_u8 }
- };
-
- // Configurations for Mali-G7x
- static std::map<DataType, ConfigurationFunctionExecutorPtr> gemm_configs_G7x =
- {
- { DataType::F32, &CLGEMMDefaultConfigReshapedBifrost::configure_G7x_f32 },
- { DataType::F16, &CLGEMMDefaultConfigReshapedBifrost::configure_G7x_f16 },
- { DataType::QASYMM8, &CLGEMMDefaultConfigReshapedBifrost::configure_G7x_u8 },
- { DataType::QSYMM8, &CLGEMMDefaultConfigReshapedBifrost::configure_G7x_u8 },
- { DataType::QASYMM8_SIGNED, &CLGEMMDefaultConfigReshapedBifrost::configure_G7x_u8 },
- { DataType::QSYMM8_PER_CHANNEL, &CLGEMMDefaultConfigReshapedBifrost::configure_G7x_u8 }
- };
-
- switch(_target)
- {
- case GPUTarget::G76:
- if(gemm_configs_G76.find(data_type) != gemm_configs_G76.end())
- {
- return (this->*gemm_configs_G76[data_type])(m, n, k, b);
- }
- else
- {
- ARM_COMPUTE_ERROR("Not supported data type");
- }
- default:
- if(gemm_configs_G7x.find(data_type) != gemm_configs_G7x.end())
- {
- return (this->*gemm_configs_G7x[data_type])(m, n, k, b);
- }
- else
- {
- ARM_COMPUTE_ERROR("Not supported data type");
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedBifrost::configure_G7x_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(n <= 4)
- {
- return configure_lhs_rhs_info(m, n, 4, 2, 8, 16, 16, true, false, false, true);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 5, 4, 4, 2, 16, false, true, false, true);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedBifrost::configure_G7x_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(n <= 4)
- {
- return configure_lhs_rhs_info(m, n, 4, 2, 8, 8, 2, true, true, true, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 8, 4, 4, 2, true, true, true, false);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedBifrost::configure_G7x_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(dot8_supported(CLKernelLibrary::get().get_device()))
- {
- if(n <= 4)
- {
- return configure_lhs_rhs_info(m, n, 4, 2, 16, 2, 2, true, false, false, true);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 16, 2, 2, true, false, false, true);
- }
- }
- else
- {
- if(n <= 4)
- {
- return configure_lhs_rhs_info(m, n, 4, 2, 8, 2, 2, true, false, false, true);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 6, 4, 4, 2, 2, true, true, false, true);
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedBifrost::configure_G52_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- const float r_mn = static_cast<float>(m) / static_cast<float>(n);
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
- const float r_mk = static_cast<float>(m) / static_cast<float>(k);
- const float r_nk = static_cast<float>(n) / static_cast<float>(k);
-
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- if(workload <= 274.4000f)
- {
- if(r_nk <= 0.7461f)
- {
- if(r_mn <= 21.1667f)
- {
- return configure_lhs_rhs_info(m, n, 4, 2, 4, 4, 4, false, true, true, false, false);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 2, true, true, false, true, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 2, true, true, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 2, true, true, false, true, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 2, true, true, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
- else
- {
- if(r_mk <= 17.3926f)
- {
- if(workload <= 542.4000f)
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 2, true, true, false, true, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 2, true, true, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 1, true, true, false, true, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 1, true, true, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
- else
- {
- if(r_nk <= 0.5463f)
- {
- if(workload <= 11767.6001f)
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 2, true, true, false, true, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 2, true, true, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 1, true, true, false, true, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 1, true, true, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 2, true, true, false, true, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 2, true, true, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedBifrost::configure_G52_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
-
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
-
- if(workload <= 323.4000f)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 8, 4, 8, false, false, false, true, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 8, 4, 2, 2, true, true, true, false, false);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedBifrost::configure_G76_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- // Get lhs_info/rhs_info in case of OpenCL buffer
- if(n <= 4)
- {
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 2, 8, 16, 16, true, false, false, true);
- }
- else
- {
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 2, 8, 16, false, false, false, true);
- }
-
- // Get lhs_info/rhs_info in case of OpenCL image
- // Condition on the GPU workload
- if((m / 4) * (n / 4) >= 2560)
- {
- // Big workload
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 8, true, true, true, false, true);
- }
- else
- {
- // Small workload
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 2, 4, 4, 1, 1, true, true, true, false, true);
- }
-
- const TensorInfo tensor_rhs_info(TensorShape(n, k, b), 1, DataType::F32);
- const TensorShape shape = compute_rhs_reshaped_shape(tensor_rhs_info, rhs_info_img);
- const TensorInfo tensor_reshaped_info(shape, 1, DataType::F32);
-
- // In case of vector by matrix with few work-items, we use the OpenCL buffer rather than the OpenCL image2d
- const bool use_cl_image2d = (n <= 4) ? false : true;
-
- if(bool(validate_image2d_support_on_rhs(tensor_reshaped_info, rhs_info_img)) && use_cl_image2d)
- {
- return std::make_pair(lhs_info_img, rhs_info_img);
- }
- else
- {
- return std::make_pair(lhs_info_buf, rhs_info_buf);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedBifrost::configure_G76_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
- const float r_mk = static_cast<float>(m) / static_cast<float>(k);
-
- if(workload <= 1595.2000f)
- {
- if(r_mk <= 2.1044f)
- {
- if(workload <= 870.4000f)
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 4, 1, 2, true, false, true, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 2, 4, 2, 2, false, false, true, false, false);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 2, 4, 2, 2, false, false, true, false, false);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 8, 4, 4, 2, true, true, true, false, false);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedBifrost::configure_G76_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(n <= 4)
- {
- return configure_lhs_rhs_info(m, n, 4, 2, 16, 4, 1, false, false, false, true);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 16, 2, 2, false, true, false, true);
- }
-}
-} // namespace cl_gemm
-} // namespace arm_compute
diff --git a/src/core/CL/gemm/reshaped/CLGEMMDefaultConfigReshapedBifrost.h b/src/core/CL/gemm/reshaped/CLGEMMDefaultConfigReshapedBifrost.h
deleted file mode 100644
index 814b831b69..0000000000
--- a/src/core/CL/gemm/reshaped/CLGEMMDefaultConfigReshapedBifrost.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMDEFAULTCONFIGRESHAPEDBIFROST_H
-#define ARM_COMPUTE_CLGEMMDEFAULTCONFIGRESHAPEDBIFROST_H
-
-#include "src/core/CL/ICLGEMMKernelConfiguration.h"
-
-namespace arm_compute
-{
-namespace cl_gemm
-{
-/** Bifrost based OpenCL GEMMReshaped configuration */
-class CLGEMMDefaultConfigReshapedBifrost final : public ICLGEMMKernelConfiguration
-{
-public:
- /** Constructor
- *
- * @param[in] gpu GPU target
- */
- CLGEMMDefaultConfigReshapedBifrost(GPUTarget gpu);
-
- // Inherited overridden method
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type) override;
-
-private:
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G7x_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G52_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G76_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G7x_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G52_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G76_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G7x_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G76_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
-};
-} // namespace cl_gemm
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLGEMMDEFAULTCONFIGRESHAPEDBIFROST_H */
diff --git a/src/core/CL/gemm/reshaped/CLGEMMDefaultConfigReshapedValhall.cpp b/src/core/CL/gemm/reshaped/CLGEMMDefaultConfigReshapedValhall.cpp
deleted file mode 100644
index 13c139e201..0000000000
--- a/src/core/CL/gemm/reshaped/CLGEMMDefaultConfigReshapedValhall.cpp
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * Copyright (c) 2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/gemm/reshaped/CLGEMMDefaultConfigReshapedValhall.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/GPUTarget.h"
-#include "src/core/CL/gemm/CLGEMMHelpers.h"
-
-#include <map>
-#include <utility>
-
-namespace arm_compute
-{
-namespace cl_gemm
-{
-CLGEMMDefaultConfigReshapedValhall::CLGEMMDefaultConfigReshapedValhall(GPUTarget gpu)
- : ICLGEMMKernelConfiguration(gpu)
-{
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedValhall::configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type)
-{
- using ConfigurationFunctionExecutorPtr = std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> (CLGEMMDefaultConfigReshapedValhall::*)(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
-
- // Configurations for Mali-G77
- static std::map<DataType, ConfigurationFunctionExecutorPtr> gemm_configs_G77 =
- {
- { DataType::F32, &CLGEMMDefaultConfigReshapedValhall::configure_G77_f32 },
- { DataType::F16, &CLGEMMDefaultConfigReshapedValhall::configure_G77_f16 },
- { DataType::QASYMM8, &CLGEMMDefaultConfigReshapedValhall::configure_G77_u8 },
- { DataType::QSYMM8, &CLGEMMDefaultConfigReshapedValhall::configure_G77_u8 },
- { DataType::QASYMM8_SIGNED, &CLGEMMDefaultConfigReshapedValhall::configure_G77_u8 },
- { DataType::QSYMM8_PER_CHANNEL, &CLGEMMDefaultConfigReshapedValhall::configure_G77_u8 }
- };
-
- switch(_target)
- {
- case GPUTarget::G77:
- default:
- if(gemm_configs_G77.find(data_type) != gemm_configs_G77.end())
- {
- return (this->*gemm_configs_G77[data_type])(m, n, k, b);
- }
- else
- {
- ARM_COMPUTE_ERROR("Not supported data type");
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedValhall::configure_G77_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(n <= 4)
- {
- return configure_lhs_rhs_info(m, n, 4, 2, 8, 16, 16, true, false, false, true);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 5, 4, 4, 2, 16, false, true, false, true);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedValhall::configure_G77_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- const float r_mn = static_cast<float>(m) / static_cast<float>(n);
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
- const float r_mk = static_cast<float>(m) / static_cast<float>(k);
- const float r_nk = static_cast<float>(n) / static_cast<float>(k);
-
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 4, false, false, true, false, false);
-
- if(r_mk <= 0.11824845522642136)
- {
- if(workload <= 880.0)
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 4, 1, 4, false, false, true, false, false);
- }
- else
- {
- if(r_nk <= 0.42521367967128754)
- {
- if(workload <= 1726.4000244140625)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 2, false, false, true, false, false);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 1, false, true, true, false, true);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- }
- else
- {
- if(workload <= 1241.6000366210938)
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 4, 1, 4, false, false, true, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 4, false, false, true, false, false);
- }
- }
- }
- }
- else
- {
- if(workload <= 11404.7998046875)
- {
- if(r_mk <= 1.0126488208770752)
- {
- if(r_mn <= 2.545312523841858)
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 1, false, true, true, false, true);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 4, 1, 4, false, false, true, false, false);
- }
- }
- else
- {
- if(workload <= 2881.199951171875)
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 2, false, false, true, false, true);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 1, false, true, true, false, true);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- }
- }
- else
- {
- if(r_nk <= 0.5765306055545807)
- {
- if(r_mn <= 6.010416746139526)
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 1, false, true, true, false, true);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 1, true, false, true, false, true);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 1, true, false, true, false, true);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedValhall::configure_G77_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(n <= 4)
- {
- return configure_lhs_rhs_info(m, n, 4, 2, 16, 4, 1, false, false, false, true);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 16, 2, 2, false, true, false, true);
- }
-}
-} // namespace cl_gemm
-} // namespace arm_compute
diff --git a/src/core/CL/gemm/reshaped/CLGEMMDefaultConfigReshapedValhall.h b/src/core/CL/gemm/reshaped/CLGEMMDefaultConfigReshapedValhall.h
deleted file mode 100644
index 2ce6482c46..0000000000
--- a/src/core/CL/gemm/reshaped/CLGEMMDefaultConfigReshapedValhall.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMDEFAULTCONFIGRESHAPEDVALHALL_H
-#define ARM_COMPUTE_CLGEMMDEFAULTCONFIGRESHAPEDVALHALL_H
-
-#include "src/core/CL/ICLGEMMKernelConfiguration.h"
-
-namespace arm_compute
-{
-namespace cl_gemm
-{
-/** Valhall based OpenCL GEMMReshaped configuration */
-class CLGEMMDefaultConfigReshapedValhall final : public ICLGEMMKernelConfiguration
-{
-public:
- /** Constructor
- *
- * @param[in] gpu GPU target
- */
- CLGEMMDefaultConfigReshapedValhall(GPUTarget gpu);
-
- // Inherited overridden method
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type) override;
-
-private:
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G77_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G77_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G77_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
-};
-} // namespace cl_gemm
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLGEMMDEFAULTCONFIGRESHAPEDVALHALL_H */
diff --git a/src/core/CL/gemm/reshaped/CLGEMMReshapedKernelConfiguration.h b/src/core/CL/gemm/reshaped/CLGEMMReshapedKernelConfiguration.h
deleted file mode 100644
index de60698a91..0000000000
--- a/src/core/CL/gemm/reshaped/CLGEMMReshapedKernelConfiguration.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMRESHAPEDKERNELCONFIGURATION_H
-#define ARM_COMPUTE_CLGEMMRESHAPEDKERNELCONFIGURATION_H
-
-#include "src/core/CL/ICLGEMMKernelConfiguration.h"
-#include "src/core/CL/gemm/reshaped/CLGEMMDefaultConfigReshapedBifrost.h"
-#include "src/core/CL/gemm/reshaped/CLGEMMDefaultConfigReshapedValhall.h"
-
-#include <memory>
-
-namespace arm_compute
-{
-namespace cl_gemm
-{
-/** CLGEMMReshaped factory class */
-class CLGEMMReshapedKernelConfigurationFactory final
-{
-public:
- /** Static method to call the CLGEMMReshaped kernel configuration class accordingly with the GPU target
- *
- * @param[in] gpu GPU target
- *
- * @return CLGEMMReshaped kernel configuration class
- */
- static std::unique_ptr<ICLGEMMKernelConfiguration> create(GPUTarget gpu)
- {
- switch(get_arch_from_target(gpu))
- {
- case GPUTarget::MIDGARD:
- case GPUTarget::BIFROST:
- return std::make_unique<CLGEMMDefaultConfigReshapedBifrost>(gpu);
- case GPUTarget::VALHALL:
- return std::make_unique<CLGEMMDefaultConfigReshapedValhall>(gpu);
- default:
- ARM_COMPUTE_ERROR("Not supported GPU target");
- }
- }
-};
-} // namespace cl_gemm
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLGEMMRESHAPEDKERNELCONFIGURATION_H */
diff --git a/src/core/CL/gemm/reshaped_only_rhs/CLGEMMDefaultConfigReshapedRHSOnlyBifrost.cpp b/src/core/CL/gemm/reshaped_only_rhs/CLGEMMDefaultConfigReshapedRHSOnlyBifrost.cpp
deleted file mode 100644
index 7e6ef72f34..0000000000
--- a/src/core/CL/gemm/reshaped_only_rhs/CLGEMMDefaultConfigReshapedRHSOnlyBifrost.cpp
+++ /dev/null
@@ -1,560 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/gemm/reshaped_only_rhs/CLGEMMDefaultConfigReshapedRHSOnlyBifrost.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/GPUTarget.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/CL/gemm/CLGEMMHelpers.h"
-
-#include <map>
-#include <utility>
-
-namespace arm_compute
-{
-namespace cl_gemm
-{
-using namespace arm_compute::misc::shape_calculator;
-
-CLGEMMDefaultConfigReshapedRHSOnlyBifrost::CLGEMMDefaultConfigReshapedRHSOnlyBifrost(GPUTarget gpu)
- : ICLGEMMKernelConfiguration(gpu)
-{
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type)
-{
- using ConfigurationFunctionExecutorPtr = std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> (CLGEMMDefaultConfigReshapedRHSOnlyBifrost::*)(unsigned int m, unsigned int n, unsigned int k,
- unsigned int b);
-
- // Configurations for Mali-G51
- static std::map<DataType, ConfigurationFunctionExecutorPtr> gemm_configs_G51 =
- {
- { DataType::F32, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G51_f32 },
- { DataType::F16, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G51_f16 },
- { DataType::QASYMM8, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G51_u8 },
- { DataType::QSYMM8, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G51_u8 },
- { DataType::QASYMM8_SIGNED, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G51_u8 },
- { DataType::QSYMM8_PER_CHANNEL, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G51_u8 }
- };
-
- // Configurations for Mali-G52
- static std::map<DataType, ConfigurationFunctionExecutorPtr> gemm_configs_G52 =
- {
- { DataType::F32, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G52_f32 },
- { DataType::F16, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G52_f16 },
- { DataType::QASYMM8, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G7x_u8 },
- { DataType::QSYMM8, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G7x_u8 },
- { DataType::QASYMM8_SIGNED, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G7x_u8 },
- { DataType::QSYMM8_PER_CHANNEL, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G7x_u8 }
- };
-
- // Configurations for Mali-G76
- static std::map<DataType, ConfigurationFunctionExecutorPtr> gemm_configs_G76 =
- {
- { DataType::F32, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G76_f32 },
- { DataType::F16, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G76_f16 },
- { DataType::QASYMM8, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G76_u8 },
- { DataType::QSYMM8, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G76_u8 },
- { DataType::QASYMM8_SIGNED, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G76_u8 },
- { DataType::QSYMM8_PER_CHANNEL, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G76_u8 }
- };
-
- // Configurations for Mali-G7x
- static std::map<DataType, ConfigurationFunctionExecutorPtr> gemm_configs_G7x =
- {
- { DataType::F32, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G7x_f32 },
- { DataType::F16, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G7x_f16 },
- { DataType::QASYMM8, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G7x_u8 },
- { DataType::QSYMM8, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G7x_u8 },
- { DataType::QASYMM8_SIGNED, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G7x_u8 },
- { DataType::QSYMM8_PER_CHANNEL, &CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G7x_u8 }
- };
-
- switch(_target)
- {
- case GPUTarget::G76:
- if(gemm_configs_G76.find(data_type) != gemm_configs_G76.end())
- {
- return (this->*gemm_configs_G76[data_type])(m, n, k, b);
- }
- else
- {
- ARM_COMPUTE_ERROR("Not supported data type");
- }
- case GPUTarget::G52:
- if(gemm_configs_G52.find(data_type) != gemm_configs_G52.end())
- {
- return (this->*gemm_configs_G52[data_type])(m, n, k, b);
- }
- else
- {
- ARM_COMPUTE_ERROR("Not supported data type");
- }
- case GPUTarget::G51:
- if(gemm_configs_G51.find(data_type) != gemm_configs_G51.end())
- {
- return (this->*gemm_configs_G51[data_type])(m, n, k, b);
- }
- else
- {
- ARM_COMPUTE_ERROR("Not supported data type");
- }
- default:
- if(gemm_configs_G7x.find(data_type) != gemm_configs_G7x.end())
- {
- return (this->*gemm_configs_G7x[data_type])(m, n, k, b);
- }
- else
- {
- ARM_COMPUTE_ERROR("Not supported data type");
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G7x_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- if(n <= 2548)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 4, false, true, false, true, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 8, false, true, false, true, false);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 4, false, true, false, true);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G76_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- const bool is_workload_big = ((m * n * b) / 16) >= 2048;
-
- if(m == 1)
- {
- if(n >= 8192)
- {
- const unsigned int h0 = std::max(n / 4, 1U);
- return configure_lhs_rhs_info(m, n, 1, 4, 8, 1, h0, false, true, false, true, false);
- }
- else
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- if(n <= 204)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, h0, false, true, false, true, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 8, 1, h0, false, true, false, true, false);
- }
- }
- }
- else
- {
- const int h0 = std::max(std::min(static_cast<int>(n / 4), static_cast<int>(16)), static_cast<int>(1));
- if(is_workload_big)
- {
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 1, h0, false, true, false, true);
- }
- else
- {
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 2, 4, 8, 1, h0, false, true, false, true);
- }
- }
-
- // Get lhs_info/rhs_info in case of OpenCL image
- const int h0 = std::max(std::min(static_cast<int>(n / 4), static_cast<int>(16)), static_cast<int>(1));
- if(is_workload_big)
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 1, h0, false, true, false, false, true);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 2, 4, 8, 1, h0, false, true, false, true, true);
- }
-
- const TensorInfo tensor_rhs_info(TensorShape(n, k, b), 1, DataType::F32);
- const TensorShape shape = compute_rhs_reshaped_shape(tensor_rhs_info, rhs_info_img);
- const TensorInfo tensor_reshaped_info(shape, 1, DataType::F32);
-
- // In case of vector by matrix or small workloads, we use the OpenCL buffer rather than the OpenCL image2d
- const bool use_cl_image2d = ((m == 1) || ((((m * n * b) / 16) < 2048) && n < 128)) ? false : true;
-
- if(bool(validate_image2d_support_on_rhs(tensor_reshaped_info, rhs_info_img)) && use_cl_image2d)
- {
- return std::make_pair(lhs_info_img, rhs_info_img);
- }
- else
- {
- return std::make_pair(lhs_info_buf, rhs_info_buf);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G52_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
- const float r_nk = static_cast<float>(n) / static_cast<float>(k);
-
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- if(m == 1)
- {
- if(r_nk <= 0.4664f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 16, false, true, false, true, false);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 1, 4, 8, 1, 16, false, true, false, true, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 1, 4, 8, 1, 16, false, true, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
- else
- {
- if(workload <= 274.4000f)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 4, 1, 16, false, false, false, true, false);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 2, false, false, false, true, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 2, false, false, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G51_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- const unsigned int n0 = n < 1280 ? 2 : 4;
- const unsigned int h0 = std::max(n / n0, 1U);
- return configure_lhs_rhs_info(m, n, 1, n0, 4, 1, h0, false, true, false, true);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 2, false, true, false, true);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G7x_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- if(n > 2048)
- {
- const unsigned int h0 = std::max(n / 4, 1U);
- return configure_lhs_rhs_info(m, n, 1, 4, 4, 1, h0, false, true, false, true);
- }
- else
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- return configure_lhs_rhs_info(m, n, 1, 2, 8, 1, h0, false, true, false, true);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 4, false, true, false, true);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G52_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- const float r_mn = static_cast<float>(m) / static_cast<float>(n);
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
- const float r_mk = static_cast<float>(m) / static_cast<float>(k);
- const float r_nk = static_cast<float>(n) / static_cast<float>(k);
-
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- if(m == 1)
- {
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 16, false, true, false, false, false);
-
- if(r_mk <= 0.0026f)
- {
- if(r_nk <= 0.4664f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 32, false, true, false, true, false);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 16, false, true, false, false, true);
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- }
- else
- {
- if(r_mk <= 0.0148f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 32, false, true, false, true, false);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 16, false, true, false, false, true);
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- }
- }
- else
- {
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 5, 8, 4, 1, 2, false, false, false, false, false);
-
- if(workload <= 362.6000f)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 8, 1, 16, false, false, false, true, false);
- }
- else
- {
- if(r_mn <= 22.6067f)
- {
- if(workload <= 708.8000f)
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 5, 4, 4, 1, 2, false, false, false, false, true);
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 5, 8, 2, 1, 16, false, false, false, false, false);
- }
- }
- else
- {
- if(r_nk <= 0.0917f)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 8, 1, 16, false, false, false, true, false);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 5, 4, 4, 1, 2, false, false, false, false, true);
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- }
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G76_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
-
- if(m == 1)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 32, false, true, false, true, false);
- }
- else
- {
- const float r_mn = static_cast<float>(m) / static_cast<float>(n);
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
-
- if(workload <= 7449.60f)
- {
- if(workload <= 691.60f)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 8, 1, 8, false, false, false, false, false);
- }
- else
- {
- if(workload <= 4155.20f)
- {
- return configure_lhs_rhs_info(m, n, 5, 2, 8, 1, 16, false, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 5, 8, 2, 1, 32, false, false, false, false, false);
- }
- }
- }
- else
- {
- if(workload <= 16300.80f)
- {
- if(r_mn <= 44.56f)
- {
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 8, 4, 4, 1, 1, false, true, false, false, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 5, 2, 8, 1, 16, false, false, false, false, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 5, 2, 8, 1, 16, false, false, false, false, false);
- }
- }
- else
- {
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 5, 4, 4, 1, 2, false, true, false, false, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 5, 2, 8, 1, 16, false, false, false, false, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G51_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- const unsigned int n0 = n < 1280 ? 2 : 4;
- const unsigned int h0 = std::max(n / n0, 1U);
- return configure_lhs_rhs_info(m, n, 1, n0, 8, 1, h0, false, true, false, true);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 2, false, true, false, true);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G7x_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(dot8_supported(CLKernelLibrary::get().get_device()))
- {
- if(m == 1)
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, h0, false, true, false, true);
- }
- else
- {
- const unsigned int h0 = std::max(n / 4, 1U);
- return configure_lhs_rhs_info(m, n, 4, 4, 16, 1, h0, false, true, false, true);
- }
- }
- else
- {
- const int h0 = std::max(std::min(static_cast<int>(n / 2), static_cast<int>(128)), static_cast<int>(1));
- if(m == 1)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 4, 1, h0, false, true, false, true);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 2, 16, 1, h0, false, true, false, true);
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G76_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, h0, false, true, false, true);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 16, 1, 2, false, true, false, true);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedRHSOnlyBifrost::configure_G51_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- return configure_lhs_rhs_info(m, n, 1, 4, 16, 1, h0, false, true, false, true);
- }
- else
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- return configure_lhs_rhs_info(m, n, 4, 2, 16, 1, h0, false, true, false, true);
- }
-}
-
-} // namespace cl_gemm
-} // namespace arm_compute
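
The deleted Bifrost heuristics above repeatedly make one decision: reshape the RHS into a cl_image2d only when the device supports it and the workload is large enough to amortise the import, otherwise stay on the plain OpenCL buffer. A minimal C++ sketch of that decision, mirroring the buffer-vs-image2d expression at the top of this hunk (the function name and parameters here are illustrative, not the library's API):

// Illustrative sketch only: the real code builds GEMMLHSMatrixInfo /
// GEMMRHSMatrixInfo pairs and additionally checks
// validate_image2d_support_on_rhs() before taking the image2d path.
static bool use_cl_image2d_for_rhs(unsigned int m, unsigned int n, unsigned int b,
                                   bool image2d_supported_on_device)
{
    // Vector-by-matrix products (m == 1) and small workloads do not amortise
    // the cost of importing the reshaped RHS into a cl_image2d.
    const bool large_enough = !((m == 1) || ((((m * n * b) / 16) < 2048) && n < 128));
    return image2d_supported_on_device && large_enough;
}
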
diff --git a/src/core/CL/gemm/reshaped_only_rhs/CLGEMMDefaultConfigReshapedRHSOnlyBifrost.h b/src/core/CL/gemm/reshaped_only_rhs/CLGEMMDefaultConfigReshapedRHSOnlyBifrost.h
deleted file mode 100644
index db89d8317c..0000000000
--- a/src/core/CL/gemm/reshaped_only_rhs/CLGEMMDefaultConfigReshapedRHSOnlyBifrost.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMDEFAULTCONFIGRESHAPEDRHSONLYBIFROST_H
-#define ARM_COMPUTE_CLGEMMDEFAULTCONFIGRESHAPEDRHSONLYBIFROST_H
-
-#include "src/core/CL/ICLGEMMKernelConfiguration.h"
-
-namespace arm_compute
-{
-namespace cl_gemm
-{
-/** Bifrost based OpenCL GEMMReshapedOnlyRHS configuration */
-class CLGEMMDefaultConfigReshapedRHSOnlyBifrost final : public ICLGEMMKernelConfiguration
-{
-public:
- /** Constructor
- *
- * @param[in] gpu GPU target
- */
- CLGEMMDefaultConfigReshapedRHSOnlyBifrost(GPUTarget gpu);
-
- // Inherited overridden method
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type) override;
-
-private:
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G7x_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G76_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G52_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G51_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G7x_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G52_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G76_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G51_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G7x_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G76_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G51_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
-};
-} // namespace cl_gemm
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLGEMMDEFAULTCONFIGRESHAPEDRHSONLYBIFROST_H */
diff --git a/src/core/CL/gemm/reshaped_only_rhs/CLGEMMDefaultConfigReshapedRHSOnlyValhall.cpp b/src/core/CL/gemm/reshaped_only_rhs/CLGEMMDefaultConfigReshapedRHSOnlyValhall.cpp
deleted file mode 100644
index a0d38e44ae..0000000000
--- a/src/core/CL/gemm/reshaped_only_rhs/CLGEMMDefaultConfigReshapedRHSOnlyValhall.cpp
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * Copyright (c) 2020-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/gemm/reshaped_only_rhs/CLGEMMDefaultConfigReshapedRHSOnlyValhall.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/GPUTarget.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/CL/gemm/CLGEMMHelpers.h"
-
-#include <map>
-#include <utility>
-
-namespace arm_compute
-{
-namespace cl_gemm
-{
-using namespace arm_compute::misc::shape_calculator;
-
-CLGEMMDefaultConfigReshapedRHSOnlyValhall::CLGEMMDefaultConfigReshapedRHSOnlyValhall(GPUTarget gpu)
- : ICLGEMMKernelConfiguration(gpu)
-{
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedRHSOnlyValhall::configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type)
-{
- using ConfigurationFunctionExecutorPtr = std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> (CLGEMMDefaultConfigReshapedRHSOnlyValhall::*)(unsigned int m, unsigned int n, unsigned int k,
- unsigned int b);
-
- // Configurations for Mali-G77
- static std::map<DataType, ConfigurationFunctionExecutorPtr> gemm_configs_G77 =
- {
- { DataType::F32, &CLGEMMDefaultConfigReshapedRHSOnlyValhall::configure_G77_f32 },
- { DataType::F16, &CLGEMMDefaultConfigReshapedRHSOnlyValhall::configure_G77_f16 },
- { DataType::QASYMM8, &CLGEMMDefaultConfigReshapedRHSOnlyValhall::configure_G77_u8 },
- { DataType::QSYMM8, &CLGEMMDefaultConfigReshapedRHSOnlyValhall::configure_G77_u8 },
- { DataType::QASYMM8_SIGNED, &CLGEMMDefaultConfigReshapedRHSOnlyValhall::configure_G77_u8 },
- { DataType::QSYMM8_PER_CHANNEL, &CLGEMMDefaultConfigReshapedRHSOnlyValhall::configure_G77_u8 }
- };
-
- switch(_target)
- {
- case GPUTarget::G77:
- default:
- if(gemm_configs_G77.find(data_type) != gemm_configs_G77.end())
- {
- return (this->*gemm_configs_G77[data_type])(m, n, k, b);
- }
- else
- {
- ARM_COMPUTE_ERROR("Not supported data type");
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedRHSOnlyValhall::configure_G77_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- if(m == 1)
- {
- const float r_mn = static_cast<float>(m) / static_cast<float>(n);
- const float r_mk = static_cast<float>(m) / static_cast<float>(k);
-
- if(r_mk <= 0.0064484127797186375)
- {
- if(r_mn <= 0.0028273810748942196)
- {
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- const unsigned int h0 = std::max(n / 4, 1U);
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 1, 4, 8, 1, 16, false, true, false, false, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 1, 4, 4, 1, h0, false, true, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 8, false, true, false, false, false);
- }
- }
- else
- {
- if(r_mk <= 0.020312500186264515)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 4, false, true, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 16, false, true, false, true, false);
- }
- }
- }
- else
- {
- const float r_mn = static_cast<float>(m) / static_cast<float>(n);
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
- const float r_mk = static_cast<float>(m) / static_cast<float>(k);
-
- if(workload <= 1999.2000122070312)
- {
- if(workload <= 747.1999816894531)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 4, 1, 8, false, true, false, true, false);
- }
- else
- {
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 2, 4, 8, 1, 2, false, false, false, true, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 2, 2, 4, 1, 8, false, true, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
- else
- {
- if(r_mn <= 0.03348214365541935)
- {
- if(r_mk <= 0.028125000186264515)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 4, 1, 8, false, true, false, true, false);
- }
- else
- {
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 2, 4, 8, 1, 2, false, false, false, true, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 2, 2, 4, 1, 8, false, true, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
- else
- {
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 2, false, true, false, false, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 16, false, true, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedRHSOnlyValhall::configure_G77_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- if(n <= 836.0)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, h0, false, true, false, true, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 8, 1, h0, false, true, false, true, false);
- }
- }
- else if(m < 128)
- {
- const int h0 = std::max(std::min(static_cast<int>(n / 4), static_cast<int>(256)), static_cast<int>(1));
- if(k >= 512)
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 16, 1, h0, false, true, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 8, 1, h0, false, true, false, false);
- }
- }
- else
- {
- const int h0 = std::max(std::min(static_cast<int>(n / 4), static_cast<int>(256)), static_cast<int>(1));
- if(n >= 64)
- {
- return configure_lhs_rhs_info(m, n, 4, 8, 4, 1, h0, false, true, false, false);
- }
- else
- {
- if(k >= 512)
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 16, 1, h0, false, true, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 8, 1, h0, false, true, false, false);
- }
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMDefaultConfigReshapedRHSOnlyValhall::configure_G77_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- return configure_lhs_rhs_info(m, n, 1, 4, 16, 1, h0, false, true, false, true);
- }
- else
- {
- const int h0 = std::max(std::min(static_cast<int>(n / 4), static_cast<int>(256)), static_cast<int>(1));
- if(m >= 28)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 16, 1, h0, false, true, false, true);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 16, 1, h0, false, true, false, true);
- }
- }
-}
-} // namespace cl_gemm
-} // namespace arm_compute
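
configure() in the deleted Valhall file dispatches to a per-data-type member function through a std::map of pointer-to-member functions. A compact sketch of that dispatch pattern (class, key type and member names are placeholders, not arm_compute types):

#include <map>
#include <stdexcept>

class ExampleGemmConfig
{
public:
    int configure(int m, int n, char data_type)
    {
        using Fn = int (ExampleGemmConfig::*)(int, int);
        // One entry per supported data type, as in gemm_configs_G77 above.
        static const std::map<char, Fn> configs = {
            {'f', &ExampleGemmConfig::configure_f32},
            {'h', &ExampleGemmConfig::configure_f16},
        };
        const auto it = configs.find(data_type);
        if (it == configs.end())
        {
            throw std::runtime_error("Not supported data type"); // mirrors ARM_COMPUTE_ERROR
        }
        return (this->*(it->second))(m, n); // call through the pointer-to-member
    }

private:
    int configure_f32(int m, int n) { return m * n; }
    int configure_f16(int m, int n) { return m + n; }
};
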
diff --git a/src/core/CL/gemm/reshaped_only_rhs/CLGEMMDefaultConfigReshapedRHSOnlyValhall.h b/src/core/CL/gemm/reshaped_only_rhs/CLGEMMDefaultConfigReshapedRHSOnlyValhall.h
deleted file mode 100644
index 94c6a43c21..0000000000
--- a/src/core/CL/gemm/reshaped_only_rhs/CLGEMMDefaultConfigReshapedRHSOnlyValhall.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMDEFAULTCONFIGRESHAPEDRHSONLYVALHALL_H
-#define ARM_COMPUTE_CLGEMMDEFAULTCONFIGRESHAPEDRHSONLYVALHALL_H
-
-#include "src/core/CL/ICLGEMMKernelConfiguration.h"
-
-namespace arm_compute
-{
-namespace cl_gemm
-{
-/** Valhall based OpenCL GEMMReshapedOnlyRHS configuration */
-class CLGEMMDefaultConfigReshapedRHSOnlyValhall final : public ICLGEMMKernelConfiguration
-{
-public:
- /** Constructor
- *
- * @param[in] gpu GPU target
- */
- CLGEMMDefaultConfigReshapedRHSOnlyValhall(GPUTarget gpu);
-
- // Inherited overridden method
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type) override;
-
-private:
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G77_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G77_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G77_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
-};
-} // namespace cl_gemm
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLGEMMDEFAULTCONFIGRESHAPEDRHSONLYVALHALL_H */
diff --git a/src/core/CL/gemm/reshaped_only_rhs/CLGEMMReshapedOnlyRHSKernelConfiguration.h b/src/core/CL/gemm/reshaped_only_rhs/CLGEMMReshapedOnlyRHSKernelConfiguration.h
deleted file mode 100644
index 001b98dca8..0000000000
--- a/src/core/CL/gemm/reshaped_only_rhs/CLGEMMReshapedOnlyRHSKernelConfiguration.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMRESHAPEDONLYRHSKERNELCONFIGURATION_H
-#define ARM_COMPUTE_CLGEMMRESHAPEDONLYRHSKERNELCONFIGURATION_H
-
-#include "src/core/CL/ICLGEMMKernelConfiguration.h"
-#include "src/core/CL/gemm/reshaped_only_rhs/CLGEMMDefaultConfigReshapedRHSOnlyBifrost.h"
-#include "src/core/CL/gemm/reshaped_only_rhs/CLGEMMDefaultConfigReshapedRHSOnlyValhall.h"
-
-#include <memory>
-
-namespace arm_compute
-{
-namespace cl_gemm
-{
-/** CLGEMMReshapedOnlyRHS factory class */
-class CLGEMMReshapedOnlyRHSKernelConfigurationFactory final
-{
-public:
- /** Static method to call the CLGEMMReshapedOnlyRHS kernel configuration class accordingly with the GPU target
- *
- * @param[in] gpu GPU target
- *
- * @return CLGEMMReshapedOnlyRHS kernel configuration class
- */
- static std::unique_ptr<ICLGEMMKernelConfiguration> create(GPUTarget gpu)
- {
- switch(get_arch_from_target(gpu))
- {
- case GPUTarget::MIDGARD:
- case GPUTarget::BIFROST:
- return std::make_unique<CLGEMMDefaultConfigReshapedRHSOnlyBifrost>(gpu);
- case GPUTarget::VALHALL:
- return std::make_unique<CLGEMMDefaultConfigReshapedRHSOnlyValhall>(gpu);
- default:
- ARM_COMPUTE_ERROR("Not supported GPU target");
- }
- }
-};
-} // namespace cl_gemm
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLGEMMRESHAPEDONLYRHSKERNELCONFIGURATION_H */
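
The removed factory exposes a single static create() that maps the GPU architecture to the matching heuristic class. A hedged sketch of how the pre-removal API was driven (it assumes the two headers deleted above are still on the include path; the matrix-info types come from arm_compute/core/Types.h):

#include "arm_compute/core/Types.h"
#include "src/core/CL/gemm/reshaped_only_rhs/CLGEMMReshapedOnlyRHSKernelConfiguration.h"

#include <utility>

std::pair<arm_compute::GEMMLHSMatrixInfo, arm_compute::GEMMRHSMatrixInfo>
pick_reshaped_rhs_config(arm_compute::GPUTarget gpu, unsigned int m, unsigned int n,
                         unsigned int k, unsigned int b)
{
    using namespace arm_compute::cl_gemm;
    // Midgard/Bifrost targets get the Bifrost heuristic, Valhall targets the
    // Valhall one; any other architecture aborts with ARM_COMPUTE_ERROR.
    auto config = CLGEMMReshapedOnlyRHSKernelConfigurationFactory::create(gpu);
    return config->configure(m, n, k, b, arm_compute::DataType::F32);
}
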
diff --git a/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp b/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp
index 909972482f..5b72354abe 100644
--- a/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp
+++ b/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,37 +29,36 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
#include "arm_compute/core/Validate.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
-
#include "support/StringSupport.h"
namespace arm_compute
{
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *prev_output, const ITensorInfo *output, unsigned int axis, ReductionOperation op)
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S32, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(op != ReductionOperation::ARG_IDX_MAX && op != ReductionOperation::ARG_IDX_MIN, "Only ARG_IDX_MAX and ARG_IDX_MIN are supported");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions");
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED,
+ DataType::S32, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32, DataType::S64);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(op != ReductionOperation::ARG_IDX_MAX && op != ReductionOperation::ARG_IDX_MIN,
+ "Only ARG_IDX_MAX and ARG_IDX_MIN are supported");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions,
+ "Reduction axis greater than max number of dimensions");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis");
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32, DataType::S32);
- }
- if(prev_output != nullptr && prev_output->total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(prev_output, 1, DataType::U32, DataType::S32);
- if(output->total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(prev_output, output);
- }
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32, DataType::S32, DataType::S64,
+ DataType::U64);
}
return Status{};
@@ -67,59 +66,70 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *prev_outp
} // namespace
CLArgMinMaxLayerKernel::CLArgMinMaxLayerKernel()
- : _input(nullptr), _prev_output(nullptr), _output(nullptr), _reduction_axis(0), _op(ReductionOperation::ARG_IDX_MAX)
+ : _input(nullptr), _output(nullptr), _reduction_axis(0), _op(ReductionOperation::ARG_IDX_MAX)
{
+ _type = CLKernelType::ELEMENTWISE;
}
-void CLArgMinMaxLayerKernel::configure(const ICLTensor *input, const ICLTensor *prev_output, ICLTensor *output, unsigned int axis, ReductionOperation op)
+void CLArgMinMaxLayerKernel::configure(const ICLTensor *input,
+ ICLTensor *output,
+ unsigned int axis,
+ ReductionOperation op)
{
- configure(CLKernelLibrary::get().get_compile_context(), input, prev_output, output, axis, op);
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, axis, op);
}
-void CLArgMinMaxLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *prev_output, ICLTensor *output, unsigned int axis, ReductionOperation op)
+void CLArgMinMaxLayerKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ unsigned int axis,
+ ReductionOperation op)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- TensorShape output_shape{ input->info()->tensor_shape() };
+ TensorShape output_shape{input->info()->tensor_shape()};
output_shape.set(axis, 1);
- auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape).set_data_type(DataType::S32).reset_padding().set_is_resizable(true));
+ auto_init_if_empty(*output->info(), input->info()
+ ->clone()
+ ->set_tensor_shape(output_shape)
+ .set_data_type(DataType::S32)
+ .reset_padding()
+ .set_is_resizable(true));
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (prev_output != nullptr) ? prev_output->info() : nullptr, output->info(), axis, op));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), axis, op));
- auto padding_info = get_padding_info({ input, prev_output, output });
+ auto padding_info = get_padding_info({input, output});
_input = input;
- _prev_output = prev_output;
_output = output;
_reduction_axis = axis;
_op = op;
// Set build options
- const auto vector_size = (axis == 0) ? 16U : adjust_vec_size(16U, input->info()->dimension(0));
+ const auto adjusted_vector_size = adjust_vec_size(16U, input->info()->dimension(0));
+ const auto vector_size = (adjusted_vector_size == 3U && axis == 0U)
+ ? 2U
+ : adjusted_vector_size; // the opencl kernel only supports sizes 2, 4, 8 and 16.
CLBuildOptions build_opts;
- build_opts.add_option_if(_prev_output != nullptr, "-DPREV_OUTPUT");
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(input->info()->dimension(0) % vector_size));
+ build_opts.add_option("-DVEC_SIZE_LEFTOVER=" +
+ support::cpp11::to_string(input->info()->dimension(0) % vector_size));
build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(vector_size));
build_opts.add_option_if(is_data_type_float(input->info()->data_type()), "-DFLOAT_DATA_TYPE");
build_opts.add_option_if_else(op == ReductionOperation::ARG_IDX_MAX, "-DARG_MAX", "-DARG_MIN");
build_opts.add_option("-DDATA_TYPE_OUTPUT=" + get_cl_type_from_data_type(output->info()->data_type()));
+ build_opts.add_option("-DCOND_DATA_TYPE=" + get_cl_select_type_from_data_type(input->info()->data_type()));
+ build_opts.add_option("-DUNROLL_WITH_PRAGMA=1");
// Create kernel
- cl::NDRange lws_hint = CLKernelLibrary::get().default_ndrange();
std::string kernel_axis_name;
- switch(axis)
+ switch (axis)
{
case 0:
- {
- const ICLTensor *input_for_width = prev_output != nullptr ? _prev_output : _input;
- build_opts.add_option("-DWIDTH=" + support::cpp11::to_string(input_for_width->info()->dimension(0)));
-
+ build_opts.add_option("-DWIDTH=" + support::cpp11::to_string(input->info()->dimension(0)));
kernel_axis_name = "x";
- lws_hint = create_lws_hint_parallel_implementations(input_for_width->info()->dimension(0), vector_size);
- }
- break;
+ break;
case 1:
build_opts.add_option("-DHEIGHT=" + support::cpp11::to_string(input->info()->dimension(1)));
kernel_axis_name = "y";
@@ -139,15 +149,18 @@ void CLArgMinMaxLayerKernel::configure(const CLCompileContext &compile_context,
_kernel = create_kernel(compile_context, "arg_min_max_" + kernel_axis_name, build_opts.options());
// Configure kernel window
- Window win = calculate_max_window((prev_output != nullptr) ? (*prev_output->info()) : (*input->info()), Steps(vector_size));
- ICLKernel::configure_internal(win, lws_hint);
+ Window win = calculate_max_window(*input->info(), Steps(vector_size));
+ ICLKernel::configure_internal(win);
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-Status CLArgMinMaxLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *prev_output, const ITensorInfo *output, unsigned int axis, ReductionOperation op)
+Status CLArgMinMaxLayerKernel::validate(const ITensorInfo *input,
+ const ITensorInfo *output,
+ unsigned int axis,
+ ReductionOperation op)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, prev_output, output, axis, op));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, axis, op));
return Status{};
}
@@ -156,43 +169,36 @@ void CLArgMinMaxLayerKernel::run(const Window &window, cl::CommandQueue &queue)
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
- switch(_reduction_axis)
+ switch (_reduction_axis)
{
case 0:
{
// Set out window
Window out_window(window);
+ Window in_window(window);
out_window.set(Window::DimX, Window::Dimension(0, 0, 0));
+ in_window.set(Window::DimX,
+ Window::Dimension(0, _input->info()->dimension(0), _input->info()->dimension(0)));
+ in_window.set(Window::DimY, Window::Dimension(0, _input->info()->dimension(1), 1u));
// Get first input and output slices
- Window in_slice = window.first_slice_window_2D();
+ Window in_slice = in_window.first_slice_window_2D();
Window out_slice = out_window.first_slice_window_2D();
-
- // Reshape window
- const unsigned int num_tensors = _prev_output != nullptr ? 3 : 2;
-
- // Set local sums buffer
- unsigned int local_res_size = lws_hint()[0] * _output->info()->element_size();
- _kernel.setArg(num_arguments_per_2D_tensor() * num_tensors, local_res_size, nullptr);
do
{
unsigned int idx = 0;
add_2D_tensor_argument(idx, _input, in_slice);
- if(_prev_output != nullptr)
- {
- add_2D_tensor_argument(idx, _prev_output, in_slice);
- }
add_2D_tensor_argument(idx, _output, out_slice);
enqueue(queue, *this, in_slice, lws_hint());
- }
- while(window.slide_window_slice_2D(in_slice) && window.slide_window_slice_2D(out_slice));
+ } while (in_window.slide_window_slice_2D(in_slice) && out_window.slide_window_slice_2D(out_slice));
}
break;
case 1:
{
// Get first input and output slices
- Window window_in{ window };
- window_in.set(Window::DimY, Window::Dimension(0, _input->info()->dimension(1), _input->info()->dimension(1)));
+ Window window_in{window};
+ window_in.set(Window::DimY,
+ Window::Dimension(0, _input->info()->dimension(1), _input->info()->dimension(1)));
Window in_slice = window_in.first_slice_window_2D();
Window out_slice = window.first_slice_window_2D();
@@ -202,15 +208,15 @@ void CLArgMinMaxLayerKernel::run(const Window &window, cl::CommandQueue &queue)
add_2D_tensor_argument(idx, _input, in_slice);
add_2D_tensor_argument(idx, _output, out_slice);
enqueue(queue, *this, in_slice, lws_hint());
- }
- while(window_in.slide_window_slice_2D(in_slice) && window.slide_window_slice_2D(out_slice));
+ } while (window_in.slide_window_slice_2D(in_slice) && window.slide_window_slice_2D(out_slice));
}
break;
case 2:
{
// Get first input and output slices
- Window window_in{ window };
- window_in.set(Window::DimZ, Window::Dimension(0, _input->info()->dimension(2), _input->info()->dimension(2)));
+ Window window_in{window};
+ window_in.set(Window::DimZ,
+ Window::Dimension(0, _input->info()->dimension(2), _input->info()->dimension(2)));
Window in_slice = window_in.first_slice_window_3D();
Window out_slice = window.first_slice_window_3D();
@@ -220,14 +226,13 @@ void CLArgMinMaxLayerKernel::run(const Window &window, cl::CommandQueue &queue)
add_3D_tensor_argument(idx, _input, in_slice);
add_3D_tensor_argument(idx, _output, out_slice);
enqueue(queue, *this, in_slice, lws_hint());
- }
- while(window_in.slide_window_slice_3D(in_slice) && window.slide_window_slice_3D(out_slice));
+ } while (window_in.slide_window_slice_3D(in_slice) && window.slide_window_slice_3D(out_slice));
}
break;
case 3:
{
// Get first input and output slices
- Window window_in{ window };
+ Window window_in{window};
window_in.set(3, Window::Dimension(0, 1, 1));
Window in_slice = window_in.first_slice_window_4D();
Window out_slice = window.first_slice_window_4D();
@@ -238,8 +243,7 @@ void CLArgMinMaxLayerKernel::run(const Window &window, cl::CommandQueue &queue)
add_4D_tensor_argument(idx, _input, in_slice);
add_4D_tensor_argument(idx, _output, out_slice);
enqueue(queue, *this, in_slice, lws_hint());
- }
- while(window_in.slide_window_slice_4D(in_slice) && window.slide_window_slice_4D(out_slice));
+ } while (window_in.slide_window_slice_4D(in_slice) && window.slide_window_slice_4D(out_slice));
}
break;
default:
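
The rewritten configure() above clamps the vectorisation so that the arg_min_max OpenCL kernel is only ever built for 2, 4, 8 or 16 lanes, with the tail handled through -DVEC_SIZE_LEFTOVER. A sketch of that selection; adjusted_vec_size() is a local stand-in that approximates adjust_vec_size() from AdjustVecSize.h:

static unsigned int adjusted_vec_size(unsigned int vec_size, unsigned int dim0)
{
    if (vec_size >= dim0 && dim0 == 3)
    {
        return 3; // a width of exactly 3 is preserved by the helper
    }
    while (vec_size > dim0)
    {
        vec_size >>= 1; // otherwise fall back to the next power of two
    }
    return vec_size;
}

static unsigned int arg_min_max_vec_size(unsigned int dim0, unsigned int axis)
{
    const unsigned int adjusted = adjusted_vec_size(16U, dim0);
    // The X-axis kernel only supports 2/4/8/16 lanes, so the width-3 case is
    // narrowed to 2; leftover elements go through the VEC_SIZE_LEFTOVER path.
    return (adjusted == 3U && axis == 0U) ? 2U : adjusted;
}
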
diff --git a/src/core/CL/kernels/CLArgMinMaxLayerKernel.h b/src/core/CL/kernels/CLArgMinMaxLayerKernel.h
index 929677f905..fb3b41b0de 100644
--- a/src/core/CL/kernels/CLArgMinMaxLayerKernel.h
+++ b/src/core/CL/kernels/CLArgMinMaxLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2020, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_CLARGMINMAXLAYERKERNEL_H
#include "arm_compute/core/Types.h"
+
#include "src/core/CL/ICLKernel.h"
namespace arm_compute
@@ -56,48 +57,46 @@ public:
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/S32/F16/F32.
- * @param[in] prev_output Destination tensor of the previous iterations of @ref CLArgMinMaxLayerKernel. Data types supported: U32/S32
- * Has to be nullptr for the first iteration
- * @param[out] output Destination tensor. Data types supported: U32/S32
- * Output will have the same number of dimensions as input.
- * @param[in] axis Axis along which to reduce. Supported reduction axis : 0,1,2,3
- * @param[in] op Reduction operation to perform. Only ArgMin and ArgMax are supported.
+ * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/S32/F16/F32.
+ * @param[out] output Destination tensor. Data types supported: U32/S32
+ * Output will have the same number of dimensions as input.
+ * @param[in] axis Axis along which to reduce. Supported reduction axis : 0,1,2,3
+ * @param[in] op Reduction operation to perform. Only ArgMin and ArgMax are supported.
*/
- void configure(const ICLTensor *input, const ICLTensor *prev_output, ICLTensor *output, unsigned int axis, ReductionOperation op);
+ void configure(const ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op);
/** Set the input and output tensors.
*
* @param[in] compile_context The compile context to be used.
* @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/S32/F16/F32.
- * @param[in] prev_output Destination tensor of the previous iterations of @ref CLArgMinMaxLayerKernel. Data types supported: U32/S32
- * Has to be nullptr for the first iteration
* @param[out] output Destination tensor. Data types supported: U32/S32
* Output will have the same number of dimensions as input.
* @param[in] axis Axis along which to reduce. Supported reduction axis : 0,1,2,3
* @param[in] op Reduction operation to perform. Only ArgMin and ArgMax are supported.
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *prev_output, ICLTensor *output, unsigned int axis, ReductionOperation op);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ unsigned int axis,
+ ReductionOperation op);
/** Static function to check if given info will lead to a valid configuration of @ref CLArgMinMaxLayerKernel.
*
- * @param[in] input Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/S32/F16/F32.
- * @param[in] prev_output Destination tensor info of the previous iterations. Data types supported: U32/S32
- * Has to be nullptr for the first iteration
- * @param[in] output Destination tensor info. Data types supported: U32/S32
- * Output will have the same number of dimensions as input.
- * @param[in] axis Axis along which to reduce. Supported reduction axis : 0,1,2,3
- * @param[in] op Reduction operation to perform. Only ArgMin and ArgMax are supported.
+ * @param[in] input Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/S32/F16/F32.
+ * @param[in] output Destination tensor info. Data types supported: U32/S32
+ * Output will have the same number of dimensions as input.
+ * @param[in] axis Axis along which to reduce. Supported reduction axis : 0,1,2,3
+ * @param[in] op Reduction operation to perform. Only ArgMin and ArgMax are supported.
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *prev_output, const ITensorInfo *output, unsigned int axis, ReductionOperation op);
+ static Status
+ validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
private:
const ICLTensor *_input;
- const ICLTensor *_prev_output;
ICLTensor *_output;
unsigned int _reduction_axis;
ReductionOperation _op;
diff --git a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
index 44bdc6f587..c88a852a44 100644
--- a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,49 +29,58 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/ActivationFunctionUtils.h"
+#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
-
#include "support/StringSupport.h"
using namespace arm_compute;
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output,
- const ITensorInfo *mean, const ITensorInfo *var,
- const ITensorInfo *beta, const ITensorInfo *gamma,
- float epsilon, ActivationLayerInfo act_info)
+Status validate_arguments(const ITensorInfo *input,
+ const ITensorInfo *output,
+ const ITensorInfo *mean,
+ const ITensorInfo *var,
+ const ITensorInfo *beta,
+ const ITensorInfo *gamma,
+ float epsilon,
+ ActivationLayerInfo act_info)
{
ARM_COMPUTE_UNUSED(epsilon);
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, var);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, mean, var);
- ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL)) != mean->dimension(0));
- if(beta != nullptr)
+ ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(get_data_layout_dimension_index(
+ input->data_layout(), DataLayoutDimension::CHANNEL)) != mean->dimension(0));
+ if (beta != nullptr)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, beta);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, beta);
}
- if(gamma != nullptr)
+ if (gamma != nullptr)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, gamma);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, gamma);
}
- if(act_info.enabled())
+ if (act_info.enabled())
{
ActivationLayerInfo::ActivationFunction act = act_info.activation();
ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() != DataType::F32 && input->data_type() != DataType::F16);
- ARM_COMPUTE_RETURN_ERROR_ON(act != ActivationLayerInfo::ActivationLayerInfo::ActivationFunction::RELU
- && act != ActivationLayerInfo::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU
- && act != ActivationLayerInfo::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU);
+ ARM_COMPUTE_RETURN_ERROR_ON(act != ActivationLayerInfo::ActivationLayerInfo::ActivationFunction::RELU &&
+ act != ActivationLayerInfo::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU &&
+ act !=
+ ActivationLayerInfo::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU);
ARM_COMPUTE_RETURN_ERROR_ON(act_info.b() > act_info.a());
}
- if(output != nullptr && output->total_size() != 0)
+ if (output != nullptr && output->total_size() != 0)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
@@ -83,14 +92,15 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output,
std::pair<Status, Window> validate_and_configure_window_nchw(ITensorInfo *input, ITensorInfo *output)
{
- const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16 / input->element_size(), input->dimension(0));
+ const unsigned int num_elems_processed_per_iteration =
+ adjust_vec_size(16 / input->element_size(), input->dimension(0));
// Configure kernel window
Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
bool window_changed = false;
- if(output != nullptr)
+ if (output != nullptr)
{
AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
window_changed = update_window_and_padding(win, input_access, output_access);
@@ -101,29 +111,50 @@ std::pair<Status, Window> validate_and_configure_window_nchw(ITensorInfo *input,
window_changed = update_window_and_padding(win, input_access);
}
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+ Status err =
+ (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
return std::make_pair(err, win);
}
} // namespace
CLBatchNormalizationLayerKernel::CLBatchNormalizationLayerKernel()
- : _input(nullptr), _output(nullptr), _mean(nullptr), _var(nullptr), _beta(nullptr), _gamma(nullptr), _epsilon(0), _run_in_place(false)
+ : _input(nullptr),
+ _output(nullptr),
+ _mean(nullptr),
+ _var(nullptr),
+ _beta(nullptr),
+ _gamma(nullptr),
+ _epsilon(0),
+ _run_in_place(false)
{
+ _type = CLKernelType::ELEMENTWISE;
}
-void CLBatchNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta, const ICLTensor *gamma,
- float epsilon, ActivationLayerInfo act_info)
+void CLBatchNormalizationLayerKernel::configure(ICLTensor *input,
+ ICLTensor *output,
+ const ICLTensor *mean,
+ const ICLTensor *var,
+ const ICLTensor *beta,
+ const ICLTensor *gamma,
+ float epsilon,
+ ActivationLayerInfo act_info)
{
configure(CLKernelLibrary::get().get_compile_context(), input, output, mean, var, beta, gamma, epsilon, act_info);
}
-void CLBatchNormalizationLayerKernel::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta,
- const ICLTensor *gamma,
- float epsilon, ActivationLayerInfo act_info)
+void CLBatchNormalizationLayerKernel::configure(const CLCompileContext &compile_context,
+ ICLTensor *input,
+ ICLTensor *output,
+ const ICLTensor *mean,
+ const ICLTensor *var,
+ const ICLTensor *beta,
+ const ICLTensor *gamma,
+ float epsilon,
+ ActivationLayerInfo act_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, mean, var);
- auto padding_info = get_padding_info({ input, output, mean, var, beta, gamma });
+ auto padding_info = get_padding_info({input, output, mean, var, beta, gamma});
_input = input;
_output = output;
_mean = mean;
@@ -138,13 +169,15 @@ void CLBatchNormalizationLayerKernel::configure(const CLCompileContext &compile_
mean->info(), var->info(), (beta != nullptr) ? beta->info() : nullptr,
(gamma != nullptr) ? gamma->info() : nullptr, epsilon, act_info));
- unsigned int num_elems_processed_per_iteration = adjust_vec_size(16 / input->info()->element_size(), input->info()->dimension(0));
+ unsigned int num_elems_processed_per_iteration =
+ adjust_vec_size(16 / input->info()->element_size(), input->info()->dimension(0));
// Set build options
CLBuildOptions build_opts;
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(input->info()->dimension(0) % num_elems_processed_per_iteration));
+ build_opts.add_option("-DVEC_SIZE_LEFTOVER=" +
+ support::cpp11::to_string(input->info()->dimension(0) % num_elems_processed_per_iteration));
build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation())));
build_opts.add_option_if(act_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(act_info.a()));
build_opts.add_option_if(act_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(act_info.b()));
@@ -153,29 +186,33 @@ void CLBatchNormalizationLayerKernel::configure(const CLCompileContext &compile_
build_opts.add_option_if(gamma == nullptr, "-DUSE_DEFAULT_GAMMA");
// Create kernel
- _kernel = create_kernel(compile_context, "batchnormalization_layer_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options());
+ _kernel =
+ create_kernel(compile_context,
+ "batchnormalization_layer_" + lower_string(string_from_data_layout(input->info()->data_layout())),
+ build_opts.options());
// Set kernel static arguments
unsigned int include_output = (!_run_in_place) ? 1 : 0;
- unsigned int idx = (1 + include_output) * num_arguments_per_3D_tensor() + 2 * num_arguments_per_1D_tensor(); // Skip the input and output parameters
- if(_beta != nullptr)
+ unsigned int idx = (1 + include_output) * num_arguments_per_3D_tensor() +
+ 2 * num_arguments_per_1D_tensor(); // Skip the input and output parameters
+ if (_beta != nullptr)
{
idx += num_arguments_per_1D_tensor(); // Skip beta parameter
}
- if(_gamma != nullptr)
+ if (_gamma != nullptr)
{
idx += num_arguments_per_1D_tensor(); // Skip gamma parameter
}
_kernel.setArg<cl_float>(idx++, _epsilon);
- if(output != nullptr)
+ if (output != nullptr)
{
// Output tensor auto initialization if not yet initialized
auto_init_if_empty(*output->info(), *input->info()->clone());
}
// Configure kernel window
- if(input->info()->data_layout() == DataLayout::NHWC)
+ if (input->info()->data_layout() == DataLayout::NHWC)
{
Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
ICLKernel::configure_internal(win);
@@ -201,18 +238,23 @@ void CLBatchNormalizationLayerKernel::configure(const CLCompileContext &compile_
_config_id += lower_string(string_from_data_layout(input->info()->data_layout()));
}
-Status CLBatchNormalizationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output,
- const ITensorInfo *mean, const ITensorInfo *var,
- const ITensorInfo *beta, const ITensorInfo *gamma,
- float epsilon, ActivationLayerInfo act_info)
+Status CLBatchNormalizationLayerKernel::validate(const ITensorInfo *input,
+ const ITensorInfo *output,
+ const ITensorInfo *mean,
+ const ITensorInfo *var,
+ const ITensorInfo *beta,
+ const ITensorInfo *gamma,
+ float epsilon,
+ ActivationLayerInfo act_info)
{
const bool run_in_place = (output == nullptr) || (output == input);
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, mean, var, beta, gamma, epsilon, act_info));
- if(input->data_layout() != DataLayout::NHWC)
+ if (input->data_layout() != DataLayout::NHWC)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_nchw(input->clone().get(), (run_in_place) ? nullptr : output->clone().get())
- .first);
+ ARM_COMPUTE_RETURN_ON_ERROR(
+ validate_and_configure_window_nchw(input->clone().get(), (run_in_place) ? nullptr : output->clone().get())
+ .first);
}
return Status{};
@@ -232,11 +274,11 @@ void CLBatchNormalizationLayerKernel::run(const Window &window, cl::CommandQueue
unsigned int idx = (1 + include_output) * num_arguments_per_3D_tensor();
add_1D_tensor_argument(idx, _mean, vector_slice);
add_1D_tensor_argument(idx, _var, vector_slice);
- if(_beta != nullptr)
+ if (_beta != nullptr)
{
add_1D_tensor_argument(idx, _beta, vector_slice);
}
- if(_gamma != nullptr)
+ if (_gamma != nullptr)
{
add_1D_tensor_argument(idx, _gamma, vector_slice);
}
@@ -245,11 +287,10 @@ void CLBatchNormalizationLayerKernel::run(const Window &window, cl::CommandQueue
{
idx = 0;
add_3D_tensor_argument(idx, _input, slice);
- if(!_run_in_place)
+ if (!_run_in_place)
{
add_3D_tensor_argument(idx, _output, slice);
}
enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_3D(slice));
+ } while (window.slide_window_slice_3D(slice));
}
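
The batch-normalization configure() above derives its vectorisation from the element size and forwards it to the OpenCL build as -DVEC_SIZE and -DVEC_SIZE_LEFTOVER. A self-contained sketch of how those two values relate (the power-of-two fallback stands in for adjust_vec_size(); element_size and width are caller-supplied assumptions):

#include <utility>

// Returns {VEC_SIZE, VEC_SIZE_LEFTOVER} for a tensor of the given element size
// and innermost dimension. Sketch only; the library uses adjust_vec_size().
static std::pair<unsigned int, unsigned int> batchnorm_vec_options(unsigned int element_size,
                                                                   unsigned int width)
{
    unsigned int vec_size = 16 / element_size; // process 16 bytes per work-item
    while (vec_size > 1 && vec_size > width)
    {
        vec_size /= 2; // cap the vector width against dimension 0
    }
    return {vec_size, width % vec_size};
}
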
diff --git a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.h b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.h
index 743f4a9594..1a88d2a8c5 100644
--- a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.h
+++ b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2020, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,6 +24,8 @@
#ifndef ARM_COMPUTE_CLBATCHNORMALIZATIONLAYERKERNEL_H
#define ARM_COMPUTE_CLBATCHNORMALIZATIONLAYERKERNEL_H
+#include "arm_compute/function_info/ActivationLayerInfo.h"
+
#include "src/core/CL/ICLKernel.h"
namespace arm_compute
@@ -63,7 +65,13 @@ public:
* @param[in] epsilon (Optional) Small value to avoid division with zero. Default value is 0.001f.
* @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
*/
- void configure(ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta = nullptr, const ICLTensor *gamma = nullptr, float epsilon = 0.001f,
+ void configure(ICLTensor *input,
+ ICLTensor *output,
+ const ICLTensor *mean,
+ const ICLTensor *var,
+ const ICLTensor *beta = nullptr,
+ const ICLTensor *gamma = nullptr,
+ float epsilon = 0.001f,
ActivationLayerInfo act_info = ActivationLayerInfo());
/** Set the input and output tensors.
*
@@ -81,8 +89,15 @@ public:
* @param[in] epsilon (Optional) Small value to avoid division with zero. Default value is 0.001f.
* @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
*/
- void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta = nullptr,
- const ICLTensor *gamma = nullptr, float epsilon = 0.001f, ActivationLayerInfo act_info = ActivationLayerInfo());
+ void configure(const CLCompileContext &compile_context,
+ ICLTensor *input,
+ ICLTensor *output,
+ const ICLTensor *mean,
+ const ICLTensor *var,
+ const ICLTensor *beta = nullptr,
+ const ICLTensor *gamma = nullptr,
+ float epsilon = 0.001f,
+ ActivationLayerInfo act_info = ActivationLayerInfo());
/** Static function to check if given info will lead to a valid configuration of @ref CLBatchNormalizationLayerKernel
*
* @param[in] input Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result.
@@ -98,10 +113,14 @@ public:
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *output,
- const ITensorInfo *mean, const ITensorInfo *var,
- const ITensorInfo *beta = nullptr, const ITensorInfo *gamma = nullptr,
- float epsilon = 0.001f, ActivationLayerInfo act_info = ActivationLayerInfo());
+ static Status validate(const ITensorInfo *input,
+ const ITensorInfo *output,
+ const ITensorInfo *mean,
+ const ITensorInfo *var,
+ const ITensorInfo *beta = nullptr,
+ const ITensorInfo *gamma = nullptr,
+ float epsilon = 0.001f,
+ ActivationLayerInfo act_info = ActivationLayerInfo());
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
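For reference while reading the reformatted signatures above: a minimal scalar sketch of what this kernel computes per element, assuming the usual batch-normalization formula with an optional fused ReLU-style activation. The helper name and the clamp bound are illustrative only, not part of the library.

#include <algorithm>
#include <cmath>

// Illustrative scalar reference (not the OpenCL kernel):
// out = gamma * (x - mean) / sqrt(var + epsilon) + beta, optionally followed by a
// fused activation (RELU / BOUNDED_RELU / LU_BOUNDED_RELU). beta/gamma are optional,
// mirroring the nullptr defaults in configure().
float apply_batchnorm(float x, float mean, float var, float epsilon,
                      const float *beta, const float *gamma,
                      bool fused_relu, float relu_upper = 6.0f)
{
    const float scale  = (gamma != nullptr) ? *gamma : 1.0f;
    const float offset = (beta != nullptr) ? *beta : 0.0f;
    float out = scale * (x - mean) / std::sqrt(var + epsilon) + offset;
    if (fused_relu)
    {
        out = std::min(std::max(out, 0.0f), relu_upper); // BOUNDED_RELU-style clamp
    }
    return out;
}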
diff --git a/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp b/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp
index da41feb7b8..c640b5a8d6 100644
--- a/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp
+++ b/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,7 +25,10 @@
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
@@ -44,7 +47,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *block_inf
ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
// Validate output if initialized
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
ARM_COMPUTE_RETURN_ERROR_ON(output->num_dimensions() > 4);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
@@ -52,7 +55,11 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *block_inf
return Status{};
}
-Status validate_arguments_static(const ITensorInfo *input, const int block_shape_x, const int block_shape_y, const ITensorInfo *output)
+Status validate_arguments_static(const ITensorInfo *input,
+ const int block_shape_x,
+ const int block_shape_y,
+ const ITensorInfo *output,
+ const CropInfo &crop_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4);
@@ -64,14 +71,12 @@ Status validate_arguments_static(const ITensorInfo *input, const int block_shape
ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_batch] % (block_shape_x * block_shape_y) != 0);
// Validate output if initialized
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
- const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
- const int idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
- ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_width] != (block_shape_x * input->tensor_shape()[idx_width]));
- ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_height] != (block_shape_y * input->tensor_shape()[idx_height]));
- ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_channel] != input->tensor_shape()[idx_channel]);
+ const TensorShape expected_output_shape = compute_batch_to_space_shape(
+ input->data_layout(), input->tensor_shape(), block_shape_x, block_shape_y, crop_info);
+ const TensorInfo expected_output = output->clone()->set_tensor_shape(expected_output_shape);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &expected_output);
ARM_COMPUTE_RETURN_ERROR_ON(output->num_dimensions() > 4);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
}
@@ -80,9 +85,9 @@ Status validate_arguments_static(const ITensorInfo *input, const int block_shape
}
} // namespace
-CLBatchToSpaceLayerKernel::CLBatchToSpaceLayerKernel()
- : _input(nullptr), _block_shape(nullptr), _output(nullptr)
+CLBatchToSpaceLayerKernel::CLBatchToSpaceLayerKernel() : _input(nullptr), _block_shape(nullptr), _output(nullptr)
{
+ _type = CLKernelType::ELEMENTWISE;
}
void CLBatchToSpaceLayerKernel::configure(const ICLTensor *input, const ICLTensor *block_shape, ICLTensor *output)
@@ -90,11 +95,14 @@ void CLBatchToSpaceLayerKernel::configure(const ICLTensor *input, const ICLTenso
configure(CLKernelLibrary::get().get_compile_context(), input, block_shape, output);
}
-void CLBatchToSpaceLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *block_shape, ICLTensor *output)
+void CLBatchToSpaceLayerKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ const ICLTensor *block_shape,
+ ICLTensor *output)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- auto padding_info = get_padding_info({ input, block_shape, output });
+ auto padding_info = get_padding_info({input, block_shape, output});
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), block_shape->info(), output->info()));
@@ -102,66 +110,83 @@ void CLBatchToSpaceLayerKernel::configure(const CLCompileContext &compile_contex
_block_shape = block_shape;
_output = output;
- const int idx_width = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH);
-
// Create kernel
CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
- build_opts.add_option("-DBATCH_SIZE=" + support::cpp11::to_string(input->info()->dimension(3)));
- build_opts.add_option("-DWIDTH_IN=" + support::cpp11::to_string(input->info()->dimension(idx_width)));
- _kernel = create_kernel(compile_context, "batch_to_space_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options());
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(output->info()->data_type()));
+ build_opts.add_option("-DBATCH_SIZE=" + support::cpp11::to_string(output->info()->dimension(3)));
+ _kernel = create_kernel(compile_context,
+ "batch_to_space_" + lower_string(string_from_data_layout(input->info()->data_layout())),
+ build_opts.options());
// Configure kernel window
- Window win = calculate_max_window(*input->info(), Steps());
+ Window win = calculate_max_window(*output->info(), Steps());
ICLKernel::configure_internal(win);
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-void CLBatchToSpaceLayerKernel::configure(const ICLTensor *input, const int32_t block_shape_x, const int32_t block_shape_y, ICLTensor *output)
+void CLBatchToSpaceLayerKernel::configure(const ICLTensor *input,
+ const int32_t block_shape_x,
+ const int32_t block_shape_y,
+ ICLTensor *output,
+ const CropInfo &crop_info)
{
- configure(CLKernelLibrary::get().get_compile_context(), input, block_shape_x, block_shape_y, output);
+ configure(CLKernelLibrary::get().get_compile_context(), input, block_shape_x, block_shape_y, output, crop_info);
}
-void CLBatchToSpaceLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const int32_t block_shape_x, const int32_t block_shape_y, ICLTensor *output)
+void CLBatchToSpaceLayerKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ const int32_t block_shape_x,
+ const int32_t block_shape_y,
+ ICLTensor *output,
+ const CropInfo &crop_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- TensorShape output_shape = compute_batch_to_space_shape(input->info(), block_shape_x, block_shape_y);
- auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type());
+ const TensorShape output_shape = compute_batch_to_space_shape(
+ input->info()->data_layout(), input->info()->tensor_shape(), block_shape_x, block_shape_y);
+ auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape));
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_static(input->info(), block_shape_x, block_shape_y, output->info()));
+ ARM_COMPUTE_ERROR_THROW_ON(
+ validate_arguments_static(input->info(), block_shape_x, block_shape_y, output->info(), crop_info));
_input = input;
_output = output;
- const int idx_width = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH);
-
// Create kernel
CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(data_size_from_type(input->info()->data_type())));
- build_opts.add_option("-DBATCH_SIZE=" + support::cpp11::to_string(input->info()->dimension(3)));
+ build_opts.add_option("-DDATA_TYPE=" +
+ get_cl_unsigned_type_from_element_size(data_size_from_type(input->info()->data_type())));
+ build_opts.add_option("-DBATCH_SIZE=" + support::cpp11::to_string(output->info()->dimension(3)));
build_opts.add_option("-DBLOCK_SHAPE_X=" + support::cpp11::to_string(block_shape_x));
build_opts.add_option("-DBLOCK_SHAPE_Y=" + support::cpp11::to_string(block_shape_y));
- build_opts.add_option("-DWIDTH_IN=" + support::cpp11::to_string(input->info()->dimension(idx_width)));
- _kernel = create_kernel(compile_context, "batch_to_space_static_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options());
+ build_opts.add_option("-DCROP_LEFT=" + support::cpp11::to_string(crop_info.left));
+ build_opts.add_option("-DCROP_TOP=" + support::cpp11::to_string(crop_info.top));
+ _kernel = create_kernel(
+ compile_context, "batch_to_space_static_" + lower_string(string_from_data_layout(input->info()->data_layout())),
+ build_opts.options());
// Configure kernel window
- Window win = calculate_max_window(*input->info(), Steps());
+ Window win = calculate_max_window(*output->info(), Steps());
ICLKernel::configure_internal(win);
}
-Status CLBatchToSpaceLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *block_shape, const ITensorInfo *output)
+Status
+CLBatchToSpaceLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *block_shape, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, block_shape, output);
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, block_shape, output));
return Status{};
}
-Status CLBatchToSpaceLayerKernel::validate(const ITensorInfo *input, const int32_t block_shape_x, const int32_t block_shape_y, const ITensorInfo *output)
+Status CLBatchToSpaceLayerKernel::validate(const ITensorInfo *input,
+ const int32_t block_shape_x,
+ const int32_t block_shape_y,
+ const ITensorInfo *output,
+ const CropInfo &crop_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_static(input, block_shape_x, block_shape_y, output));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_static(input, block_shape_x, block_shape_y, output, crop_info));
return Status{};
}
@@ -170,32 +195,31 @@ void CLBatchToSpaceLayerKernel::run(const Window &window, cl::CommandQueue &queu
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
- Window slice_in = window.first_slice_window_3D();
- Window slice_out = window.first_slice_window_4D();
+ Window slice_out = window.first_slice_window_3D();
+ Window slice_in = window.first_slice_window_4D();
Window vector_slice = window.first_slice_window_1D();
vector_slice.set(Window::DimX, Window::Dimension(0, 0, 0));
- slice_out.set(Window::DimX, Window::Dimension(0, 0, 0));
- slice_out.set(Window::DimY, Window::Dimension(0, 0, 0));
- slice_out.set(Window::DimZ, Window::Dimension(0, 0, 0));
- slice_out.set(3, Window::Dimension(0, 0, 0));
+ slice_in.set(Window::DimX, Window::Dimension(0, 0, 0));
+ slice_in.set(Window::DimY, Window::Dimension(0, 0, 0));
+ slice_in.set(Window::DimZ, Window::Dimension(0, 0, 0));
+ slice_in.set(3, Window::Dimension(0, 0, 0));
int batch_id = 0;
do
{
unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, slice_in);
+ add_4D_tensor_argument(idx, _input, slice_in);
add_argument(idx, batch_id);
- if(_block_shape != nullptr)
+ if (_block_shape != nullptr)
{
add_1D_tensor_argument(idx, _block_shape, vector_slice);
}
- add_4D_tensor_argument(idx, _output, slice_out);
- enqueue(queue, *this, slice_in, lws_hint());
+ add_3D_tensor_argument(idx, _output, slice_out);
+ enqueue(queue, *this, slice_out, lws_hint());
++batch_id;
- }
- while(window.slide_window_slice_3D(slice_in));
+ } while (window.slide_window_slice_3D(slice_out));
}
} // namespace arm_compute
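The rewritten validate_arguments_static() now checks the output against compute_batch_to_space_shape() instead of comparing dimensions one by one. A minimal sketch of the shape relationship being checked, assuming the crop is simply subtracted from the expanded spatial dimensions; the struct and function names here are illustrative, not the library's.

#include <cassert>

// Minimal sketch of the batch-to-space shape rule (illustration only).
struct Shape4D { int w, h, c, n; };                 // spatial W/H, channels, batches
struct Crop    { int left, right, top, bottom; };

Shape4D batch_to_space_shape(Shape4D in, int block_x, int block_y, Crop crop)
{
    assert(in.n % (block_x * block_y) == 0);        // mirrors the batch-divisibility check
    Shape4D out{};
    out.w = in.w * block_x - crop.left - crop.right;
    out.h = in.h * block_y - crop.top  - crop.bottom;
    out.c = in.c;
    out.n = in.n / (block_x * block_y);
    return out;
}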
diff --git a/src/core/CL/kernels/CLBatchToSpaceLayerKernel.h b/src/core/CL/kernels/CLBatchToSpaceLayerKernel.h
index 131a43e59c..b9d3e66fe2 100644
--- a/src/core/CL/kernels/CLBatchToSpaceLayerKernel.h
+++ b/src/core/CL/kernels/CLBatchToSpaceLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_CLBATCHTOSPACELAYERKERNEL_H
#include "arm_compute/core/Types.h"
+
#include "src/core/CL/ICLKernel.h"
namespace arm_compute
@@ -52,6 +53,8 @@ public:
* @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: All.
* @param[in] block_shape 1-D tensor with shape [M]. Data types supported: S32
* @param[out] output Tensor output. Data types supported: same as @p input
+ *
+ * @deprecated This method for dynamic block shape is not fully mature and will be removed in the 23.08 release
*/
void configure(const ICLTensor *input, const ICLTensor *block_shape, ICLTensor *output);
/** Initialise the kernel's inputs and output.
@@ -60,16 +63,26 @@ public:
* @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: All.
* @param[in] block_shape 1-D tensor with shape [M]. Data types supported: S32
* @param[out] output Tensor output. Data types supported: same as @p input
+ *
+ * @deprecated This method for dynamic block shape is not fully mature and will be removed in the 23.08 release
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *block_shape, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ const ICLTensor *block_shape,
+ ICLTensor *output);
/** Initialise the kernel's inputs and output (Static block shape).
*
* @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: All.
* @param[in] block_shape_x Block shape x value.
* @param[in] block_shape_y Block shape y value.
* @param[out] output Tensor output. Data types supported: same as @p input
+ * @param[in] crop_info Specifies how the output shape is cropped after batch to space is performed
*/
- void configure(const ICLTensor *input, const int32_t block_shape_x, const int32_t block_shape_y, ICLTensor *output);
+ void configure(const ICLTensor *input,
+ const int32_t block_shape_x,
+ const int32_t block_shape_y,
+ ICLTensor *output,
+ const CropInfo &crop_info);
/** Initialise the kernel's inputs and output (Static block shape).
*
* @param[in] compile_context The compile context to be used.
@@ -77,8 +90,14 @@ public:
* @param[in] block_shape_x Block shape x value.
* @param[in] block_shape_y Block shape y value.
* @param[out] output Tensor output. Data types supported: same as @p input
+ * @param[in] crop_info Specifies how the output shape is cropped after batch to space is performed
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const int32_t block_shape_x, const int32_t block_shape_y, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ const int32_t block_shape_x,
+ const int32_t block_shape_y,
+ ICLTensor *output,
+ const CropInfo &crop_info);
/** Static function to check if given info will lead to a valid configuration of @ref CLBatchToSpaceLayerKernel
*
* @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: All.
@@ -86,6 +105,8 @@ public:
* @param[in] output Tensor output. Data types supported: same as @p input
*
* @return a status
+ *
+ * @deprecated This method for dynamic block shape is not fully mature and will be removed in the 23.08 release
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *block_shape, const ITensorInfo *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLBatchToSpaceLayerKernel (Static block shape).
@@ -94,10 +115,15 @@ public:
* @param[in] block_shape_x Block shape x value.
* @param[in] block_shape_y Block shape y value.
* @param[in] output Tensor output. Data types supported: same as @p input
+ * @param[in] crop_info Specifies how the output shape is cropped after batch to space is performed
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const int32_t block_shape_x, const int32_t block_shape_y, const ITensorInfo *output);
+ static Status validate(const ITensorInfo *input,
+ const int32_t block_shape_x,
+ const int32_t block_shape_y,
+ const ITensorInfo *output,
+ const CropInfo &crop_info);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
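A hypothetical usage sketch of the new static-block-shape API, validating before configuring. It assumes CropInfo is default-constructible and that a default-constructed value means "no crop"; the function, block sizes and tensors are illustrative only.

#include "src/core/CL/kernels/CLBatchToSpaceLayerKernel.h"

// Hypothetical caller-side sketch (not part of this patch).
void configure_batch_to_space(arm_compute::CLBatchToSpaceLayerKernel &kernel,
                              const arm_compute::ICLTensor *input,
                              arm_compute::ICLTensor       *output)
{
    const int32_t               block_x = 2;
    const int32_t               block_y = 2;
    const arm_compute::CropInfo crop{};   // assumption: default-constructed == zero crop

    ARM_COMPUTE_ERROR_THROW_ON(arm_compute::CLBatchToSpaceLayerKernel::validate(
        input->info(), block_x, block_y, output->info(), crop));
    kernel.configure(input, block_x, block_y, output, crop);
}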
diff --git a/src/core/CL/kernels/CLBitwiseKernel.cpp b/src/core/CL/kernels/CLBitwiseKernel.cpp
index b1f7c00fac..de3fb43de8 100644
--- a/src/core/CL/kernels/CLBitwiseKernel.cpp
+++ b/src/core/CL/kernels/CLBitwiseKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -27,23 +27,30 @@
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
#include "arm_compute/core/Validate.h"
+
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/StringSupport.h"
namespace arm_compute
{
-CLBitwiseKernel::CLBitwiseKernel()
- : _input1(nullptr), _input2(nullptr), _output(nullptr)
+CLBitwiseKernel::CLBitwiseKernel() : _input1(nullptr), _input2(nullptr), _output(nullptr)
{
+ _type = CLKernelType::ELEMENTWISE;
}
-void CLBitwiseKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, BitwiseOperation op)
+void CLBitwiseKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input1,
+ const ICLTensor *input2,
+ ICLTensor *output,
+ BitwiseOperation op)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8);
- if(op != BitwiseOperation::NOT)
+ if (op != BitwiseOperation::NOT)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input2);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8);
@@ -53,7 +60,7 @@ void CLBitwiseKernel::configure(const CLCompileContext &compile_context, const I
// Output auto initialization if not yet initialized
auto_init_if_empty(*(output->info()), *(input1->info()));
- auto padding_info = get_padding_info({ input1, input2, output });
+ auto padding_info = get_padding_info({input1, input2, output});
// Configure kernel window
const unsigned int vec_size_x = adjust_vec_size(16 / output->info()->element_size(), output->info()->dimension(0));
@@ -65,7 +72,7 @@ void CLBitwiseKernel::configure(const CLCompileContext &compile_context, const I
// Create kernel
std::string kernel_name = "";
- switch(op)
+ switch (op)
{
case BitwiseOperation::AND:
kernel_name = "bitwise_and";
@@ -104,13 +111,12 @@ void CLBitwiseKernel::run(const Window &window, cl::CommandQueue &queue)
{
unsigned int idx = 0;
add_2D_tensor_argument(idx, _input1, slice);
- if(_input2 != nullptr)
+ if (_input2 != nullptr)
{
add_2D_tensor_argument(idx, _input2, slice);
}
add_2D_tensor_argument(idx, _output, slice);
enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_2D(slice));
+ } while (window.slide_window_slice_2D(slice));
}
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
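As a reminder of the semantics behind the kernel-name switch above, a scalar reference of the four supported operations on U8 data; the actual kernel operates on vectors of VEC_SIZE uchar elements. BitwiseOperation is the library enum already used by configure(); the include path and helper name below are assumptions for illustration.

#include <cstdint>
#include "arm_compute/core/Types.h" // assumed location of BitwiseOperation

// Scalar reference of the supported bitwise operations (illustration only).
uint8_t bitwise_op_reference(uint8_t a, uint8_t b, arm_compute::BitwiseOperation op)
{
    using arm_compute::BitwiseOperation;
    switch (op)
    {
        case BitwiseOperation::AND: return static_cast<uint8_t>(a & b);
        case BitwiseOperation::OR:  return static_cast<uint8_t>(a | b);
        case BitwiseOperation::NOT: return static_cast<uint8_t>(~a); // second input unused (may be nullptr)
        case BitwiseOperation::XOR: return static_cast<uint8_t>(a ^ b);
        default:                    return 0;
    }
}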
diff --git a/src/core/CL/kernels/CLBitwiseKernel.h b/src/core/CL/kernels/CLBitwiseKernel.h
index c5a999643d..2c74955ae4 100644
--- a/src/core/CL/kernels/CLBitwiseKernel.h
+++ b/src/core/CL/kernels/CLBitwiseKernel.h
@@ -59,7 +59,11 @@ public:
* @param[out] output Destination tensor. Data types supported: U8.
* @param[in] op Bitwise operation to perform. Supported: AND, OR, NOT, XOR.
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, BitwiseOperation op);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input1,
+ const ICLTensor *input2,
+ ICLTensor *output,
+ BitwiseOperation op);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/src/core/CL/kernels/CLBoundingBoxTransformKernel.cpp b/src/core/CL/kernels/CLBoundingBoxTransformKernel.cpp
index 2c12275e57..f32c518e29 100644
--- a/src/core/CL/kernels/CLBoundingBoxTransformKernel.cpp
+++ b/src/core/CL/kernels/CLBoundingBoxTransformKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,13 +25,13 @@
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLArray.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
-#include "src/core/AccessWindowStatic.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
@@ -41,7 +41,10 @@ namespace arm_compute
{
namespace
{
-Status validate_arguments(const ITensorInfo *boxes, const ITensorInfo *pred_boxes, const ITensorInfo *deltas, const BoundingBoxTransformInfo &info)
+Status validate_arguments(const ITensorInfo *boxes,
+ const ITensorInfo *pred_boxes,
+ const ITensorInfo *deltas,
+ const BoundingBoxTransformInfo &info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(boxes, pred_boxes, deltas);
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(boxes);
@@ -54,7 +57,7 @@ Status validate_arguments(const ITensorInfo *boxes, const ITensorInfo *pred_boxe
ARM_COMPUTE_RETURN_ERROR_ON(boxes->num_dimensions() > 2);
const bool is_qasymm16 = boxes->data_type() == DataType::QASYMM16;
- if(is_qasymm16)
+ if (is_qasymm16)
{
const UniformQuantizationInfo boxes_qinfo = boxes->quantization_info().uniform();
ARM_COMPUTE_RETURN_ERROR_ON(boxes_qinfo.scale != 0.125f);
@@ -66,12 +69,12 @@ Status validate_arguments(const ITensorInfo *boxes, const ITensorInfo *pred_boxe
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(boxes, deltas);
}
- if(pred_boxes->total_size() > 0)
+ if (pred_boxes->total_size() > 0)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(pred_boxes->tensor_shape(), deltas->tensor_shape());
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(pred_boxes, boxes);
ARM_COMPUTE_RETURN_ERROR_ON(pred_boxes->num_dimensions() > 2);
- if(is_qasymm16)
+ if (is_qasymm16)
{
const UniformQuantizationInfo pred_boxes_qinfo = pred_boxes->quantization_info().uniform();
ARM_COMPUTE_RETURN_ERROR_ON(pred_boxes_qinfo.scale != 0.125f);
@@ -84,21 +87,31 @@ Status validate_arguments(const ITensorInfo *boxes, const ITensorInfo *pred_boxe
}
} // namespace
-CLBoundingBoxTransformKernel::CLBoundingBoxTransformKernel()
- : _boxes(nullptr), _pred_boxes(nullptr), _deltas(nullptr)
+CLBoundingBoxTransformKernel::CLBoundingBoxTransformKernel() : _boxes(nullptr), _pred_boxes(nullptr), _deltas(nullptr)
{
+ _type = CLKernelType::ELEMENTWISE;
}
-void CLBoundingBoxTransformKernel::configure(const ICLTensor *boxes, ICLTensor *pred_boxes, const ICLTensor *deltas, const BoundingBoxTransformInfo &info)
+void CLBoundingBoxTransformKernel::configure(const ICLTensor *boxes,
+ ICLTensor *pred_boxes,
+ const ICLTensor *deltas,
+ const BoundingBoxTransformInfo &info)
{
configure(CLKernelLibrary::get().get_compile_context(), boxes, pred_boxes, deltas, info);
}
-void CLBoundingBoxTransformKernel::configure(const CLCompileContext &compile_context, const ICLTensor *boxes, ICLTensor *pred_boxes, const ICLTensor *deltas, const BoundingBoxTransformInfo &info)
+void CLBoundingBoxTransformKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *boxes,
+ ICLTensor *pred_boxes,
+ const ICLTensor *deltas,
+ const BoundingBoxTransformInfo &info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(boxes, pred_boxes, deltas);
- auto padding_info = get_padding_info({ boxes, pred_boxes, deltas });
- auto_init_if_empty(*pred_boxes->info(), deltas->info()->clone()->set_data_type(boxes->info()->data_type()).set_quantization_info(boxes->info()->quantization_info()));
+ auto padding_info = get_padding_info({boxes, pred_boxes, deltas});
+ auto_init_if_empty(*pred_boxes->info(), deltas->info()
+ ->clone()
+ ->set_data_type(boxes->info()->data_type())
+ .set_quantization_info(boxes->info()->quantization_info()));
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(boxes->info(), pred_boxes->info(), deltas->info(), info));
@@ -128,7 +141,7 @@ void CLBoundingBoxTransformKernel::configure(const CLCompileContext &compile_con
build_opts.add_option_if(info.apply_scale(), "-DSCALE_AFTER=" + float_to_string_with_full_precision(info.scale()));
build_opts.add_option_if(info.correct_transform_coords(), "-DOFFSET=1");
- if(is_quantized)
+ if (is_quantized)
{
build_opts.add_option("-DDATA_TYPE_DELTAS=" + get_cl_type_from_data_type(deltas->info()->data_type()));
const UniformQuantizationInfo boxes_qinfo = boxes->info()->quantization_info().uniform();
@@ -148,12 +161,15 @@ void CLBoundingBoxTransformKernel::configure(const CLCompileContext &compile_con
// Since the number of columns is a multiple of 4 by definition, we don't need to pad the tensor
const unsigned int num_elems_processed_per_iteration = 4;
- Window win = calculate_max_window(*deltas->info(), Steps(num_elems_processed_per_iteration));
+ Window win = calculate_max_window(*deltas->info(), Steps(num_elems_processed_per_iteration));
ICLKernel::configure_internal(win);
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-Status CLBoundingBoxTransformKernel::validate(const ITensorInfo *boxes, const ITensorInfo *pred_boxes, const ITensorInfo *deltas, const BoundingBoxTransformInfo &info)
+Status CLBoundingBoxTransformKernel::validate(const ITensorInfo *boxes,
+ const ITensorInfo *pred_boxes,
+ const ITensorInfo *deltas,
+ const BoundingBoxTransformInfo &info)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(boxes, pred_boxes, deltas, info));
return Status{};
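For orientation, a scalar sketch of the conventional box-delta transform this kernel family implements for float data, assuming unit weights and no pre/post image scaling. The quantized (QASYMM16) path, the BoundingBoxTransformInfo weights and the OFFSET coordinate correction are omitted, so this is an illustration rather than the kernel's exact arithmetic.

#include <algorithm>
#include <cmath>

struct Box { float x1, y1, x2, y2; };

// Apply (dx, dy, dw, dh) deltas to a box and clamp to the image (illustration only).
Box apply_deltas(Box box, float dx, float dy, float dw, float dh, float img_w, float img_h)
{
    const float w     = box.x2 - box.x1;
    const float h     = box.y2 - box.y1;
    const float ctr_x = box.x1 + 0.5f * w;
    const float ctr_y = box.y1 + 0.5f * h;

    const float pred_ctr_x = ctr_x + dx * w;
    const float pred_ctr_y = ctr_y + dy * h;
    const float pred_w     = std::exp(dw) * w;
    const float pred_h     = std::exp(dh) * h;

    Box out;
    out.x1 = std::max(0.0f, std::min(pred_ctr_x - 0.5f * pred_w, img_w - 1.0f));
    out.y1 = std::max(0.0f, std::min(pred_ctr_y - 0.5f * pred_h, img_h - 1.0f));
    out.x2 = std::max(0.0f, std::min(pred_ctr_x + 0.5f * pred_w, img_w - 1.0f));
    out.y2 = std::max(0.0f, std::min(pred_ctr_y + 0.5f * pred_h, img_h - 1.0f));
    return out;
}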
diff --git a/src/core/CL/kernels/CLBoundingBoxTransformKernel.h b/src/core/CL/kernels/CLBoundingBoxTransformKernel.h
index 08f350e86a..9a1bb49bb9 100644
--- a/src/core/CL/kernels/CLBoundingBoxTransformKernel.h
+++ b/src/core/CL/kernels/CLBoundingBoxTransformKernel.h
@@ -58,7 +58,10 @@ public:
* @note Only single image prediction is supported. Height and Width (and scale) of the image will be contained in the BoundingBoxTransformInfo struct.
*
*/
- void configure(const ICLTensor *boxes, ICLTensor *pred_boxes, const ICLTensor *deltas, const BoundingBoxTransformInfo &info);
+ void configure(const ICLTensor *boxes,
+ ICLTensor *pred_boxes,
+ const ICLTensor *deltas,
+ const BoundingBoxTransformInfo &info);
/** Set the input and output tensors.
*
* @param[in] compile_context The compile context to be used.
@@ -71,7 +74,11 @@ public:
* @note Only single image prediction is supported. Height and Width (and scale) of the image will be contained in the BoundingBoxTransformInfo struct.
*
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *boxes, ICLTensor *pred_boxes, const ICLTensor *deltas, const BoundingBoxTransformInfo &info);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *boxes,
+ ICLTensor *pred_boxes,
+ const ICLTensor *deltas,
+ const BoundingBoxTransformInfo &info);
/** Static function to check if given info will lead to a valid configuration of @ref CLBoundingBoxTransform
*
@@ -85,7 +92,10 @@ public:
*
* @return a Status
*/
- static Status validate(const ITensorInfo *boxes, const ITensorInfo *pred_boxes, const ITensorInfo *deltas, const BoundingBoxTransformInfo &info);
+ static Status validate(const ITensorInfo *boxes,
+ const ITensorInfo *pred_boxes,
+ const ITensorInfo *deltas,
+ const BoundingBoxTransformInfo &info);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/src/core/CL/kernels/CLChannelShuffleLayerKernel.cpp b/src/core/CL/kernels/CLChannelShuffleLayerKernel.cpp
index c969792c3e..ec58bf9e7a 100644
--- a/src/core/CL/kernels/CLChannelShuffleLayerKernel.cpp
+++ b/src/core/CL/kernels/CLChannelShuffleLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -28,6 +28,10 @@
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
@@ -43,15 +47,19 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, u
ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups < 2, "Channel shuffling with less than 2 groups would be inefficient");
- const unsigned int channels = input->dimension(get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL));
+ const unsigned int channels =
+ input->dimension(get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL));
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups == channels, "Channel shuffling with same number of groups as number of channels would be inefficient");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(
+ num_groups == channels,
+ "Channel shuffling with same number of groups as number of channels would be inefficient");
// There cannot be more groups than channels
ARM_COMPUTE_RETURN_ERROR_ON(num_groups > channels);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((channels % num_groups) != 0, "The number of channels must be a multiple of the number of groups");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((channels % num_groups) != 0,
+ "The number of channels must be a multiple of the number of groups");
// Checks performed when output is configured
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
@@ -66,28 +74,42 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
// Output tensor auto initialization if not yet initialized
auto_init_if_empty(*output, *input->clone());
- const bool is_nhwc = input->data_layout() == DataLayout::NHWC;
- const unsigned int num_elems_processed_per_iteration_x = is_nhwc ? 4 : max_cl_vector_width / input->element_size();
- constexpr unsigned int num_elems_processed_per_iteration_y = 2;
+ const bool is_nhwc = input->data_layout() == DataLayout::NHWC;
+ if (is_nhwc)
+ {
+ unsigned int num_elems_processed_per_iteration_x =
+ adjust_vec_size(max_cl_vector_width / input->element_size(), input->dimension(0));
+ Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration_x));
+ Window win_collapsed = win.collapse(win, Window::DimZ);
+ return std::make_pair(Status{}, win_collapsed);
+ }
+ else
+ {
+ const unsigned int num_elems_processed_per_iteration_x = max_cl_vector_width / input->element_size();
+ constexpr unsigned int num_elems_processed_per_iteration_y = 2;
- // Configure kernel window
- Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
- AccessWindowRectangle input_access(input, 0, 0, num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y);
- AccessWindowRectangle output_access(output, 0, 0, num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y);
+ // Configure kernel window
+ Window win = calculate_max_window(
+ *input, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
+ AccessWindowRectangle input_access(input, 0, 0, num_elems_processed_per_iteration_x,
+ num_elems_processed_per_iteration_y);
+ AccessWindowRectangle output_access(output, 0, 0, num_elems_processed_per_iteration_x,
+ num_elems_processed_per_iteration_y);
- const bool window_changed = update_window_and_padding(win, input_access, output_access);
- output_access.set_valid_region(win, input->valid_region());
+ const bool window_changed = update_window_and_padding(win, input_access, output_access);
- Window win_collapsed = win.collapse(win, Window::DimZ);
+ Window win_collapsed = win.collapse(win, Window::DimZ);
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win_collapsed);
+ Status err =
+ (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+ return std::make_pair(err, win_collapsed);
+ }
}
} // namespace
-CLChannelShuffleLayerKernel::CLChannelShuffleLayerKernel()
- : _input(nullptr), _output(nullptr)
+CLChannelShuffleLayerKernel::CLChannelShuffleLayerKernel() : _input(nullptr), _output(nullptr)
{
+ _type = CLKernelType::ELEMENTWISE;
}
void CLChannelShuffleLayerKernel::configure(const ICLTensor *input, ICLTensor *output, unsigned int num_groups)
@@ -95,27 +117,42 @@ void CLChannelShuffleLayerKernel::configure(const ICLTensor *input, ICLTensor *o
configure(CLKernelLibrary::get().get_compile_context(), input, output, num_groups);
}
-void CLChannelShuffleLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, unsigned int num_groups)
+void CLChannelShuffleLayerKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ unsigned int num_groups)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), num_groups));
+ auto padding_info = get_padding_info({input, output});
_input = input;
_output = output;
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), num_groups));
-
const DataLayout data_layout = input->info()->data_layout();
const bool is_nhwc = data_layout == DataLayout::NHWC;
- const unsigned int channels = input->info()->dimension(get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL));
- const unsigned int vec_size = is_nhwc ? 4 : max_cl_vector_width / input->info()->element_size();
+ const unsigned int channels =
+ input->info()->dimension(get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL));
+ unsigned int vec_size_x = 0;
+ unsigned int vec_size_x_leftovers = 0;
+ if (is_nhwc)
+ {
+ vec_size_x = adjust_vec_size(max_cl_vector_width / input->info()->element_size(), input->info()->dimension(0));
+ vec_size_x_leftovers = input->info()->dimension(0) % vec_size_x;
+ }
+ else
+ {
+ vec_size_x = max_cl_vector_width / input->info()->element_size();
+ }
// Set kernel build options
CLBuildOptions build_opts;
build_opts.add_option("-DNUM_GROUPS=" + support::cpp11::to_string(num_groups));
build_opts.add_option("-DK=" + support::cpp11::to_string(channels / num_groups));
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(vec_size));
+ build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
+ build_opts.add_option_if(is_nhwc, "-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(vec_size_x_leftovers));
+ build_opts.add_option_if(is_nhwc, "-DSRC_DIM_X=" + support::cpp11::to_string(input->info()->dimension(0)));
build_opts.add_option("-DSRC_DIM_Z=" + support::cpp11::to_string(input->info()->dimension(2)));
- build_opts.add_option("-DLAST_ACCESSED=" + support::cpp11::to_string(std::max(static_cast<int>(channels - vec_size), 0)));
build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(input->info()->element_size()));
// Create kernel
@@ -146,9 +183,14 @@ void CLChannelShuffleLayerKernel::configure(const CLCompileContext &compile_cont
_config_id += support::cpp11::to_string(output->info()->dimension(1));
_config_id += "_";
_config_id += support::cpp11::to_string(output->info()->dimension(2));
+ if (data_layout == DataLayout::NHWC)
+ {
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
+ }
}
-Status CLChannelShuffleLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int num_groups)
+Status
+CLChannelShuffleLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int num_groups)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, num_groups));
ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first);
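The NHWC rework above only changes how work is vectorized; the channel permutation itself is unchanged. A minimal sketch of that permutation: channels are viewed as a num_groups x K matrix (K = channels / num_groups) and transposed, so output channel o reads input channel (o % num_groups) * K + o / num_groups. The helper below is illustrative, not library code.

#include <vector>

// Build the output-to-input channel mapping performed by channel shuffle (illustration only).
std::vector<unsigned int> channel_shuffle_mapping(unsigned int channels, unsigned int num_groups)
{
    const unsigned int K = channels / num_groups; // channels must be a multiple of num_groups
    std::vector<unsigned int> src_channel(channels);
    for (unsigned int o = 0; o < channels; ++o)
    {
        src_channel[o] = (o % num_groups) * K + o / num_groups;
    }
    return src_channel; // e.g. channels=6, num_groups=2 -> {0, 3, 1, 4, 2, 5}
}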
diff --git a/src/core/CL/kernels/CLChannelShuffleLayerKernel.h b/src/core/CL/kernels/CLChannelShuffleLayerKernel.h
index 31c007f17e..43c939ebd8 100644
--- a/src/core/CL/kernels/CLChannelShuffleLayerKernel.h
+++ b/src/core/CL/kernels/CLChannelShuffleLayerKernel.h
@@ -60,7 +60,10 @@ public:
* @param[out] output Output tensor. Data type supported: Same as @p input
* @param[in] num_groups Number of groups. Must be greater than 1 and the number of channels of the tensors must be a multiple of the number of groups.
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, unsigned int num_groups);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ unsigned int num_groups);
/** Static function to check if given info will lead to a valid configuration of @ref CLChannelShuffleLayerKernel
*
* @param[in] input Input tensor info. Data types supported: All.
diff --git a/src/core/CL/kernels/CLCol2ImKernel.cpp b/src/core/CL/kernels/CLCol2ImKernel.cpp
deleted file mode 100644
index 44b8471725..0000000000
--- a/src/core/CL/kernels/CLCol2ImKernel.cpp
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLCol2ImKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-#include <cmath>
-
-using namespace arm_compute::misc::shape_calculator;
-
-namespace arm_compute
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const Size2D &convolved_dims, unsigned int num_groups)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
-
- // Checks performed when output is configured
- if(output->total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), compute_col2im_shape(*input, convolved_dims, true, num_groups));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_layout() != DataLayout::NCHW, "Col2Im output's data layout must always be NCHW");
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const Size2D &convolved_dims, unsigned int num_groups)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- // Output auto inizialitation if not yet initialized
- auto_init_if_empty(*output, input->clone()->set_tensor_shape(compute_col2im_shape(*input, convolved_dims, true, num_groups)).set_data_layout(DataLayout::NCHW));
-
- constexpr unsigned int num_elems_read_per_iteration = 8;
-
- // Configure window
- Window win = calculate_max_window(*input, Steps(num_elems_read_per_iteration));
-
- // Update window and padding just for the input tensor as we cannot access out-of-bounds elements in the output one
- AccessWindowHorizontal input_access(input, 0, num_elems_read_per_iteration);
- bool window_changed = update_window_and_padding(win, input_access);
-
- Coordinates coord;
- coord.set_num_dimensions(output->num_dimensions());
- output->set_valid_region(ValidRegion(coord, output->tensor_shape()));
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
-}
-} // namespace
-
-CLCol2ImKernel::CLCol2ImKernel()
- : _input(nullptr), _output(nullptr), _convolved_dims()
-{
-}
-
-void CLCol2ImKernel::configure(const ICLTensor *input, ICLTensor *output, const Size2D &convolved_dims, unsigned int num_groups)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, output, convolved_dims, num_groups);
-}
-
-void CLCol2ImKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Size2D &convolved_dims, unsigned int num_groups)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- // Perform validation step
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), convolved_dims, num_groups));
-
- _input = input;
- _output = output;
- _convolved_dims = convolved_dims;
-
- const DataType data_type = input->info()->data_type();
-
- // Create kernel
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
- build_opts.add_option("-DELEMENT_SIZE=" + support::cpp11::to_string(input->info()->element_size()));
- build_opts.add_option("-DWIDTH_INPUT=" + support::cpp11::to_string(input->info()->dimension(0)));
- build_opts.add_option("-DWIDTH_OUTPUT=" + support::cpp11::to_string(_convolved_dims.width));
- build_opts.add_option("-DNUM_GROUPS=" + support::cpp11::to_string(num_groups));
-
- _kernel = create_kernel(compile_context, "col2im", build_opts.options());
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), output->info(), _convolved_dims, num_groups);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
-
- // Set config_id for enabling LWS tuning
- _config_id = "col2im_";
- _config_id += lower_string(string_from_data_type(input->info()->data_type()));
- _config_id += "_";
- _config_id += support::cpp11::to_string(num_groups);
- _config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(1));
-}
-
-Status CLCol2ImKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &convolved_dims, unsigned int num_groups)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, convolved_dims, num_groups));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), convolved_dims, num_groups).first);
- return Status{};
-}
-
-void CLCol2ImKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(ICLKernel::window(), window);
-
- bool is_collapsed = false;
- bool is_collapsed_out = false;
-
- Window out_window;
- out_window.use_tensor_dimensions(_output->info()->tensor_shape());
-
- Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ, &is_collapsed);
- Window collapsed_out = out_window.collapse_if_possible(out_window, 3, &is_collapsed_out);
-
- ARM_COMPUTE_ERROR_ON(is_collapsed != is_collapsed_out);
-
- Window slice = collapsed.first_slice_window_3D();
- Window slice_out = collapsed_out.first_slice_window_4D();
- do
- {
- // Set inputs
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, slice);
- add_4D_tensor_argument(idx, _output, slice_out);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(collapsed.slide_window_slice_3D(slice) && collapsed_out.slide_window_slice_4D(slice_out));
-}
-} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLCol2ImKernel.h b/src/core/CL/kernels/CLCol2ImKernel.h
deleted file mode 100644
index 710e048bca..0000000000
--- a/src/core/CL/kernels/CLCol2ImKernel.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLCOL2IMKERNEL_H
-#define ARM_COMPUTE_CLCOL2IMKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the col2im reshaping kernel.
- *
- * Rearranges each matrix column into image blocks. It's the inverse operation of @ref CLIm2ColKernel.
- *
- * For example, a vector of 9 elements can be reshaped to a block(image) of 3x3:
- *
- * @f[
- * \left( \begin{array}{ccccccccc}
- * a0 & a1 & a2 & a3 & a4 & a5 & a6 & a7 & a8 \\
- * \end{array} \right)
- * \rightarrow
- * \left( \begin{array}{ccc}
- * a0 & a1 & a2 \\
- * a3 & a4 & a5 \\
- * a6 & a7 & a8 \\
- * \end{array} \right)
- * @f]
- */
-class CLCol2ImKernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLCol2ImKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLCol2ImKernel(const CLCol2ImKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLCol2ImKernel &operator=(const CLCol2ImKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLCol2ImKernel(CLCol2ImKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLCol2ImKernel &operator=(CLCol2ImKernel &&) = default;
- /** Default destructor */
- ~CLCol2ImKernel() = default;
- /** Set the input and output of the kernel.
- *
- * @param[in] input The input tensor to convert. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
- * @param[out] output The output tensor. 3 lower dimensions represent a single output [width, height, OFM],
- * while the rest represent batch of outputs. Data types supported: Same as @p input. Data layout: NCHW
- * @param[in] convolved_dims Output convolved dimensions.
- * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution
- */
- void configure(const ICLTensor *input, ICLTensor *output, const Size2D &convolved_dims, unsigned int num_groups = 1);
- /** Set the input and output of the kernel.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input The input tensor to convert. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
- * @param[out] output The output tensor. 3 lower dimensions represent a single output [width, height, OFM],
- * while the rest represent batch of outputs. Data types supported: Same as @p input. Data layout: NCHW
- * @param[in] convolved_dims Output convolved dimensions.
- * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Size2D &convolved_dims, unsigned int num_groups = 1);
- /** Static function to check if given info will lead to a valid configuration of @ref CLCol2ImKernel
- *
- * @param[in] input The input tensor to convert. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
- * @param[in] output The output tensor. 3 lower dimensions represent a single output [width, height, OFM],
- * while the rest represent batch of outputs. Data types supported: Same as @p input. Data layout: NCHW
- * @param[in] convolved_dims Output convolved dimensions.
- * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &convolved_dims, unsigned int num_groups = 1);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-public:
- const ICLTensor *_input;
- ICLTensor *_output;
- Size2D _convolved_dims;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLCOL2IMKERNEL_H */
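The deleted header documented the reshape with a 3x3 example (a0..a8 laid out row by row). A tiny sketch of that mapping for the simplest case follows; the removed kernel additionally handled batches, groups and multiple output channels, which this illustration omits.

#include <cstddef>
#include <vector>

// Reshape a flat row of width*height values into a width x height image block,
// matching the 9-element -> 3x3 example from the removed header (illustration only).
std::vector<std::vector<float>> col2im_block(const std::vector<float> &col, size_t width, size_t height)
{
    std::vector<std::vector<float>> image(height, std::vector<float>(width));
    for (size_t y = 0; y < height; ++y)
    {
        for (size_t x = 0; x < width; ++x)
        {
            image[y][x] = col[y * width + x];
        }
    }
    return image;
}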
diff --git a/src/core/CL/kernels/CLComparisonKernel.cpp b/src/core/CL/kernels/CLComparisonKernel.cpp
index e2aee36bd8..a0f9aca54a 100644
--- a/src/core/CL/kernels/CLComparisonKernel.cpp
+++ b/src/core/CL/kernels/CLComparisonKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,6 +25,9 @@
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
@@ -37,22 +40,16 @@ namespace arm_compute
namespace
{
// Create supported comparisons map
-const std::map<ComparisonOperation, std::string> supported_comparison_ops =
-{
- { ComparisonOperation::Equal, "EQUAL" },
- { ComparisonOperation::NotEqual, "NOTEQUAL" },
- { ComparisonOperation::Greater, "GREATER" },
- { ComparisonOperation::GreaterEqual, "GREATEREQUAL" },
- { ComparisonOperation::Less, "LESS" },
- { ComparisonOperation::LessEqual, "LESSEQUAL" },
+const std::map<ComparisonOperation, std::string> supported_comparison_ops = {
+ {ComparisonOperation::Equal, "EQUAL"}, {ComparisonOperation::NotEqual, "NOTEQUAL"},
+ {ComparisonOperation::Greater, "GREATER"}, {ComparisonOperation::GreaterEqual, "GREATEREQUAL"},
+ {ComparisonOperation::Less, "LESS"}, {ComparisonOperation::LessEqual, "LESSEQUAL"},
};
-int calculate_num_elems_processed_per_iteration(const ITensorInfo &input)
-{
- return 16 / input.element_size();
-}
-
-Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output, ComparisonOperation operation)
+Status validate_arguments(const ITensorInfo &input1,
+ const ITensorInfo &input2,
+ const ITensorInfo &output,
+ ComparisonOperation operation)
{
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&input1);
ARM_COMPUTE_RETURN_ERROR_ON(input1.data_type() == DataType::UNKNOWN);
@@ -63,7 +60,7 @@ Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2,
ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");
// Validate in case of configured output
- if(output.total_size() > 0)
+ if (output.total_size() > 0)
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&output, 1, DataType::U8);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output.tensor_shape(), 0),
@@ -75,45 +72,37 @@ Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2,
std::pair<Status, Window> validate_and_configure_window(ITensorInfo &input1, ITensorInfo &input2, ITensorInfo &output)
{
- const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(input1, input2);
- const TensorShape &out_shape = broadcast_pair.first;
- const ValidRegion &valid_region = broadcast_pair.second;
-
- const unsigned int num_elems_processed_per_iteration = calculate_num_elems_processed_per_iteration(input1);
+ const TensorShape &out_shape = TensorShape::broadcast_shape(input1.tensor_shape(), input2.tensor_shape());
+ const unsigned int num_elems_processed_per_iteration =
+ adjust_vec_size(16 / input1.element_size(), output.dimension(0));
// Auto initialize output if not initialized
auto_init_if_empty(output, out_shape, 1, DataType::U8, QuantizationInfo());
- Window win = calculate_max_window(valid_region, Steps(num_elems_processed_per_iteration));
- Window win_input1 = win.broadcast_if_dimension_le_one(input1);
- Window win_input2 = win.broadcast_if_dimension_le_one(input2);
-
- AccessWindowHorizontal input1_access(&input1, 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal input2_access(&input2, 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal output_access(&output, 0, num_elems_processed_per_iteration);
+ Window win = calculate_max_window(out_shape, Steps(num_elems_processed_per_iteration));
- bool window_changed = update_window_and_padding(win_input1, input1_access)
- || update_window_and_padding(win_input2, input2_access)
- || update_window_and_padding(win, output_access);
-
- output_access.set_valid_region(win, valid_region);
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
+ return std::make_pair(Status{}, win);
}
} // namespace
-CLComparisonKernel::CLComparisonKernel()
- : _input1(nullptr), _input2(nullptr), _output(nullptr)
+CLComparisonKernel::CLComparisonKernel() : _input1(nullptr), _input2(nullptr), _output(nullptr)
{
+ _type = CLKernelType::ELEMENTWISE;
}
-void CLComparisonKernel::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ComparisonOperation operation)
+void CLComparisonKernel::configure(const ICLTensor *input1,
+ const ICLTensor *input2,
+ ICLTensor *output,
+ ComparisonOperation operation)
{
configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, operation);
}
-void CLComparisonKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ComparisonOperation operation)
+void CLComparisonKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input1,
+ const ICLTensor *input2,
+ ICLTensor *output,
+ ComparisonOperation operation)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input1->info(), *input2->info(), *output->info(), operation));
@@ -129,17 +118,29 @@ void CLComparisonKernel::configure(const CLCompileContext &compile_context, cons
const std::string &operation_name = supported_comparison_ops.at(operation);
std::string kernel_name = "compare_" + lower_string(operation_name);
+ const unsigned int num_elems_processed_per_iteration =
+ adjust_vec_size(16 / input1->info()->element_size(), output->info()->dimension(0));
+
// Set kernel build options
std::set<std::string> build_opts;
build_opts.emplace("-DDATA_TYPE=" + get_cl_type_from_data_type(input1->info()->data_type()));
- build_opts.emplace("-DVEC_SIZE=" + support::cpp11::to_string(calculate_num_elems_processed_per_iteration(*input1->info())));
+ build_opts.emplace("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
+ build_opts.emplace("-DVEC_SIZE_LEFTOVER=" +
+ support::cpp11::to_string(output->info()->dimension(0) % num_elems_processed_per_iteration));
+ build_opts.emplace(
+ "-DVEC_SIZE_IN1=" + //
+ support::cpp11::to_string(input1->info()->dimension(0) == 1 ? 1 : num_elems_processed_per_iteration));
+ build_opts.emplace(
+ "-DVEC_SIZE_IN2=" + //
+ support::cpp11::to_string(input2->info()->dimension(0) == 1 ? 1 : num_elems_processed_per_iteration));
build_opts.emplace("-DOP=" + operation_name);
build_opts.emplace("-DOP_NAME=" + lower_string(operation_name));
- if(is_data_type_quantized(input1->info()->data_type()))
+ if (is_data_type_quantized(input1->info()->data_type()))
{
const UniformQuantizationInfo iq1_info = input1->info()->quantization_info().uniform();
const UniformQuantizationInfo iq2_info = input2->info()->quantization_info().uniform();
+ build_opts.emplace("-DIS_QUANTIZED");
build_opts.emplace("-DOFFSET_IN1=" + support::cpp11::to_string(iq1_info.offset));
build_opts.emplace("-DOFFSET_IN2=" + support::cpp11::to_string(iq2_info.offset));
build_opts.emplace("-DSCALE_IN1=" + float_to_string_with_full_precision(iq1_info.scale));
@@ -163,12 +164,16 @@ void CLComparisonKernel::configure(const CLCompileContext &compile_context, cons
_config_id += lower_string(string_from_data_layout(input1->info()->data_layout()));
}
-Status CLComparisonKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ComparisonOperation operation)
+Status CLComparisonKernel::validate(const ITensorInfo *input1,
+ const ITensorInfo *input2,
+ const ITensorInfo *output,
+ ComparisonOperation operation)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output);
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*input1, *input2, *output, operation));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(*input1->clone(), *input2->clone(), *output->clone()).first);
+ ARM_COMPUTE_RETURN_ON_ERROR(
+ validate_and_configure_window(*input1->clone(), *input2->clone(), *output->clone()).first);
return Status{};
}
@@ -184,17 +189,18 @@ void CLComparisonKernel::run(const Window &window, cl::CommandQueue &queue)
bool can_collapse = true;
const bool is_vector = in_shape1.num_dimensions() == 1 || in_shape2.num_dimensions() == 1;
- if(std::min(in_shape1.total_size(), in_shape2.total_size()) > 1 && !is_vector)
+ if (std::min(in_shape1.total_size(), in_shape2.total_size()) > 1 && !is_vector)
{
can_collapse = (std::min(in_shape1.num_dimensions(), in_shape2.num_dimensions()) > Window::DimZ);
- for(size_t d = Window::DimZ; can_collapse && (d < out_shape.num_dimensions()); d++)
+ for (size_t d = Window::DimZ; can_collapse && (d < out_shape.num_dimensions()); d++)
{
can_collapse = (in_shape1[d] == in_shape2[d]);
}
}
bool has_collapsed = false;
- Window collapsed = can_collapse ? window.collapse_if_possible(ICLKernel::window(), Window::DimZ, &has_collapsed) : window;
+ Window collapsed =
+ can_collapse ? window.collapse_if_possible(ICLKernel::window(), Window::DimZ, &has_collapsed) : window;
const TensorShape &in_shape1_collapsed = has_collapsed ? in_shape1.collapsed_from(Window::DimZ) : in_shape1;
const TensorShape &in_shape2_collapsed = has_collapsed ? in_shape2.collapsed_from(Window::DimZ) : in_shape2;
@@ -215,16 +221,7 @@ void CLComparisonKernel::run(const Window &window, cl::CommandQueue &queue)
ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input1));
ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input2));
- }
- while(collapsed.slide_window_slice_3D(slice));
+ } while (collapsed.slide_window_slice_3D(slice));
}
-BorderSize CLComparisonKernel::border_size() const
-{
- const int num_elems_processed_per_iteration = calculate_num_elems_processed_per_iteration(*_input1->info());
-
- const unsigned int replicateSize = _output->info()->dimension(0) - std::min(_input1->info()->dimension(0), _input2->info()->dimension(0));
- const unsigned int border = std::min<unsigned int>(num_elems_processed_per_iteration - 1U, replicateSize);
- return BorderSize{ 0, border, 0, 0 };
-}
} // namespace arm_compute
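
The reworked window setup above sizes the OpenCL vector to the output's innermost dimension and passes the remainder as -DVEC_SIZE_LEFTOVER, which is what lets the kernel drop the access-window padding and the border_size() override. A minimal standalone sketch of that arithmetic, using a hypothetical adjust_vec_size-style helper rather than the library's own:

```cpp
// Sketch only: mirrors the idea of adjust_vec_size(16 / element_size, dim0)
// and the VEC_SIZE_LEFTOVER build option derived from it.
#include <algorithm>
#include <cstdio>

// Hypothetical helper: clamp the requested vector length to the innermost
// dimension so tensors narrower than a full vector still get a valid size.
static unsigned int adjust_vec_size_sketch(unsigned int requested, unsigned int dim0)
{
    return std::min(requested, std::max(dim0, 1u));
}

int main()
{
    const unsigned int element_size = 1u;  // e.g. U8 comparison output
    const unsigned int out_width    = 37u; // innermost output dimension

    const unsigned int vec_size = adjust_vec_size_sketch(16u / element_size, out_width);
    const unsigned int leftover = out_width % vec_size; // tail handled inside the kernel

    std::printf("VEC_SIZE=%u VEC_SIZE_LEFTOVER=%u\n", vec_size, leftover); // prints 16 and 5
    return 0;
}
```

Because the tail is handled through the leftover define, no replicate border is needed on the right edge, which is why the border_size() override is removed in the header below.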
diff --git a/src/core/CL/kernels/CLComparisonKernel.h b/src/core/CL/kernels/CLComparisonKernel.h
index 0b94190183..2fb4ba06b6 100644
--- a/src/core/CL/kernels/CLComparisonKernel.h
+++ b/src/core/CL/kernels/CLComparisonKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2020, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,10 +21,11 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CLCOMPARISONKERNEL_H
-#define ARM_COMPUTE_CLCOMPARISONKERNEL_H
+#ifndef ACL_SRC_CORE_CL_KERNELS_CLCOMPARISONKERNEL_H
+#define ACL_SRC_CORE_CL_KERNELS_CLCOMPARISONKERNEL_H
#include "arm_compute/core/Types.h"
+
#include "src/core/CL/ICLKernel.h"
namespace arm_compute
@@ -64,7 +65,11 @@ public:
* @param[out] output Destination tensor. Data types supported: U8.
* @param[in] operation Comparison operation to use.
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ComparisonOperation operation);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input1,
+ const ICLTensor *input2,
+ ICLTensor *output,
+ ComparisonOperation operation);
/** Static function to check if given info will lead to a valid configuration of @ref CLComparisonKernel
*
* @param[in] input1 Source tensor. Data types supported: All.
@@ -74,11 +79,13 @@ public:
*
* @return a status
*/
- static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ComparisonOperation operation);
+ static Status validate(const ITensorInfo *input1,
+ const ITensorInfo *input2,
+ const ITensorInfo *output,
+ ComparisonOperation operation);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
- BorderSize border_size() const override;
private:
const ICLTensor *_input1; /**< Source tensor 1 */
@@ -86,4 +93,4 @@ private:
ICLTensor *_output; /**< Destination tensor */
};
} // namespace arm_compute
-#endif /* ARM_COMPUTE_CLCOMPARISONKERNEL_H */
+#endif // ACL_SRC_CORE_CL_KERNELS_CLCOMPARISONKERNEL_H
diff --git a/src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp b/src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp
deleted file mode 100644
index dcf4e6662e..0000000000
--- a/src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-CLConvertFullyConnectedWeightsKernel::CLConvertFullyConnectedWeightsKernel()
- : _input(nullptr), _output(nullptr)
-{
-}
-
-void CLConvertFullyConnectedWeightsKernel::configure(const ICLTensor *input, ICLTensor *output, const TensorShape &original_input_shape,
- DataLayout data_layout)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, output, original_input_shape, data_layout);
-}
-
-void CLConvertFullyConnectedWeightsKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const TensorShape &original_input_shape,
- DataLayout data_layout)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- // Output tensor auto initialisation if not yet initialized
- auto_init_if_empty(*output->info(), *input->info()->clone());
-
- auto padding_info = get_padding_info({ input, output });
-
- ARM_COMPUTE_ERROR_THROW_ON(CLConvertFullyConnectedWeightsKernel::validate(input->info(), output->info(), original_input_shape, data_layout));
-
- _input = input;
- _output = output;
-
- const DataLayout input_data_layout = (data_layout == DataLayout::NCHW) ? DataLayout::NHWC : DataLayout::NCHW;
-
- const int width_idx = get_data_layout_dimension_index(input_data_layout, DataLayoutDimension::WIDTH);
- const int height_idx = get_data_layout_dimension_index(input_data_layout, DataLayoutDimension::HEIGHT);
- const int channel_idx = get_data_layout_dimension_index(input_data_layout, DataLayoutDimension::CHANNEL);
-
- const unsigned int num_elems_per_input_plane = original_input_shape[width_idx] * original_input_shape[height_idx];
- const unsigned int num_channels = original_input_shape[channel_idx];
-
- const unsigned int factor_1 = (data_layout == DataLayout::NCHW) ? num_elems_per_input_plane : num_channels;
- const unsigned int factor_2 = (data_layout == DataLayout::NCHW) ? num_channels : num_elems_per_input_plane;
-
- // Set build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(input->info()->element_size()));
- build_opts.add_option("-DFACTOR_1=" + support::cpp11::to_string(factor_1));
- build_opts.add_option("-DFACTOR_2=" + support::cpp11::to_string(factor_2));
-
- // Create kernel
- _kernel = create_kernel(compile_context, "convert_fc_weights", build_opts.options());
-
- // Configure kernel window
- Window win = calculate_max_window(*input->info(), Steps());
- ICLKernel::configure_internal(win);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status CLConvertFullyConnectedWeightsKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const TensorShape &original_input_shape,
- DataLayout data_layout)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
- ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() != 2);
- ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(1) != original_input_shape.total_size_lower(3));
- ARM_COMPUTE_RETURN_ERROR_ON(data_layout == DataLayout::UNKNOWN);
-
- // Checks performed when output is configured
- if(output->total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
- }
-
- return Status{};
-}
-
-void CLConvertFullyConnectedWeightsKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- unsigned int idx = 0;
- add_2D_tensor_argument(idx, _input, window);
- add_2D_tensor_argument(idx, _output, window);
- enqueue(queue, *this, window, lws_hint());
-}
-} // namespace arm_compute
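
For context on the deletion above: configure() reorders the rows of the 2D fully connected weights between NCHW- and NHWC-trained layouts using the factor_1/factor_2 pair it computes. A CPU sketch of such a row permutation follows; the remapping formula is an illustrative reconstruction consistent with those factors, not code lifted from the convert_fc_weights kernel:

```cpp
// weights is num_rows x num_cols, row-major. With data_layout == NCHW,
// factor_1 is the input plane size (width * height) and factor_2 the channel
// count, and vice versa for NHWC, matching the deleted configure() above.
#include <cstddef>
#include <vector>

std::vector<float> convert_fc_weights_sketch(const std::vector<float> &weights,
                                              std::size_t num_rows,
                                              std::size_t num_cols,
                                              std::size_t factor_1,
                                              std::size_t factor_2)
{
    std::vector<float> out(weights.size());
    for (std::size_t row = 0; row < num_rows; ++row)
    {
        // Assumed permutation: split the row index by factor_1 and swap the parts.
        const std::size_t new_row = (row % factor_1) * factor_2 + row / factor_1;
        for (std::size_t col = 0; col < num_cols; ++col)
        {
            out[new_row * num_cols + col] = weights[row * num_cols + col];
        }
    }
    return out;
}
```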
diff --git a/src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h b/src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h
deleted file mode 100644
index d1da793df2..0000000000
--- a/src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLCONVERTFULLYCONNECTEDWEIGHTSKERNEL_H
-#define ARM_COMPUTE_CLCONVERTFULLYCONNECTEDWEIGHTSKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface to convert the 2D Fully Connected weights from NCHW to NHWC or vice versa.
- *
- * @note This function can be applied to the 2D weights used by a Fully Connected layer if:
- * - It follows a Convolution layer
- * - The data layout used by the network does not match the one the model has been trained in.
- *
- * @note This function assumes the weights are already reshaped (transposed)
- */
-class CLConvertFullyConnectedWeightsKernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLConvertFullyConnectedWeightsKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLConvertFullyConnectedWeightsKernel(const CLConvertFullyConnectedWeightsKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLConvertFullyConnectedWeightsKernel &operator=(const CLConvertFullyConnectedWeightsKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLConvertFullyConnectedWeightsKernel(CLConvertFullyConnectedWeightsKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLConvertFullyConnectedWeightsKernel &operator=(CLConvertFullyConnectedWeightsKernel &&) = default;
- /** Default destructor */
- ~CLConvertFullyConnectedWeightsKernel() = default;
- /** Set the input and output tensor.
- *
- * @param[in] input Source weights tensor to convert. Must be 2 dimensional. Data types supported: All.
- * @param[out] output The converted weights tensor. Shape and Data Type: Same as @p input.
- * @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer).
- * @param[in] data_layout The data layout the weights have been trained in.
- */
- void configure(const ICLTensor *input, ICLTensor *output, const TensorShape &original_input_shape, DataLayout data_layout);
- /** Set the input and output tensor.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Source weights tensor to convert. Must be 2 dimensional. Data types supported: All.
- * @param[out] output The converted weights tensor. Shape and Data Type: Same as @p input.
- * @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer).
- * @param[in] data_layout The data layout the weights have been trained in.
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const TensorShape &original_input_shape, DataLayout data_layout);
- /** Static function to check if given info will lead to a valid configuration of @ref CLConvertFullyConnectedWeightsKernel
- *
- * @param[in] input Source weights tensor info to convert. Must be 2 dimensional. Data types supported: All.
- * @param[in] output The converted weights tensor info. Shape and Data Type: Same as @p input.
- * @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer).
- * @param[in] data_layout The data layout the weights have been trained in.
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const TensorShape &original_input_shape, DataLayout data_layout);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_input;
- ICLTensor *_output;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLCONVERTFULLYCONNECTEDWEIGHTSKERNEL_H */
diff --git a/src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.cpp b/src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.cpp
index d28cffa05f..f8ecc4c098 100644
--- a/src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.cpp
+++ b/src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -27,7 +27,9 @@
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/WindowHelpers.h"
@@ -36,9 +38,11 @@ namespace arm_compute
CLDeconvolutionLayerUpsampleKernel::CLDeconvolutionLayerUpsampleKernel()
: _input(nullptr), _output(nullptr), _info(), _data_layout(DataLayout::UNKNOWN)
{
+ _type = CLKernelType::ELEMENTWISE;
}
-Status CLDeconvolutionLayerUpsampleKernel::validate(const ITensorInfo *input, const ITensorInfo *output,
+Status CLDeconvolutionLayerUpsampleKernel::validate(const ITensorInfo *input,
+ const ITensorInfo *output,
const PadStrideInfo &info)
{
ARM_COMPUTE_UNUSED(info);
@@ -58,7 +62,7 @@ Status CLDeconvolutionLayerUpsampleKernel::validate(const ITensorInfo *input, co
ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(idx_h) == 0);
ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(idx_c) != output->dimension(idx_c));
- for(size_t i = 3; i < Coordinates::num_max_dimensions; ++i)
+ for (size_t i = 3; i < Coordinates::num_max_dimensions; ++i)
{
ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(i) != output->dimension(i));
}
@@ -66,20 +70,21 @@ Status CLDeconvolutionLayerUpsampleKernel::validate(const ITensorInfo *input, co
return Status{};
}
-void CLDeconvolutionLayerUpsampleKernel::configure(const ICLTensor *input, ICLTensor *output,
- const PadStrideInfo &info)
+void CLDeconvolutionLayerUpsampleKernel::configure(const ICLTensor *input, ICLTensor *output, const PadStrideInfo &info)
{
configure(CLKernelLibrary::get().get_compile_context(), input, output, info);
}
-void CLDeconvolutionLayerUpsampleKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output,
- const PadStrideInfo &info)
+void CLDeconvolutionLayerUpsampleKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ const PadStrideInfo &info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
// Perform validation step
ARM_COMPUTE_ERROR_THROW_ON(CLDeconvolutionLayerUpsampleKernel::validate(input->info(), output->info(), info));
- auto padding_info = get_padding_info({ input, output });
+ auto padding_info = get_padding_info({input, output});
_input = input;
_output = output;
@@ -96,7 +101,6 @@ void CLDeconvolutionLayerUpsampleKernel::configure(const CLCompileContext &compi
// Configure kernel window
Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration));
AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
- output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));
ICLKernel::configure_internal(win);
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
@@ -118,7 +122,7 @@ void CLDeconvolutionLayerUpsampleKernel::run(const Window &window, cl::CommandQu
const int out_end_y = _output->info()->dimension(idx_h) - _info.pad_bottom() + _info.stride().second - 1;
const int out_step_y = _info.stride().second;
- switch(_data_layout)
+ switch (_data_layout)
{
case DataLayout::NCHW:
{
@@ -136,8 +140,7 @@ void CLDeconvolutionLayerUpsampleKernel::run(const Window &window, cl::CommandQu
add_3D_tensor_argument(idx, _input, slice_in);
add_3D_tensor_argument(idx, _output, slice_out);
enqueue(queue, *this, slice_out, lws_hint());
- }
- while(collapsed.slide_window_slice_3D(slice_in) && collapsed.slide_window_slice_3D(slice_out));
+ } while (collapsed.slide_window_slice_3D(slice_in) && collapsed.slide_window_slice_3D(slice_out));
break;
}
case DataLayout::NHWC:
@@ -155,8 +158,7 @@ void CLDeconvolutionLayerUpsampleKernel::run(const Window &window, cl::CommandQu
add_3D_tensor_argument(idx, _input, slice_in);
add_3D_tensor_argument(idx, _output, slice_out);
enqueue(queue, *this, slice_out, lws_hint());
- }
- while(window.slide_window_slice_3D(slice_in) && window.slide_window_slice_3D(slice_out));
+ } while (window.slide_window_slice_3D(slice_in) && window.slide_window_slice_3D(slice_out));
break;
}
default:
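
The run() changes above keep the existing addressing: per axis the output is walked from the pad offset in steps of the stride, so input samples end up scattered into an otherwise zero output. A rough one-dimensional sketch of that pattern; the output-length formula here is an assumption made for the example, not the library's shape calculation:

```cpp
// Scatter in[i] to out[pad_left + i * stride]; every other element stays zero.
#include <cstddef>
#include <vector>

std::vector<float> upsample_1d_sketch(const std::vector<float> &in,
                                      std::size_t stride,
                                      std::size_t pad_left,
                                      std::size_t pad_right)
{
    const std::size_t out_len = (in.empty() ? 0 : (in.size() - 1) * stride + 1) + pad_left + pad_right;
    std::vector<float> out(out_len, 0.0f);
    for (std::size_t i = 0; i < in.size(); ++i)
    {
        out[pad_left + i * stride] = in[i];
    }
    return out;
}
```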
diff --git a/src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h b/src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h
index e0d1322341..762989a836 100644
--- a/src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h
+++ b/src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h
@@ -62,7 +62,10 @@ public:
* @param[out] output Destination tensor. Data types supported: same as @p input. All but the lowest two dimensions must be the same size as in the input tensor, i.e. scaling is only performed within the XY-plane.
* @param[in] info Contains padding and stride information described in @ref PadStrideInfo.
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PadStrideInfo &info);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ const PadStrideInfo &info);
/** Static function to check if given info will lead to a valid configuration of @ref CLDeconvolutionLayerUpsample
*
* @param[in] input Source tensor info. Data types supported: All.
diff --git a/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp b/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp
index ca7e9d4b23..b33e0a8b6f 100644
--- a/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp
+++ b/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -27,9 +27,10 @@
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/core/utils/StringUtils.h"
+#include "arm_compute/core/Validate.h"
+
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/StringSupport.h"
@@ -38,7 +39,11 @@ namespace arm_compute
{
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const ITensorInfo *input_info, const ITensorInfo *weights_info,
+Status validate_arguments(const ITensorInfo *input,
+ const ITensorInfo *bias,
+ const ITensorInfo *output,
+ const ITensorInfo *input_info,
+ const ITensorInfo *weights_info,
const PadStrideInfo &deconv_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output, input_info, weights_info);
@@ -53,19 +58,21 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, con
ARM_COMPUTE_RETURN_ERROR_ON(weights_info->dimension(idx_w) != deconv_info.stride().first);
ARM_COMPUTE_RETURN_ERROR_ON(weights_info->dimension(idx_h) != deconv_info.stride().second);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S32);
- if(!is_qasymm)
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16, DataType::QASYMM8,
+ DataType::QASYMM8_SIGNED, DataType::S32);
+ if (!is_qasymm)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, input_info, weights_info);
}
- ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != weights_info->dimension(idx_w) * weights_info->dimension(idx_h) * weights_info->dimension(idx_b));
+ ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != weights_info->dimension(idx_w) * weights_info->dimension(idx_h) *
+ weights_info->dimension(idx_b));
ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(1) != input_info->dimension(idx_w));
ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) != input_info->dimension(idx_h));
ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(3) != input_info->dimension(idx_b));
- if(bias != nullptr)
+ if (bias != nullptr)
{
- if(is_qasymm)
+ if (is_qasymm)
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
}
@@ -76,19 +83,26 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, con
ARM_COMPUTE_RETURN_ERROR_ON(bias->dimension(0) != weights_info->dimension(idx_b));
}
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
const PadStrideInfo stride_info(deconv_info.stride().first, deconv_info.stride().second);
- auto out_dims = deconvolution_output_dimensions(input_info->dimension(idx_w), input_info->dimension(idx_h), weights_info->dimension(idx_w), weights_info->dimension(idx_h), stride_info);
+ auto out_dims = deconvolution_output_dimensions(input_info->dimension(idx_w), input_info->dimension(idx_h),
+ weights_info->dimension(idx_w), weights_info->dimension(idx_h),
+ stride_info);
- const TensorShape output_shape = misc::shape_calculator::compute_deconvolution_output_shape(out_dims, *input_info, *weights_info);
+ const TensorShape output_shape =
+ misc::shape_calculator::compute_deconvolution_output_shape(out_dims, *input_info, *weights_info);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
}
return Status{};
}
-std::pair<Status, Window> validate_and_configure_window(const ITensorInfo *input, ITensorInfo *output, const ITensorInfo *input_info, const ITensorInfo *weights_info, const PadStrideInfo &deconv_info)
+std::pair<Status, Window> validate_and_configure_window(const ITensorInfo *input,
+ ITensorInfo *output,
+ const ITensorInfo *input_info,
+ const ITensorInfo *weights_info,
+ const PadStrideInfo &deconv_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
@@ -97,11 +111,17 @@ std::pair<Status, Window> validate_and_configure_window(const ITensorInfo *input
const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
const PadStrideInfo stride_info(deconv_info.stride().first, deconv_info.stride().second);
- auto out_dims = deconvolution_output_dimensions(input_info->dimension(idx_w), input_info->dimension(idx_h), weights_info->dimension(idx_w), weights_info->dimension(idx_h), stride_info);
+ auto out_dims =
+ deconvolution_output_dimensions(input_info->dimension(idx_w), input_info->dimension(idx_h),
+ weights_info->dimension(idx_w), weights_info->dimension(idx_h), stride_info);
- const TensorShape output_shape = misc::shape_calculator::compute_deconvolution_output_shape(out_dims, *input_info, *weights_info);
+ const TensorShape output_shape =
+ misc::shape_calculator::compute_deconvolution_output_shape(out_dims, *input_info, *weights_info);
- auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape).set_data_layout(data_layout).set_quantization_info(input->quantization_info()));
+ auto_init_if_empty(*output, input->clone()
+ ->set_tensor_shape(output_shape)
+ .set_data_layout(data_layout)
+ .set_quantization_info(input->quantization_info()));
Window win = calculate_max_window(*input);
@@ -109,28 +129,37 @@ std::pair<Status, Window> validate_and_configure_window(const ITensorInfo *input
}
} // namespace
-CLDeconvolutionReshapeOutputKernel::CLDeconvolutionReshapeOutputKernel()
- : _add_bias(false),
- _bias(nullptr)
+CLDeconvolutionReshapeOutputKernel::CLDeconvolutionReshapeOutputKernel() : _add_bias(false), _bias(nullptr)
{
+ _type = CLKernelType::ELEMENTWISE;
}
-void CLDeconvolutionReshapeOutputKernel::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const ITensorInfo *input_info, const ITensorInfo *weights_info,
+void CLDeconvolutionReshapeOutputKernel::configure(const ICLTensor *input,
+ const ICLTensor *bias,
+ ICLTensor *output,
+ const ITensorInfo *input_info,
+ const ITensorInfo *weights_info,
const PadStrideInfo &deconv_info)
{
configure(CLKernelLibrary::get().get_compile_context(), input, bias, output, input_info, weights_info, deconv_info);
}
-void CLDeconvolutionReshapeOutputKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const ITensorInfo *input_info,
- const ITensorInfo *weights_info,
- const PadStrideInfo &deconv_info)
+void CLDeconvolutionReshapeOutputKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ const ICLTensor *bias,
+ ICLTensor *output,
+ const ITensorInfo *input_info,
+ const ITensorInfo *weights_info,
+ const PadStrideInfo &deconv_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, input_info, weights_info);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr ? bias->info() : nullptr), output->info(), input_info, weights_info, deconv_info));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr ? bias->info() : nullptr),
+ output->info(), input_info, weights_info, deconv_info));
- auto padding_info = get_padding_info({ input, bias, output });
+ auto padding_info = get_padding_info({input, bias, output});
// Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), output->info(), input_info, weights_info, deconv_info);
+ auto win_config =
+ validate_and_configure_window(input->info(), output->info(), input_info, weights_info, deconv_info);
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
const DataLayout data_layout = input_info->data_layout();
@@ -177,7 +206,11 @@ void CLDeconvolutionReshapeOutputKernel::configure(const CLCompileContext &compi
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-Status CLDeconvolutionReshapeOutputKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const ITensorInfo *input_info, const ITensorInfo *weights_info,
+Status CLDeconvolutionReshapeOutputKernel::validate(const ITensorInfo *input,
+ const ITensorInfo *bias,
+ const ITensorInfo *output,
+ const ITensorInfo *input_info,
+ const ITensorInfo *weights_info,
const PadStrideInfo &deconv_info)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, bias, output, input_info, weights_info, deconv_info));
@@ -193,7 +226,7 @@ void CLDeconvolutionReshapeOutputKernel::run(const Window &window, cl::CommandQu
unsigned int idx = 0;
add_3D_tensor_argument(idx, _input, collapsed);
add_3D_tensor_argument(idx, _output, collapsed);
- if(_add_bias)
+ if (_add_bias)
{
add_1D_tensor_argument(idx, _bias, collapsed);
}
diff --git a/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h b/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h
index ce354fa86f..8f436b07e3 100644
--- a/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h
+++ b/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h
@@ -67,7 +67,12 @@ public:
* @param[in] weights_info Deconvolution weights tensor info. Supported data types: same as @p input. Supported data layouts: same as @p input.
* @param[in] deconv_info Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo. This kernel supports only stride_x = weights.width && stride_y = weights.height. Moreover, padding is not supported.
*/
- void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const ITensorInfo *input_info, const ITensorInfo *weights_info, const PadStrideInfo &deconv_info);
+ void configure(const ICLTensor *input,
+ const ICLTensor *bias,
+ ICLTensor *output,
+ const ITensorInfo *input_info,
+ const ITensorInfo *weights_info,
+ const PadStrideInfo &deconv_info);
/** Initialise the kernel's source and destination.
*
* @param[in] compile_context The compile context to be used.
@@ -79,8 +84,13 @@ public:
* @param[in] weights_info Deconvolution weights tensor info. Supported data types: same as @p input. Supported data layouts: same as @p input.
* @param[in] deconv_info Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo. This kernel supports only stride_x = weights.width && stride_y = weights.height. Moreover, padding is not supported.
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const ITensorInfo *input_info, const ITensorInfo *weights_info,
- const PadStrideInfo &deconv_info);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ const ICLTensor *bias,
+ ICLTensor *output,
+ const ITensorInfo *input_info,
+ const ITensorInfo *weights_info,
+ const PadStrideInfo &deconv_info);
/** Static function to check if given info will lead to a valid configuration of @ref CLDeconvolutionReshapeOutputKernel.
*
@@ -93,7 +103,12 @@ public:
*
* @return a Status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const ITensorInfo *input_info, const ITensorInfo *weights_info, const PadStrideInfo &deconv_info);
+ static Status validate(const ITensorInfo *input,
+ const ITensorInfo *bias,
+ const ITensorInfo *output,
+ const ITensorInfo *input_info,
+ const ITensorInfo *weights_info,
+ const PadStrideInfo &deconv_info);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp b/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp
deleted file mode 100644
index c98d66f390..0000000000
--- a/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright (c) 2016-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-#include <cstddef>
-#include <set>
-#include <string>
-
-namespace arm_compute
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift)
-{
- ARM_COMPUTE_UNUSED(policy);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON(input == output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input,
- 1,
- DataType::U8, DataType::S8, DataType::QSYMM8_PER_CHANNEL, DataType::S16,
- DataType::U16, DataType::U32, DataType::S32, DataType::F16,
- DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output,
- 1,
- DataType::U8, DataType::S8, DataType::QASYMM8, DataType::S16,
- DataType::U16, DataType::U32, DataType::S32, DataType::F16,
- DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == output->data_type(), "Input and output data types must be different");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_float(input->data_type()) && shift != 0, "Shift is used only with integer non-quantized inputs");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_quantized(input->data_type()) && shift != 0, "Shift is used only with integer non-quantized inputs");
- ARM_COMPUTE_RETURN_ERROR_ON(shift >= 8);
-
- // Validate in case of configured output
- if(output->total_size() > 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
- }
-
- return Status{};
-}
-} // namespace
-
-void CLDepthConvertLayerKernel::configure(const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, output, policy, shift);
-}
-
-void CLDepthConvertLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- _input = input;
- _output = output;
-
- // Auto initialize output shape if not initialized (We can only auto-configure the shape, datatype must be given)
- set_shape_if_empty(*output->info(), input->info()->tensor_shape());
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), policy, shift));
-
- auto padding_info = get_padding_info({ input, output });
-
- // Get data sizes
- const size_t input_size = data_size_from_type(input->info()->data_type());
- const size_t output_size = data_size_from_type(output->info()->data_type());
-
- // Get number of elements to process per iterations
- const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16 / input->info()->element_size(), input->info()->dimension(0));
-
- // Set build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(input->info()->dimension(0) % num_elems_processed_per_iteration));
- build_opts.add_option("-DDATA_TYPE_IN=" + get_cl_type_from_data_type(input->info()->data_type()));
- build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output->info()->data_type()));
- // Conversions from float always SATURATE as out-of-bounds conversion from float->integer is implementation defined
- build_opts.add_option_if(is_data_type_float(input->info()->data_type()) || policy == ConvertPolicy::SATURATE, "-DSATURATE");
- build_opts.add_option_if(is_data_type_float(input->info()->data_type()) || is_data_type_float(output->info()->data_type()), "-DIS_DATA_TYPE_FLOAT");
- build_opts.add_option_if(is_data_type_quantized(input->info()->data_type()), "-DIS_DATA_TYPE_QUANTIZED");
-
- // Create kernel
- const std::string kernel_name = (input_size >= output_size) ? "convert_depth_down" : "convert_depth_up";
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Set shift arg
- unsigned int idx = 2 * num_arguments_per_3D_tensor(); // Skip the input and output parameters
- _kernel.setArg(idx++, shift);
-
- // Since we have a leftover vector size calculated using the input tensor shape, it is required to
- // have the input region equal to the tensor shape
- ValidRegion input_valid_region = input->info()->valid_region();
- input->info()->set_valid_region(ValidRegion(Coordinates(0, 0), input->info()->tensor_shape()));
-
- // Configure kernel
- Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
- ICLKernel::configure_internal(win);
-
- // Collapse window
- const Window &full_window = window();
- Window collapsed_window = full_window.collapse_if_possible(full_window, Window::DimZ);
- ICLKernel::configure_internal(collapsed_window);
-
- // Restore the valid region
- input->info()->set_valid_region(input_valid_region);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-
- // Set config_id for enabling LWS tuning
- _config_id = kernel_name;
- _config_id += "_";
- _config_id += lower_string(string_from_data_type(input->info()->data_type()));
- _config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(1));
-}
-
-Status CLDepthConvertLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, policy, shift));
-
- return Status{};
-}
-} // namespace arm_compute
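
On the deleted depth convert kernel above: configure() picks convert_depth_down or convert_depth_up from the element sizes and forwards the shift plus an optional -DSATURATE to the kernel. A per-element sketch of the down-convert case, assuming the shift is applied as an arithmetic right shift (the OpenCL kernel source is not part of this diff):

```cpp
// Sketch of one int32 -> uint8 down-conversion with shift and policy,
// mirroring the WRAP/SATURATE distinction from validate_arguments() above.
#include <algorithm>
#include <cstdint>
#include <limits>

enum class ConvertPolicySketch { WRAP, SATURATE };

static std::uint8_t convert_down_sketch(std::int32_t value, unsigned int shift, ConvertPolicySketch policy)
{
    const std::int32_t shifted = value >> shift; // assumed placement of the shift
    if (policy == ConvertPolicySketch::SATURATE)
    {
        return static_cast<std::uint8_t>(
            std::clamp<std::int32_t>(shifted, 0, std::numeric_limits<std::uint8_t>::max()));
    }
    return static_cast<std::uint8_t>(shifted); // WRAP: plain modular truncation
}
```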
diff --git a/src/core/CL/kernels/CLDepthConvertLayerKernel.h b/src/core/CL/kernels/CLDepthConvertLayerKernel.h
deleted file mode 100644
index 8b511c6707..0000000000
--- a/src/core/CL/kernels/CLDepthConvertLayerKernel.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2016-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLDEPTHCONVERTKERNEL_H
-#define ARM_COMPUTE_CLDEPTHCONVERTKERNEL_H
-
-#include "arm_compute/core/Types.h"
-#include "src/core/CL/ICLSimple3DKernel.h"
-
-#include <cstdint>
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the depth conversion kernel. */
-class CLDepthConvertLayerKernel : public ICLSimple3DKernel
-{
-public:
- /** Set the input and output of the kernel.
- *
- * Valid conversions Input -> Output :
- *
- * - QSYMM8_PER_CHANNEL -> QASYMM8 (ATTENTION: it is the user's responsibility to keep track of the quantization info in the TensorInfo meta-data)
- * - U8 -> S8, U16, S16, U32, S32, F16, F32
- * - U16 -> U8, S8, S16, U32, S32, F16, F32
- * - S16 -> U8, S8, U16, U32, S32, F16, F32
- * - U32 -> U8, S8, U16, S16, S32, F16, F32
- * - S32 -> U8, S8, U16, S16, U32, F16, F32
- * - F16 -> U8, S8, U16, S16, U32, F32
- * - F32 -> U8, S8, U16, S16, U32, F16
- *
- * @param[in] input The input tensor to convert. Data types supported: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32.
- * @param[out] output The output tensor. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
- * @param[in] policy Conversion policy
- * @param[in] shift Value for down/up conversions. Must be 0 <= shift < 8.
- */
- void configure(const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift);
- /** Set the input and output of the kernel.
- *
- * Valid conversions Input -> Output :
- *
- * - QSYMM8_PER_CHANNEL -> QASYMM8 (ATTENTION: it is the user's responsibility to keep track of the quantization info in the TensorInfo meta-data)
- * - U8 -> S8, U16, S16, U32, S32, F16, F32
- * - U16 -> U8, S8, S16, U32, S32, F16, F32
- * - S16 -> U8, S8, U16, U32, S32, F16, F32
- * - U32 -> U8, S8, U16, S16, S32, F16, F32
- * - S32 -> U8, S8, U16, S16, U32, F16, F32
- * - F16 -> U8, S8, U16, S16, U32, F32
- * - F32 -> U8, S8, U16, S16, U32, F16
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input The input tensor to convert. Data types supported: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32.
- * @param[out] output The output tensor. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
- * @param[in] policy Conversion policy
- * @param[in] shift Value for down/up conversions. Must be 0 <= shift < 8.
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift);
- /** Static function to check if given info will lead to a valid configuration of @ref CLDepthConvertLayerKernel
- *
- * @param[in] input Source tensor info. Data types supported: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32.
- * @param[in] output Destination tensor info. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
- * @param[in] policy Conversion policy
- * @param[in] shift Value for down/up conversions. Must be 0 <= shift < 8.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift);
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLDEPTHCONVERTKERNEL_H */
diff --git a/src/core/CL/kernels/CLDepthToSpaceLayerKernel.cpp b/src/core/CL/kernels/CLDepthToSpaceLayerKernel.cpp
index 8946f2a713..cdf19ab2e1 100644
--- a/src/core/CL/kernels/CLDepthToSpaceLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDepthToSpaceLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,6 +26,8 @@
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
@@ -48,12 +50,14 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, i
ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_channel] % (block_shape * block_shape) != 0);
// Validate output if initialized
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
- ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_width] != (block_shape * input->tensor_shape()[idx_width]));
- ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_height] != (block_shape * input->tensor_shape()[idx_height]));
+ ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_width] !=
+ (block_shape * input->tensor_shape()[idx_width]));
+ ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_height] !=
+ (block_shape * input->tensor_shape()[idx_height]));
ARM_COMPUTE_RETURN_ERROR_ON(output->num_dimensions() > 4);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
}
@@ -62,9 +66,9 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, i
}
} // namespace
-CLDepthToSpaceLayerKernel::CLDepthToSpaceLayerKernel()
- : _input(nullptr), _output(nullptr), _block_shape()
+CLDepthToSpaceLayerKernel::CLDepthToSpaceLayerKernel() : _input(nullptr), _output(nullptr), _block_shape()
{
+ _type = CLKernelType::ELEMENTWISE;
}
void CLDepthToSpaceLayerKernel::configure(const ICLTensor *input, ICLTensor *output, int32_t block_shape)
@@ -72,14 +76,18 @@ void CLDepthToSpaceLayerKernel::configure(const ICLTensor *input, ICLTensor *out
configure(CLKernelLibrary::get().get_compile_context(), input, output, block_shape);
}
-void CLDepthToSpaceLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t block_shape)
+void CLDepthToSpaceLayerKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ int32_t block_shape)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- TensorShape output_shape = compute_depth_to_space_shape(input->info()->tensor_shape(), input->info()->data_layout(), block_shape);
+ TensorShape output_shape =
+ compute_depth_to_space_shape(input->info()->tensor_shape(), input->info()->data_layout(), block_shape);
auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type());
- auto padding_info = get_padding_info({ input, output });
+ auto padding_info = get_padding_info({input, output});
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), block_shape));
@@ -96,7 +104,9 @@ void CLDepthToSpaceLayerKernel::configure(const CLCompileContext &compile_contex
build_opts.add_option("-DCHANNEL_SIZE=" + support::cpp11::to_string(input->info()->dimension(idx_channel)));
build_opts.add_option("-DBLOCK_SHAPE=" + support::cpp11::to_string(block_shape));
build_opts.add_option("-DWIDTH_IN=" + support::cpp11::to_string(input->info()->dimension(idx_width)));
- _kernel = create_kernel(compile_context, "depth_to_space_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options());
+ _kernel = create_kernel(compile_context,
+ "depth_to_space_" + lower_string(string_from_data_layout(input->info()->data_layout())),
+ build_opts.options());
// Configure kernel window
Window win = calculate_max_window(*input->info(), Steps());
@@ -135,7 +145,6 @@ void CLDepthToSpaceLayerKernel::run(const Window &window, cl::CommandQueue &queu
enqueue(queue, *this, slice_in, lws_hint());
++batch_id;
- }
- while(window.slide_window_slice_3D(slice_in));
+ } while (window.slide_window_slice_3D(slice_in));
}
} // namespace arm_compute
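
The validation hunk above spells out the depth-to-space shape relation: both spatial dimensions grow by block_shape while the channel count shrinks by block_shape squared. A small sketch of that arithmetic for a (width, height, channel, batch) ordering; the library's ShapeCalculator also handles the NHWC case, this only illustrates the relation:

```cpp
#include <array>
#include <cassert>

// in = {width, height, channels, batches}; channels must be divisible by block * block.
std::array<unsigned int, 4> depth_to_space_shape_sketch(const std::array<unsigned int, 4> &in, unsigned int block)
{
    assert(in[2] % (block * block) == 0);
    return {in[0] * block, in[1] * block, in[2] / (block * block), in[3]};
}
```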
diff --git a/src/core/CL/kernels/CLDepthToSpaceLayerKernel.h b/src/core/CL/kernels/CLDepthToSpaceLayerKernel.h
index 1f7f77b569..cef70c4dda 100644
--- a/src/core/CL/kernels/CLDepthToSpaceLayerKernel.h
+++ b/src/core/CL/kernels/CLDepthToSpaceLayerKernel.h
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_CLDEPTHTOSPACELAYERKERNEL_H
#include "arm_compute/core/Types.h"
+
#include "src/core/CL/ICLKernel.h"
namespace arm_compute
@@ -61,7 +62,8 @@ public:
* @param[out] output Tensor output. Data types supported: same as @p input
* @param[in] block_shape Block shape value.
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t block_shape);
+ void
+ configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t block_shape);
/** Static function to check if given info will lead to a valid configuration of @ref CLDepthToSpaceLayerKernel.
*
* @param[in] input Tensor input info. Supported tensor rank: 4. Data types supported: All.
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp
deleted file mode 100644
index ba7a782bf1..0000000000
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp
+++ /dev/null
@@ -1,434 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/CL/ICLKernel.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-using namespace arm_compute::misc::shape_calculator;
-
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D dilation,
- const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((act_info.enabled()) && (input->data_type() == DataType::QASYMM8 || input->data_type() == DataType::QASYMM8_SIGNED)
- && (act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
- && (act_info.activation() != ActivationLayerInfo::ActivationFunction::BOUNDED_RELU)
- && (act_info.activation() != ActivationLayerInfo::ActivationFunction::RELU)
- && (act_info.activation() != ActivationLayerInfo::ActivationFunction::LOGISTIC),
- "For QASYMM8 only logistic, relu, lower bounded relu and lower-upper bounded relu are supported");
- ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(0) != 3 || weights->dimension(1) != 3);
- ARM_COMPUTE_RETURN_ERROR_ON(conv_info.stride().first < 1 || conv_info.stride().first > 3);
-
- ARM_COMPUTE_RETURN_ERROR_ON((dilation.x() < 1) || (dilation.y() < 1));
-
- const bool is_qasymm = is_data_type_quantized_asymmetric(input->data_type());
-
- if(biases != nullptr)
- {
- if(is_qasymm)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
- }
- ARM_COMPUTE_RETURN_ERROR_ON((biases->dimension(0) != weights->dimension(2)) && (weights->dimension(2) != 1 || biases->dimension(0) != weights->dimension(3)));
- ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
- }
-
- if(is_qasymm)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output_multipliers, output_shifts);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_multipliers, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_shifts, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON(output_multipliers->num_dimensions() > 1);
- ARM_COMPUTE_RETURN_ERROR_ON(output_shifts->num_dimensions() > 1);
-
- if(is_data_type_quantized_per_channel(weights->data_type()))
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QSYMM8_PER_CHANNEL);
- ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(2) != output_multipliers->dimension(0));
- ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(2) != output_shifts->dimension(0));
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
- ARM_COMPUTE_RETURN_ERROR_ON(1 != output_multipliers->dimension(0));
- ARM_COMPUTE_RETURN_ERROR_ON(1 != output_shifts->dimension(0));
- }
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
- }
-
- if(output->total_size() != 0)
- {
- const TensorShape output_shape = compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier, dilation);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
- }
-
- return Status{};
-}
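The shape-compatibility check above delegates to compute_depthwise_convolution_shape(). As a rough reference only, a minimal standalone sketch of the standard depthwise output-shape arithmetic follows; conv_out_dim is an illustrative helper, not the library's implementation, and the example values are arbitrary.

// Standalone sketch: standard depthwise convolution output shape.
#include <cstdio>

unsigned int conv_out_dim(unsigned int in, unsigned int pad_before, unsigned int pad_after,
                          unsigned int kernel, unsigned int stride, unsigned int dilation)
{
    // Effective kernel extent once dilation is applied.
    const unsigned int effective_kernel = dilation * (kernel - 1) + 1;
    return (in + pad_before + pad_after - effective_kernel) / stride + 1;
}

int main()
{
    // Example: 32x32 input, 3x3 kernel, stride 2, pad 1, dilation 1, 16 channels, depth multiplier 2.
    const unsigned int out_w = conv_out_dim(32, 1, 1, 3, 2, 1);
    const unsigned int out_h = conv_out_dim(32, 1, 1, 3, 2, 1);
    const unsigned int out_c = 16 * 2; // channels scale with the depth multiplier
    std::printf("output shape: %u x %u x %u\n", out_w, out_h, out_c);
    return 0;
}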
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier, GPUTarget gpu_target, std::string &kernel_name, const Size2D dilation)
-{
-    // Output auto initialization if not yet initialized
- const TensorShape output_shape = compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier, dilation);
- auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape).set_quantization_info(output->quantization_info()));
-
- const unsigned int conv_stride_x = conv_info.stride().first;
- const unsigned int conv_stride_y = conv_info.stride().second;
- const bool is_qasymm = is_data_type_quantized_asymmetric(input->data_type());
- const bool is_bifrost = get_arch_from_target(gpu_target) == GPUTarget::BIFROST;
-
- // Configure kernel window
- unsigned int num_elems_read_per_iteration_x = 0;
- unsigned int num_elems_read_per_iteration_y = 0;
- unsigned int num_elems_written_per_iteration_x = 0;
- unsigned int num_elems_written_per_iteration_y = 0;
-
- if(input->data_type() == DataType::F16)
- {
- kernel_name = "depthwise_convolution_3x3_f16";
- num_elems_written_per_iteration_x = 8 / data_size_from_type(input->data_type());
- num_elems_written_per_iteration_y = 1;
- num_elems_read_per_iteration_y = 3;
- switch(conv_stride_x)
- {
- case 1:
- num_elems_read_per_iteration_x = 8;
- break;
- case 2:
- num_elems_read_per_iteration_x = 9;
- break;
- case 3:
- num_elems_read_per_iteration_x = 16;
- break;
- default:
- num_elems_read_per_iteration_x = 3 + (num_elems_written_per_iteration_x - 1) * conv_stride_x;
- break;
- }
- if(is_bifrost)
- {
- if(conv_stride_x == 1 && conv_stride_y == 1)
- {
- kernel_name = "depthwise_convolution_3x3_stridex1_stridey1_bifrost_f16";
- num_elems_read_per_iteration_x = 8;
- num_elems_written_per_iteration_x = 4;
- num_elems_read_per_iteration_y = 6;
- num_elems_written_per_iteration_y = 4;
- }
- else if(conv_stride_x == 2 && conv_stride_y == 2)
- {
- kernel_name = "depthwise_convolution_3x3_stridex2_stridey2_bifrost_f16";
- num_elems_read_per_iteration_x = 10;
- num_elems_written_per_iteration_x = 4;
- num_elems_read_per_iteration_y = 5;
- num_elems_written_per_iteration_y = 2;
- }
- }
- }
- else if(input->data_type() == DataType::F32 && is_bifrost)
- {
- if(conv_stride_x == 1 && conv_stride_y == 1)
- {
- kernel_name = "depthwise_convolution_3x3_stridex1_stridey1_bifrost_f32";
- num_elems_read_per_iteration_x = 4;
- num_elems_read_per_iteration_y = 6;
- num_elems_written_per_iteration_x = 2;
- num_elems_written_per_iteration_y = 4;
- }
- else if(conv_stride_x == 2 && conv_stride_y == 2)
- {
- kernel_name = "depthwise_convolution_3x3_stridex2_stridey2_bifrost_f32";
- num_elems_read_per_iteration_x = 6;
- num_elems_read_per_iteration_y = 5;
- num_elems_written_per_iteration_x = 2;
- num_elems_written_per_iteration_y = 2;
- }
- else
- {
- kernel_name = "depthwise_convolution_3x3";
- num_elems_written_per_iteration_x = 8 / data_size_from_type(input->data_type());
- num_elems_written_per_iteration_y = 1;
- num_elems_read_per_iteration_x = 3 + (num_elems_written_per_iteration_x - 1) * conv_stride_x;
- num_elems_read_per_iteration_y = 3;
- }
- }
- else
- {
- const bool is_dot8_supported = dot8_supported(CLKernelLibrary::get().get_device()) && !is_data_type_quantized_per_channel(weights->data_type());
-
- kernel_name = is_qasymm ? "dwc_3x3_native_quantized8" : "depthwise_convolution_3x3";
- kernel_name += (is_qasymm && is_dot8_supported ? "_dot8" : "");
- kernel_name += (is_qasymm ? "_nchw" : "");
-
- num_elems_written_per_iteration_x = 8 / data_size_from_type(input->data_type());
- num_elems_written_per_iteration_y = (is_qasymm && conv_stride_y == 1 && dilation.y() == 1) ? 2 : 1;
- num_elems_read_per_iteration_x = 3 + (num_elems_written_per_iteration_x - 1) * conv_stride_x + (conv_stride_x > 1 ? 1 : 0);
- num_elems_read_per_iteration_y = num_elems_written_per_iteration_y + 2;
- }
- // The OpenCL routine convolution1x3 does loadn(addr), loadn(addr + dilation_x) and loadn(addr + 2 * dilation_x) on the input.
-    // Each of the three convolution1x3 calls is made with addr, (addr + dilation_y) and (addr + 2 * dilation_y)
- // Hence we must add 2 * dilation.x/y() to the number of elements read in those axes per thread
- num_elems_read_per_iteration_x += 2 * dilation.x();
- num_elems_read_per_iteration_y += 2 * dilation.y();
-
- // Create window and update padding
- Window win = calculate_max_window(*output, Steps(num_elems_written_per_iteration_x, num_elems_written_per_iteration_y));
-
- AccessWindowRectangle input_access(input, -conv_info.pad_left(), -conv_info.pad_top(),
- num_elems_read_per_iteration_x, num_elems_read_per_iteration_y,
- conv_stride_x, conv_stride_y);
- AccessWindowStatic weights_access(weights, 0, 0, 3, 3);
- AccessWindowRectangle output_access(output, 0, 0, num_elems_written_per_iteration_x, num_elems_written_per_iteration_y);
-
- bool window_changed = update_window_and_padding(win, input_access, weights_access, output_access);
-
- output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
-}
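A minimal standalone sketch of the read-window arithmetic used above for the generic 3x3 NCHW branch, including the 2 * dilation adjustment described in the comment; the stride, dilation and elements-written values are example inputs, not taken from a real configuration.

// Standalone sketch: elements a single work-item must read along X, mirroring
// the generic branch above plus the dilation adjustment.
#include <cstdio>

int main()
{
    const unsigned int stride_x      = 2; // example stride
    const unsigned int dilation_x    = 2; // example dilation
    const unsigned int num_written_x = 4; // outputs produced per work-item along X

    // 3 taps for the first output element, then stride_x more input elements per extra output.
    unsigned int read_x = 3 + (num_written_x - 1) * stride_x;

    // convolution1x3 loads addr, addr + dilation_x and addr + 2 * dilation_x,
    // so 2 * dilation_x extra elements are touched per row (same reasoning along Y).
    read_x += 2 * dilation_x;

    std::printf("num_elems_read_per_iteration_x = %u\n", read_x);
    return 0;
}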
-} // namespace
-
-CLDepthwiseConvolutionLayer3x3NCHWKernel::CLDepthwiseConvolutionLayer3x3NCHWKernel()
- : _conv_stride_x(0), _conv_pad_top(0), _conv_pad_left(0)
-{
-}
-
-BorderSize CLDepthwiseConvolutionLayer3x3NCHWKernel::border_size() const
-{
- return _border_size;
-}
-
-void CLDepthwiseConvolutionLayer3x3NCHWKernel::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation,
- const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation, output_multipliers, output_shifts);
-}
-
-void CLDepthwiseConvolutionLayer3x3NCHWKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation,
- const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(),
- conv_info, depth_multiplier, act_info, dilation,
- (output_multipliers != nullptr) ? output_multipliers->info() : nullptr,
- (output_shifts != nullptr) ? output_shifts->info() : nullptr));
-
- _input = input;
- _output = output;
- _weights = weights;
- _biases = biases;
- _conv_stride_x = conv_info.stride().first;
- _conv_stride_y = conv_info.stride().second;
- _conv_pad_left = conv_info.pad_left();
- _conv_pad_top = conv_info.pad_top();
- _output_multipliers = output_multipliers;
- _output_shifts = output_shifts;
- _is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
-
- // Configure kernel window
- std::string kernel_name;
- const GPUTarget gpu_target = get_target();
-
- auto win_config = validate_and_configure_window(input->info(), weights->info(), output->info(), conv_info, depth_multiplier, gpu_target, kernel_name, dilation);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
-
- _border_size = BorderSize(input->info()->padding());
-
- // Set build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation())));
- build_opts.add_option("-DDST_CHANNELS=" + support::cpp11::to_string(_output->info()->tensor_shape().z()));
- build_opts.add_option("-DDEPTH_MULTIPLIER=" + support::cpp11::to_string(depth_multiplier));
- build_opts.add_option("-DCONV_STRIDE_X=" + support::cpp11::to_string(_conv_stride_x));
- build_opts.add_option("-DDILATION_X=" + support::cpp11::to_string(dilation.x()));
- build_opts.add_option("-DDILATION_Y=" + support::cpp11::to_string(dilation.y()));
- build_opts.add_option_if(_biases != nullptr, "-DHAS_BIAS");
-
- if(_is_quantized)
- {
- const UniformQuantizationInfo iq_info = _input->info()->quantization_info().uniform();
- const UniformQuantizationInfo wq_info = _weights->info()->quantization_info().uniform();
- const UniformQuantizationInfo oq_info = _output->info()->quantization_info().uniform();
-
- const bool is_quantized_per_channel = is_data_type_quantized_per_channel(weights->info()->data_type());
- const bool is_dot8_supported = dot8_supported(CLKernelLibrary::get().get_device()) && !is_quantized_per_channel;
- build_opts.add_option("-DCONV_STRIDE_Y=" + support::cpp11::to_string(_conv_stride_y));
- build_opts.add_option("-DINPUT_OFFSET=" + support::cpp11::to_string(-iq_info.offset));
- build_opts.add_option("-DWEIGHTS_OFFSET=" + support::cpp11::to_string(-wq_info.offset));
- build_opts.add_option("-DOUTPUT_OFFSET=" + support::cpp11::to_string(oq_info.offset));
- build_opts.add_option("-DK_OFFSET=" + support::cpp11::to_string(9 * iq_info.offset * wq_info.offset));
- build_opts.add_option_if(is_quantized_per_channel, "-DPER_CHANNEL_QUANTIZATION");
- build_opts.add_option_if(is_dot8_supported, "-DIS_DOT8");
-
- // Compute non-per-channel multiplier and shift anyway to make OpenCL kernel simpler
- float multiplier = iq_info.scale * wq_info.scale / oq_info.scale;
- int output_multiplier = 0;
- int output_shift = 0;
- quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
- build_opts.add_option("-DOUTPUT_MULTIPLIER=" + support::cpp11::to_string(output_multiplier));
- build_opts.add_option("-DOUTPUT_SHIFT=" + support::cpp11::to_string(output_shift));
-
- if(act_info.enabled())
- {
- int a_val{};
- int b_val{};
- std::tie(b_val, a_val) = get_quantized_activation_min_max(act_info, input->info()->data_type(), oq_info);
-
- const int o1 = oq_info.offset;
-
- build_opts.add_option("-DA_VAL=" + support::cpp11::to_string(a_val));
- build_opts.add_option("-DB_VAL=" + support::cpp11::to_string(b_val));
- build_opts.add_option("-DCONST_0=" + support::cpp11::to_string(o1));
-
- const float s1 = iq_info.scale;
- build_opts.add_option("-DS1_VAL=" + float_to_string_with_full_precision(s1));
- build_opts.add_option("-DO1_VAL=" + support::cpp11::to_string(o1));
- }
-
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
- build_opts.add_option("-DWEIGHTS_TYPE=" + get_cl_type_from_data_type(weights->info()->data_type()));
- build_opts.add_option("-DWEIGHTS_PROMOTED_TYPE=" + get_cl_promoted_type_from_data_type(weights->info()->data_type()));
- }
- else
- {
- build_opts.add_option_if(act_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(act_info.a()));
- build_opts.add_option_if(act_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(act_info.b()));
- build_opts.add_option_if(act_info.enabled(), "-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(win_config.second.x().step()));
- }
-
- build_opts.add_option_if(input->info()->data_type() == DataType::F16, "-DIS_F16");
- build_opts.add_option_if(input->info()->data_type() == DataType::F32, "-DIS_F32");
-
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Set config_id for enabling LWS tuning
- _config_id = kernel_name;
- _config_id += "_";
- _config_id += lower_string(string_from_data_type(input->info()->data_type()));
- _config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(1));
-}
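configure() above folds the input, weights and output scales into a single real multiplier and hands it to quantization::calculate_quantized_multiplier() to obtain the OUTPUT_MULTIPLIER and OUTPUT_SHIFT build options. A minimal standalone sketch of the usual gemmlowp-style decomposition follows; quantize_multiplier is an illustrative stand-in and the library helper may differ in detail.

// Standalone sketch: decompose a real requantization multiplier into a Q31
// fixed-point multiplier plus a power-of-two shift.
#include <cmath>
#include <cstdint>
#include <cstdio>

void quantize_multiplier(float multiplier, std::int32_t *quant_mult, int *shift)
{
    if (multiplier == 0.f)
    {
        *quant_mult = 0;
        *shift      = 0;
        return;
    }
    // multiplier = m0 * 2^shift with m0 in [0.5, 1)
    const double m0 = std::frexp(multiplier, shift);
    auto q = static_cast<std::int64_t>(std::llround(m0 * (1ll << 31)));
    if (q == (1ll << 31)) // rounding can push m0 up to exactly 1.0
    {
        q /= 2;
        ++(*shift);
    }
    *quant_mult = static_cast<std::int32_t>(q);
}

int main()
{
    // Example scales only; real values come from the tensors' quantization info.
    const float input_scale = 0.5f, weights_scale = 0.25f, output_scale = 1.f;
    std::int32_t quant_mult = 0;
    int          shift      = 0;
    quantize_multiplier(input_scale * weights_scale / output_scale, &quant_mult, &shift);
    std::printf("multiplier=%d shift=%d\n", quant_mult, shift);
    return 0;
}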
-
-Status CLDepthwiseConvolutionLayer3x3NCHWKernel::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, GPUTarget gpu_target,
- const Size2D &dilation, const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
-{
- std::string kernel_name;
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation, output_multipliers, output_shifts));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), weights->clone().get(), output->clone().get(),
- conv_info, depth_multiplier, gpu_target, kernel_name, dilation)
- .first);
-
- return Status{};
-}
-
-void CLDepthwiseConvolutionLayer3x3NCHWKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
-
- Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
-
- // Create input window and adjust
- Window collapsed_in = collapsed;
- collapsed_in.adjust(Window::DimX, -_conv_pad_left, true);
- collapsed_in.adjust(Window::DimY, -_conv_pad_top, true);
- collapsed_in.set_dimension_step(Window::DimX, collapsed_in.x().step() * _conv_stride_x);
- collapsed_in.set_dimension_step(Window::DimY, collapsed_in.y().step() * _conv_stride_y);
-
- Window slice_in = collapsed_in.first_slice_window_3D();
- Window slice_out = collapsed.first_slice_window_3D();
- Window slice_weights = window.first_slice_window_3D();
- slice_weights.set_dimension_step(Window::DimX, 0);
- slice_weights.set_dimension_step(Window::DimY, 0);
-
- unsigned int idx = 3 * num_arguments_per_3D_tensor();
-
- // Set output multipliers in case of quantized data type
- if(_is_quantized)
- {
- Window slice;
- slice.use_tensor_dimensions(_output_multipliers->info()->tensor_shape());
- add_1D_tensor_argument(idx, _output_multipliers, slice);
- add_1D_tensor_argument(idx, _output_shifts, slice);
- }
-
- // Set biases
- if(_biases != nullptr)
- {
- Window slice_biases;
- slice_biases.use_tensor_dimensions(_biases->info()->tensor_shape());
- add_1D_tensor_argument(idx, _biases, slice_biases);
- }
-
- do
- {
- idx = 0;
- add_3D_tensor_argument(idx, _input, slice_in);
- add_3D_tensor_argument(idx, _output, slice_out);
- add_3D_tensor_argument(idx, _weights, slice_weights);
-
- enqueue(queue, *this, slice_out, lws_hint());
- }
- while(collapsed.slide_window_slice_3D(slice_out) && collapsed_in.slide_window_slice_3D(slice_in));
-}
-} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h
deleted file mode 100644
index 45b5869676..0000000000
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLDEPTHWISECONVOLUTIONNCHWKERNEL3x3_H
-#define ARM_COMPUTE_CLDEPTHWISECONVOLUTIONNCHWKERNEL3x3_H
-
-#include "src/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the kernel to run a 3x3 depthwise convolution on a tensor when the data layout is NCHW.
- */
-class CLDepthwiseConvolutionLayer3x3NCHWKernel : public ICLDepthwiseConvolutionLayer3x3Kernel
-{
-public:
- /** Default constructor */
- CLDepthwiseConvolutionLayer3x3NCHWKernel();
- /** Initialize the function's source, destination, conv and border_size.
- *
- * @param[in] input Source tensor. DataType supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
- * @param[in] weights Weights tensor. A 3D tensor with dimensions [3, 3, IFM].
- * Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8/QASYMM8_SIGNED.
- * @param[in] biases Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
- * @param[out] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU for QASYMM8 supported.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- * @param[in] output_multipliers (Optional) Output multipliers tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- * @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- */
- void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U),
- const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr) override;
- /** Initialize the function's source, destination, conv and border_size.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Source tensor. DataType supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
- * @param[in] weights Weights tensor. A 3D tensor with dimensions [3, 3, IFM].
- * Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8/QASYMM8_SIGNED.
- * @param[in] biases Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
- * @param[out] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU for QASYMM8 supported.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- * @param[in] output_multipliers (Optional) Output multipliers tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- * @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U),
- const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr) override;
- /** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayer3x3NCHWKernel
- *
- * @param[in] input Source tensor info. DataType supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
- * @param[in] weights Weights tensor info. A 3D tensor with dimensions [3, 3, IFM].
- * Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8/QASYMM8_SIGNED.
- * @param[in] biases Biases tensor info. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
- * @param[in] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU are supported.
- * @param[in] gpu_target (Optional) GPU target to validate the kernel for. Defaults to midgard.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- * @param[in] output_multipliers (Optional) Output multipliers tensor info for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- * @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), GPUTarget gpu_target = GPUTarget::MIDGARD,
- const Size2D &dilation = Size2D(1U, 1U), const ITensorInfo *output_multipliers = nullptr, const ITensorInfo *output_shifts = nullptr);
-
- void run(const Window &window, cl::CommandQueue &queue) override;
- BorderSize border_size() const override;
-
-private:
- unsigned int _conv_stride_x;
- unsigned int _conv_pad_top;
- unsigned int _conv_pad_left;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLDEPTHWISECONVOLUTIONNCHWKERNEL3x3_H */
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp
deleted file mode 100644
index d13afd2010..0000000000
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp
+++ /dev/null
@@ -1,464 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/CL/ICLKernel.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation,
- const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((act_info.enabled()) && (input->data_type() == DataType::QASYMM8 || input->data_type() == DataType::QASYMM8_SIGNED)
- && (act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
- && (act_info.activation() != ActivationLayerInfo::ActivationFunction::BOUNDED_RELU)
- && (act_info.activation() != ActivationLayerInfo::ActivationFunction::RELU)
- && (act_info.activation() != ActivationLayerInfo::ActivationFunction::LOGISTIC),
- "For QASYMM8 only logistic, relu, lower bounded relu and lower-upper bounded relu are supported");
- ARM_COMPUTE_RETURN_ERROR_ON(depth_multiplier > 1); // COMPMID-1071 Add depth multiplier support for NHWC
-
- ARM_COMPUTE_RETURN_ERROR_ON(conv_info.stride().first < 1);
- ARM_COMPUTE_RETURN_ERROR_ON(std::max(conv_info.pad_top(), conv_info.pad_bottom()) > 4);
-
- ARM_COMPUTE_RETURN_ERROR_ON((dilation.x() < 1) || (dilation.y() < 1));
-
- const bool is_qasymm = is_data_type_quantized_asymmetric(input->data_type());
- const size_t weights_width = 3;
- const size_t weights_height = 3;
-
- const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_depthwise_convolution_shape(
- *input, TensorInfo(TensorShape(weights_width, weights_height), 1, weights->data_type()).set_data_layout(DataLayout::NCHW), conv_info, depth_multiplier, dilation);
- if(is_qasymm)
- {
- DepthwiseConvolutionReshapeInfo info;
- info.c0 = 4;
- ARM_COMPUTE_RETURN_ERROR_ON((weights->dimension(0) / info.c0) != weights_width * weights_height);
-
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output_multipliers, output_shifts);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_multipliers, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_shifts, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON(output_multipliers->num_dimensions() > 1);
- ARM_COMPUTE_RETURN_ERROR_ON(output_shifts->num_dimensions() > 1);
-
- if(is_data_type_quantized_per_channel(weights->data_type()))
- {
- ARM_COMPUTE_RETURN_ERROR_ON(output_shape[0] != output_multipliers->dimension(0));
- ARM_COMPUTE_RETURN_ERROR_ON(output_shape[0] != output_shifts->dimension(0));
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
- ARM_COMPUTE_RETURN_ERROR_ON(1 != output_multipliers->dimension(0));
- ARM_COMPUTE_RETURN_ERROR_ON(1 != output_shifts->dimension(0));
- }
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
- ARM_COMPUTE_RETURN_ERROR_ON((weights->dimension(1) != weights_width) || (weights->dimension(2) != weights_height));
- }
-
- if(biases != nullptr)
- {
- ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != output_shape[0]);
- if(is_qasymm)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
- }
-
- ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
- }
-
- if(output->total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *bias, ITensorInfo *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation,
- ITensorInfo *output_multipliers, ITensorInfo *output_shifts)
-{
- ARM_COMPUTE_UNUSED(weights);
- ARM_COMPUTE_UNUSED(depth_multiplier);
-
- const bool is_stride_1_dilation_1 = ((conv_info.stride().first == conv_info.stride().second) && (conv_info.stride().first == 1) && dilation.x() == 1 && dilation.y() == 1);
- unsigned int num_rows_processed_per_iteration = is_stride_1_dilation_1 ? 2 : 1;
-
- Window win{};
- Status err{};
-
- if(is_data_type_quantized_asymmetric(input->data_type()))
- {
- const unsigned int num_elems_accessed_per_iteration = 4;
- const unsigned int num_rows_read_per_iteration = num_rows_processed_per_iteration + 2;
- const unsigned int num_rows_written_per_iteration = std::ceil(num_rows_processed_per_iteration / static_cast<float>(conv_info.stride().first));
-
- BorderSize border_size;
- border_size = BorderSize(conv_info.pad_left(), 0, std::max(std::max(conv_info.pad_right(), conv_info.pad_bottom()), conv_info.pad_top()), 0);
-
- // Configure kernel window
- win = calculate_max_window(*output, Steps(num_elems_accessed_per_iteration, num_rows_written_per_iteration));
-
- AccessWindowStatic input_access(input, 0, -border_size.top, ceil_to_multiple(input->dimension(0), num_elems_accessed_per_iteration),
- ceil_to_multiple(input->dimension(1) + border_size.bottom, num_rows_read_per_iteration));
- AccessWindowRectangle output_access(output, 0, 0, num_elems_accessed_per_iteration, num_rows_written_per_iteration);
-
- bool window_changed = false;
-
- if((output_multipliers != nullptr) && (output_shifts != nullptr))
- {
- AccessWindowHorizontal output_multipliers_access(output_multipliers, 0, num_elems_accessed_per_iteration);
- AccessWindowHorizontal output_shifts_access(output_shifts, 0, num_elems_accessed_per_iteration);
- window_changed = window_changed || update_window_and_padding(win, input_access, output_access, output_multipliers_access, output_shifts_access);
- }
- else
- {
- Status err = ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "output_multipliers and output_shifts must be non-nullptr for quantized input");
- return std::make_pair(err, win);
- }
-
- if(bias != nullptr)
- {
- AccessWindowHorizontal bias_access(bias, 0, num_elems_accessed_per_iteration);
- window_changed = window_changed || update_window_and_padding(win, bias_access);
- }
- output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
-
- err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- }
- else
- {
- unsigned int num_elems_accessed_per_iteration = adjust_vec_size(4 / input->element_size(), input->dimension(0));
- win = calculate_max_window(*output, Steps(num_elems_accessed_per_iteration, num_rows_processed_per_iteration));
- }
-
- return std::make_pair(err, win);
-}
-} // namespace
-
-CLDepthwiseConvolutionLayer3x3NHWCKernel::CLDepthwiseConvolutionLayer3x3NHWCKernel()
- : _num_planes_processed_per_iteration(1)
-{
-}
-
-BorderSize CLDepthwiseConvolutionLayer3x3NHWCKernel::border_size() const
-{
- return _border_size;
-}
-
-void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation,
- const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation, output_multipliers, output_shifts);
-}
-
-void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation,
- const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(),
- conv_info, depth_multiplier, act_info, dilation,
- (output_multipliers != nullptr) ? output_multipliers->info() : nullptr,
- (output_shifts != nullptr) ? output_shifts->info() : nullptr));
-
- auto padding_info = get_padding_info({ input, weights, biases, output });
-
- auto win_config = validate_and_configure_window(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(),
- conv_info, depth_multiplier, dilation,
- (output_multipliers != nullptr) ? output_multipliers->info() : nullptr,
- (output_shifts != nullptr) ? output_shifts->info() : nullptr);
-
- const bool is_stride_1 = ((conv_info.stride().first == conv_info.stride().second) && (conv_info.stride().first == 1));
- const bool is_stride_1_dilation_1 = (is_stride_1 && dilation.x() == 1 && dilation.y() == 1);
- const bool is_quantized_per_channel = is_data_type_quantized_per_channel(weights->info()->data_type());
- const bool is_dot8_supported = dot8_supported(CLKernelLibrary::get().get_device()) && !is_quantized_per_channel;
-
- _input = input;
- _output = output;
- _weights = weights;
- _biases = biases;
- _conv_stride_y = conv_info.stride().second;
- _num_planes_processed_per_iteration = is_stride_1_dilation_1 ? 2 : 1;
- _output_multipliers = output_multipliers;
- _output_shifts = output_shifts;
- _is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
-
- if(_is_quantized)
- {
- _border_size = BorderSize(input->info()->padding());
-
- // If QASYMM8 and the 8 bit dot product is available, force _num_planes_processed_per_iteration to 1
- if(is_dot8_supported)
- {
- _num_planes_processed_per_iteration = 1;
- }
- }
-
- unsigned int num_elems_accessed_per_iteration = _is_quantized ? 4 : adjust_vec_size(4 / input->info()->element_size(), input->info()->dimension(0));
- unsigned int num_rows_processed_per_iteration = is_stride_1_dilation_1 ? 2 : 1;
-
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(_input->info()->data_type()));
- build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation())));
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_accessed_per_iteration));
- build_opts.add_option("-DSRC_DIM_1=" + support::cpp11::to_string(_input->info()->dimension(1)));
- build_opts.add_option("-DSRC_DIM_2=" + support::cpp11::to_string(_input->info()->dimension(2)));
- build_opts.add_option("-DCONV_PAD_TOP=" + support::cpp11::to_string(conv_info.pad_top()));
- build_opts.add_option("-DCONV_PAD_LEFT=" + support::cpp11::to_string(conv_info.pad_left()));
- build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(input->info()->dimension(0) % num_elems_accessed_per_iteration));
- build_opts.add_option_if(_biases != nullptr, "-DHAS_BIAS");
- build_opts.add_option_if(_input->info()->tensor_shape().total_size_upper(3) > 1,
- "-DDST_DEPTH=" + support::cpp11::to_string(static_cast<int>(std::ceil(_output->info()->dimension(2) / static_cast<float>(_num_planes_processed_per_iteration)))));
-
- if(_is_quantized)
- {
- const UniformQuantizationInfo iq_info = _input->info()->quantization_info().uniform();
- const UniformQuantizationInfo wq_info = _weights->info()->quantization_info().uniform();
- const UniformQuantizationInfo oq_info = _output->info()->quantization_info().uniform();
-
- build_opts.add_option("-DSRC_DIM_1=" + support::cpp11::to_string(_input->info()->dimension(1)));
- build_opts.add_option("-DINPUT_OFFSET=" + support::cpp11::to_string(-iq_info.offset));
- build_opts.add_option("-DWEIGHTS_OFFSET=" + support::cpp11::to_string(-wq_info.offset));
- build_opts.add_option("-DOUTPUT_OFFSET=" + support::cpp11::to_string(oq_info.offset));
- build_opts.add_option("-DK_OFFSET=" + support::cpp11::to_string(9 * iq_info.offset * wq_info.offset));
- build_opts.add_option_if(is_quantized_per_channel, "-DPER_CHANNEL_QUANTIZATION");
- build_opts.add_option_if(is_dot8_supported, "-DIS_DOT8");
-
- // Compute non-per-channel multiplier and shift anyway to make OpenCL kernel simpler
- float multiplier = iq_info.scale * wq_info.scale / oq_info.scale;
- int output_multiplier = 0;
- int output_shift = 0;
- quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
- build_opts.add_option("-DOUTPUT_MULTIPLIER=" + support::cpp11::to_string(output_multiplier));
- build_opts.add_option("-DOUTPUT_SHIFT=" + support::cpp11::to_string(output_shift));
-
- if(act_info.enabled())
- {
- int a_val{};
- int b_val{};
- std::tie(b_val, a_val) = get_quantized_activation_min_max(act_info, input->info()->data_type(), oq_info);
-
- const int o1 = oq_info.offset;
-
- build_opts.add_option("-DA_VAL=" + support::cpp11::to_string(a_val));
- build_opts.add_option("-DB_VAL=" + support::cpp11::to_string(b_val));
- build_opts.add_option("-DCONST_0=" + support::cpp11::to_string(o1));
-
- const float s1 = iq_info.scale;
- build_opts.add_option("-DS1_VAL=" + float_to_string_with_full_precision(s1));
- build_opts.add_option("-DO1_VAL=" + support::cpp11::to_string(o1));
- }
-
- build_opts.add_option("-DWEIGHTS_TYPE=" + get_cl_type_from_data_type(weights->info()->data_type()));
- build_opts.add_option("-DWEIGHTS_PROMOTED_TYPE=" + get_cl_promoted_type_from_data_type(weights->info()->data_type()));
- }
- else
- {
- build_opts.add_option_if(act_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(act_info.a()));
- build_opts.add_option_if(act_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(act_info.b()));
- }
-
- if(is_stride_1_dilation_1)
- {
- build_opts.add_option("-DNUM_ROWS_PROCESSED=" + support::cpp11::to_string(num_rows_processed_per_iteration));
- build_opts.add_option("-DNUM_PLANES_PROCESSED=" + support::cpp11::to_string(_num_planes_processed_per_iteration));
- build_opts.add_option("-DDST_DIM_1=" + support::cpp11::to_string(_output->info()->dimension(1)));
- build_opts.add_option("-DDST_DIM_2=" + support::cpp11::to_string(_output->info()->dimension(2)));
- build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string((input->info()->dimension(1) + conv_info.pad_left() + conv_info.pad_right()) % num_rows_processed_per_iteration));
- }
- else
- {
- build_opts.add_option("-DCONV_STRIDE_X=" + support::cpp11::to_string(conv_info.stride().first));
- build_opts.add_option("-DCONV_STRIDE_Y=" + support::cpp11::to_string(_conv_stride_y));
- build_opts.add_option("-DDILATION_X=" + support::cpp11::to_string(dilation.x()));
- build_opts.add_option("-DDILATION_Y=" + support::cpp11::to_string(dilation.y()));
- }
-
- std::string kernel_name;
- // Create kernel
- if(_is_quantized)
- {
- kernel_name = std::string("dwc_3x3_reshaped_quantized8");
- kernel_name += (is_dot8_supported && is_stride_1_dilation_1 ? "_dot8" : "");
- kernel_name += (is_stride_1_dilation_1 ? "_stride1" : "");
- kernel_name += "_nhwc";
- }
- else
- {
- kernel_name = std::string("depthwise_convolution_3x3_nhwc");
- kernel_name += (is_stride_1_dilation_1 ? "_stride1" : "");
- }
-
- ICLKernel::configure_internal(win_config.second);
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- ARM_COMPUTE_ERROR_ON(!_is_quantized && has_padding_changed(padding_info));
-
- // Set config_id for enabling LWS tuning
- _config_id = kernel_name;
- _config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(1));
- _config_id += "_";
- _config_id += string_from_data_type(input->info()->data_type());
-}
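When the tensor has a batch dimension and two output planes are processed per iteration, configure() above passes -DDST_DEPTH as ceil(dst_dim2 / planes) and run() below sizes the Z dimension of the execution window the same way. A minimal standalone sketch of that bookkeeping with example values (not taken from a real tensor):

// Standalone sketch: DST_DEPTH and the Z launch range.
#include <cmath>
#include <cstdio>

int main()
{
    const unsigned int out_dim2        = 7; // output planes (spatial Y in NHWC)
    const unsigned int planes_per_iter = 2; // stride 1, dilation 1, no dot8 path
    const unsigned int total_batches   = 3;

    const unsigned int dst_depth = static_cast<unsigned int>(
        std::ceil(out_dim2 / static_cast<float>(planes_per_iter))); // work slices per batch

    // run() sets the Z dimension of the window to dst_depth * total_batches.
    std::printf("DST_DEPTH = %u, Z work slices = %u\n", dst_depth, dst_depth * total_batches);
    return 0;
}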
-
-Status CLDepthwiseConvolutionLayer3x3NHWCKernel::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation,
- const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation, output_multipliers, output_shifts));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), weights->clone().get(),
- biases != nullptr ? biases->clone().get() : nullptr,
- output->clone().get(), conv_info, depth_multiplier, dilation,
- (output_multipliers != nullptr) ? output_multipliers->clone().get() : nullptr,
- (output_shifts != nullptr) ? output_shifts->clone().get() : nullptr)
- .first);
- return Status{};
-}
-
-void CLDepthwiseConvolutionLayer3x3NHWCKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
-
- const size_t total_batches = _input->info()->tensor_shape().total_size_upper(3);
-
- Window win = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
- win.set(Window::DimZ, Window::Dimension(0, std::ceil(_output->info()->dimension(2) / static_cast<float>(_num_planes_processed_per_iteration)) * total_batches, 1));
-
- unsigned int idx = 2 * num_arguments_per_4D_tensor() + (_is_quantized ? num_arguments_per_2D_tensor() : num_arguments_per_3D_tensor());
-
- if(_is_quantized)
- {
- Window slice;
- slice.use_tensor_dimensions(_output_multipliers->info()->tensor_shape());
- slice.set_dimension_step(Window::DimX, window.x().step());
- add_1D_tensor_argument(idx, _output_multipliers, slice);
- add_1D_tensor_argument(idx, _output_shifts, slice);
- }
-
- if(_biases != nullptr)
- {
- Window win_biases;
- win_biases.use_tensor_dimensions(_biases->info()->tensor_shape());
- win_biases.set_dimension_step(Window::DimX, window.x().step());
- add_1D_tensor_argument(idx, _biases, win_biases);
- }
-
- if(_is_quantized)
- {
- // Calculate the max_offset.
-        // max_offset is the offset just past the last valid value in the Z dimension (spatial dimension Y for NHWC)
- // |******************|
- // | pad_top |
- // |******************|
- // | |
- // | plane0 |
- // | batch0 |
- // |__________________|
- // |******************| Batch 0
- // | pad_bottom |
- // | pad_top |
- // |******************|
- // | |
- // | plane1 |
- // | batch0 |
- // |__________________|-----> max_offset
- // |******************|
- // | pad_bottom |
- // | pad_top |
- // |******************|
- // | |
- // | plane0 |
- // | batch1 |
- // |__________________|
- // |******************| Batch 1
- // | pad_bottom |
- // | pad_top |
- // |******************|
- // | |
- // | plane1 |
- // | batch1 |
- // |__________________|
- // | pad_bottom |
- // |******************|
- const int max_offset = ((_input->info()->dimension(1) * _input->info()->dimension(2)) + (_input->info()->padding().bottom + _input->info()->padding().top) * (_input->info()->dimension(
- 2) - 1)) * _input->info()->strides_in_bytes().y();
- _kernel.setArg(idx, max_offset);
- }
-
- Window slice = win.first_slice_window_4D();
- do
- {
- unsigned int idx = 0;
- add_4D_tensor_argument(idx, _input, slice);
- add_4D_tensor_argument(idx, _output, slice);
- if(_is_quantized)
- {
- add_2D_tensor_argument(idx, _weights, slice);
- }
- else
- {
- add_3D_tensor_argument(idx, _weights, slice);
- }
- enqueue(queue, *this, slice, lws_hint());
- }
- while(win.slide_window_slice_4D(slice));
-}
-} // namespace arm_compute
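A minimal standalone sketch of the max_offset computed in run() above for the quantized NHWC path, i.e. the byte offset just past the last valid plane of a batch as illustrated by the diagram. The geometry values are examples only; real ones come from ITensorInfo.

// Standalone sketch: mirrors
// ((dim(1) * dim(2)) + (padding().bottom + padding().top) * (dim(2) - 1)) * strides_in_bytes().y()
#include <cstdio>

int main()
{
    const unsigned int dim1           = 16; // extent of dimension 1
    const unsigned int dim2           = 8;  // number of planes (spatial Y)
    const unsigned int pad_top        = 1;
    const unsigned int pad_bottom     = 1;
    const unsigned int stride_y_bytes = 64; // byte stride of dimension 1

    // Steps along dimension 1 covering all planes of one batch, plus the padding
    // rows separating consecutive planes, converted to bytes with that stride.
    const unsigned int max_offset =
        (dim1 * dim2 + (pad_top + pad_bottom) * (dim2 - 1)) * stride_y_bytes;

    std::printf("max_offset = %u bytes\n", max_offset);
    return 0;
}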
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h
deleted file mode 100644
index ce0bf5ceb3..0000000000
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLDEPTHWISECONVOLUTIONNHWCKERNEL3x3_H
-#define ARM_COMPUTE_CLDEPTHWISECONVOLUTIONNHWCKERNEL3x3_H
-
-#include "src/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the kernel to run a 3x3 depthwise convolution on a tensor when the data layout is NHWC.
- */
-class CLDepthwiseConvolutionLayer3x3NHWCKernel : public ICLDepthwiseConvolutionLayer3x3Kernel
-{
-public:
- /** Default constructor */
- CLDepthwiseConvolutionLayer3x3NHWCKernel();
- /** Default move assignment operator. */
- /** Initialize the function's source, destination, conv and border_size.
- *
- * @param[in] input Source tensor. DataType supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
- * @param[in] weights Weights tensor. A 3D tensor with dimensions [IFM, 3, 3].
- * Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8/QASYMM8_SIGNED.
- * @param[in] biases Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
- * @param[out] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU are supported.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- * @param[in] output_multipliers (Optional) Output multipliers tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- * @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- */
- void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U),
- const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr) override;
- /** Initialize the function's source, destination, conv and border_size.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Source tensor. DataType supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
- * @param[in] weights Weights tensor. A 3D tensor with dimensions [IFM, 3, 3].
- * Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8/QASYMM8_SIGNED.
- * @param[in] biases Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
- * @param[out] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU are supported.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- * @param[in] output_multipliers (Optional) Output multipliers tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- * @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U),
- const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr) override;
- /** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayer3x3NHWCKernel
- *
- * @param[in] input Source tensor info. DataType supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
- * @param[in] weights Weights tensor info. A 3D tensor with dimensions [IFM, 3, 3].
- * Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8/QASYMM8_SIGNED.
- * @param[in] biases Biases tensor info. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
- * @param[in] output Destination tensor info. Data type supported: Same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU are supported.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- * @param[in] output_multipliers (Optional) Output multipliers tensor info for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- * @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U),
- const ITensorInfo *output_multipliers = nullptr, const ITensorInfo *output_shifts = nullptr);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
- BorderSize border_size() const override;
-
-private:
- unsigned int _num_planes_processed_per_iteration;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLDEPTHWISECONVOLUTIONNHWCKERNEL3x3_H */
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp
index c34018a000..b95abe795f 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -28,46 +28,94 @@
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/ActivationFunctionUtils.h"
+#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
+#include "src/core/CL/CLUtils.h"
#include "src/core/CL/CLValidate.h"
#include "src/core/CL/ICLKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
+#include "src/gpu/cl/kernels/gemm/ClGemmHelpers.h"
#include "support/StringSupport.h"
namespace arm_compute
{
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const DWCWeightsKernelInfo &dwc_weights_info,
- const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation,
- const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
+Status validate_arguments(const ITensorInfo *input,
+ const ITensorInfo *weights,
+ const ITensorInfo *biases,
+ const ITensorInfo *output,
+ const DWCComputeKernelInfo &dwc_info,
+ const ConvolutionInfo &conv_info,
+ const ITensorInfo *output_multipliers,
+ const ITensorInfo *output_shifts)
{
ARM_COMPUTE_UNUSED(dwc_info);
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
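+    // A null output, or an output that aliases the input, selects the in-place path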
+ bool in_place = false;
+ if (output == nullptr || output == input)
+ {
+ in_place = true;
+ output = input;
+ }
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights);
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(input, DataLayout::NHWC);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON(depth_multiplier > 1 && dwc_weights_info.n0 != 1);
- ARM_COMPUTE_RETURN_ERROR_ON(conv_info.stride().first < 1);
- ARM_COMPUTE_RETURN_ERROR_ON(conv_info.stride().second < 1);
- ARM_COMPUTE_RETURN_ERROR_ON((dilation.x() < 1) || (dilation.y() < 1));
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED,
+ DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON(conv_info.pad_stride_info.stride().first > 1 && dwc_info.m0 != 1);
+ ARM_COMPUTE_RETURN_ERROR_ON(conv_info.dilation.x() > 1 && dwc_info.m0 != 1);
+ ARM_COMPUTE_RETURN_ERROR_ON((dwc_info.export_input_to_cl_image == true));
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((dwc_info.export_weights_to_cl_image == true) &&
+ (export_to_cl_image(weights) == false),
+ "Weights cannot be exported to cl_image!");
+ ARM_COMPUTE_RETURN_ERROR_ON((dwc_info.export_weights_to_cl_image == true) && ((dwc_info.n0 % 4) != 0));
+ ARM_COMPUTE_RETURN_ERROR_ON(conv_info.pad_stride_info.stride().first < 1);
+ ARM_COMPUTE_RETURN_ERROR_ON(conv_info.pad_stride_info.stride().second < 1);
+ ARM_COMPUTE_RETURN_ERROR_ON((conv_info.dilation.x() < 1) || (conv_info.dilation.y() < 1));
const size_t idx_c = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL);
ARM_COMPUTE_UNUSED(idx_c);
- ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_c) != (input->dimension(idx_c) * depth_multiplier));
+ ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_c) != (input->dimension(idx_c) * conv_info.depth_multiplier));
+
+ // In place restrictions
+ if (in_place)
+ {
+ const int weights_width_idx =
+ get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::WIDTH);
+ const int weights_height_idx =
+ get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::HEIGHT);
+ ARM_COMPUTE_RETURN_ERROR_ON(weights->tensor_shape()[weights_width_idx] != 1U ||
+ weights->tensor_shape()[weights_height_idx] != 1U);
+ ARM_COMPUTE_RETURN_ERROR_ON(conv_info.depth_multiplier != 1U);
+ ARM_COMPUTE_RETURN_ERROR_ON(conv_info.pad_stride_info.stride() != std::make_pair(1U, 1U));
+ ARM_COMPUTE_RETURN_ERROR_ON(conv_info.dilation != Size2D(1U, 1U));
+ ARM_COMPUTE_RETURN_ERROR_ON(
+ conv_info.pad_stride_info
+            .has_padding()); // Note that in principle padding can be supported with in_place, but we choose not to support it
+ }
- const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier, dilation);
+ const ConvolutionInfo info{conv_info.pad_stride_info, conv_info.depth_multiplier, ActivationLayerInfo(),
+ conv_info.dilation};
+ const TensorShape output_shape =
+ arm_compute::misc::shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info);
+
+ if (conv_info.depth_multiplier > 1 && dwc_info.n0 > 1)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON((conv_info.depth_multiplier % dwc_info.n0) != 0);
+ }
const bool is_quantized = is_data_type_quantized(input->data_type());
- if(biases != nullptr)
+ if (biases != nullptr)
{
ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != output_shape[idx_c]);
ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
- if(is_quantized)
+ if (is_quantized)
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
}
@@ -77,7 +125,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
}
}
- if(is_quantized)
+ if (is_quantized)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output_multipliers, output_shifts);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_multipliers, 1, DataType::S32);
@@ -85,7 +133,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
ARM_COMPUTE_RETURN_ERROR_ON(output_multipliers->num_dimensions() > 1);
ARM_COMPUTE_RETURN_ERROR_ON(output_shifts->num_dimensions() > 1);
- if(is_data_type_quantized_per_channel(weights->data_type()))
+ if (is_data_type_quantized_per_channel(weights->data_type()))
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QSYMM8_PER_CHANNEL);
ARM_COMPUTE_RETURN_ERROR_ON(output_shape[idx_c] != output_multipliers->dimension(0));
@@ -103,22 +151,24 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
}
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
}
- if(is_data_type_quantized(input->data_type()))
+ if (is_data_type_quantized(input->data_type()))
{
const UniformQuantizationInfo iq_info = input->quantization_info().uniform();
const UniformQuantizationInfo wq_info = weights->quantization_info().uniform();
- const UniformQuantizationInfo oq_info = (output->total_size() != 0) ? output->quantization_info().uniform() : iq_info;
+ const UniformQuantizationInfo oq_info =
+ (output->total_size() != 0) ? output->quantization_info().uniform() : iq_info;
float multiplier = iq_info.scale * wq_info.scale / oq_info.scale;
int output_multiplier = 0;
int output_shift = 0;
- ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));
+ ARM_COMPUTE_RETURN_ON_ERROR(
+ quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));
}
return Status{};
@@ -133,110 +183,194 @@ CLDepthwiseConvolutionLayerNativeKernel::CLDepthwiseConvolutionLayerNativeKernel
_depth_multiplier(1),
_output_multipliers(nullptr),
_output_shifts(nullptr),
+ _export_input_to_cl_image(false),
+ _export_weights_to_cl_image(false),
_is_quantized(false)
{
+ _type = CLKernelType::DEPTHWISE;
}
-void CLDepthwiseConvolutionLayerNativeKernel::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const DWCWeightsKernelInfo &dwc_weights_info,
- const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation,
- const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
+void CLDepthwiseConvolutionLayerNativeKernel::configure(ICLTensor *input,
+ const ICLTensor *weights,
+ const ICLTensor *biases,
+ ICLTensor *output,
+ const DWCComputeKernelInfo &dwc_info,
+ const ConvolutionInfo &conv_info,
+ const ICLTensor *output_multipliers,
+ const ICLTensor *output_shifts)
{
- configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, dwc_weights_info, dwc_info, conv_info, depth_multiplier, dilation, output_multipliers, output_shifts);
+ configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, dwc_info, conv_info,
+ output_multipliers, output_shifts);
}
-void CLDepthwiseConvolutionLayerNativeKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
- const DWCWeightsKernelInfo &dwc_weights_info,
- const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation,
- const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
+void CLDepthwiseConvolutionLayerNativeKernel::configure(const CLCompileContext &compile_context,
+ ICLTensor *input,
+ const ICLTensor *weights,
+ const ICLTensor *biases,
+ ICLTensor *output,
+ const DWCComputeKernelInfo &dwc_info,
+ const ConvolutionInfo &conv_info,
+ const ICLTensor *output_multipliers,
+ const ICLTensor *output_shifts)
{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(),
- dwc_weights_info, dwc_info, conv_info, depth_multiplier, dilation,
- (output_multipliers != nullptr) ? output_multipliers->info() : nullptr, (output_shifts != nullptr) ? output_shifts->info() : nullptr));
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
+ if (output == nullptr)
+ {
+ // In-place
+ output = input;
+ }
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(
+ input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(), dwc_info,
+ conv_info, (output_multipliers != nullptr) ? output_multipliers->info() : nullptr,
+ (output_shifts != nullptr) ? output_shifts->info() : nullptr));
+
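+    // Record the current tensor paddings to later verify that configuration does not change them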
+ auto padding_info = get_padding_info({input, output});
+
+ const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_depthwise_convolution_shape(
+ *(input->info()), *(weights->info()), conv_info);
+ auto_init_if_empty(*(output->info()), input->info()
+ ->clone()
+ ->set_tensor_shape(output_shape)
+ .set_quantization_info(output->info()->quantization_info()));
+
+ _input = input;
+ _output = output;
+ _weights = weights;
+ _biases = biases;
+ _depth_multiplier = conv_info.depth_multiplier;
+ _output_multipliers = output_multipliers;
+ _output_shifts = output_shifts;
+ _export_input_to_cl_image = dwc_info.export_input_to_cl_image;
+ _export_weights_to_cl_image = dwc_info.export_weights_to_cl_image;
+ _is_quantized = is_data_type_quantized(input->info()->data_type());
+
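+    // n0: elements processed along the channel dimension per work-item (adjusted to the output channel count);
+    // m0: elements processed along the width dimension, capped by the output width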
+ const unsigned int n0 = adjust_vec_size(dwc_info.n0, output->info()->dimension(0));
+ const unsigned int m0 = std::min(dwc_info.m0, (unsigned int)output->info()->dimension(1));
+ std::string kernel_name = "";
- auto padding_info = get_padding_info({ input, output });
+ CLBuildOptions build_opts;
- const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_depthwise_convolution_shape(*(input->info()), *(weights->info()), conv_info, depth_multiplier, dilation);
- auto_init_if_empty(*(output->info()), input->info()->clone()->set_tensor_shape(output_shape).set_quantization_info(output->info()->quantization_info()));
+ // Update the padding for the input/weights tensor if we can export to cl_image
+ if (_export_input_to_cl_image)
+ {
+ arm_compute::opencl::kernels::gemm::update_padding_for_cl_image(input->info());
+ }
- _input = input;
- _output = output;
- _weights = weights;
- _biases = biases;
- _depth_multiplier = depth_multiplier;
- _output_multipliers = output_multipliers;
- _output_shifts = output_shifts;
- _is_quantized = is_data_type_quantized(input->info()->data_type());
+ if (_export_weights_to_cl_image)
+ {
+ arm_compute::opencl::kernels::gemm::update_padding_for_cl_image(weights->info());
+ }
- const unsigned int n0 = adjust_vec_size(dwc_weights_info.n0, input->info()->dimension(0));
+    // Conditions under which -cl-fast-relaxed-math causes accuracy issues are tracked in COMPMID-5324
+ const GPUTarget gpu_target = get_target();
+ const auto act_function = conv_info.act_info.activation();
+ const auto dst_data_type = _output->info()->data_type();
- CLBuildOptions build_opts;
- build_opts.add_option_if(_biases != nullptr, "-DHAS_BIAS");
- build_opts.add_option_if(_input->info()->tensor_shape().total_size_upper(3) > 1, "-DDST_DEPTH=" + support::cpp11::to_string(static_cast<int>(_output->info()->dimension(2))));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(_input->info()->data_type()));
- build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(dwc_info.activation_info.activation())));
- build_opts.add_option("-DDEPTH_MULTIPLIER=" + support::cpp11::to_string(depth_multiplier));
+ if ((gpu_target != GPUTarget::G71 && (gpu_target & GPUTarget::GPU_ARCH_MASK) == GPUTarget::BIFROST) &&
+ (act_function == ActivationLayerInfo::ActivationFunction::BOUNDED_RELU ||
+ act_function == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU) &&
+ (dst_data_type == DataType::F32 || dst_data_type == DataType::F16))
+ {
+        // -cl-fast-relaxed-math also sets -cl-finite-math-only and -cl-unsafe-math-optimizations.
+        // To disable -cl-finite-math-only, we only include -cl-unsafe-math-optimizations.
+ build_opts.add_option("-cl-unsafe-math-optimizations");
+ }
+ else
+ {
+ build_opts.add_option("-cl-fast-relaxed-math");
+ }
+
+ build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_function)));
+ build_opts.add_option("-DDEPTH_MULTIPLIER=" + support::cpp11::to_string(conv_info.depth_multiplier));
+ build_opts.add_option_if_else(_export_input_to_cl_image, "-DSRC_TENSOR_TYPE=IMAGE", "-DSRC_TENSOR_TYPE=BUFFER");
+    // Note: SRC_DATA_TYPE must have the same data type as WEI_DATA_TYPE. In the quantized case, the data types
+    // for the activation and weights could differ. However, since the implementation only works when both have
+    // the same data type, we have to change the offset to take this aspect into account
+ build_opts.add_option("-DSRC_DATA_TYPE=" + get_cl_type_from_data_type(_input->info()->data_type()));
+ build_opts.add_option("-DDST_TENSOR_TYPE=BUFFER");
+ build_opts.add_option("-DDST_DATA_TYPE=" + get_cl_type_from_data_type(dst_data_type));
+ build_opts.add_option_if_else(_export_weights_to_cl_image, "-DWEI_TENSOR_TYPE=IMAGE", "-DWEI_TENSOR_TYPE=BUFFER");
+ build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(_input->info()->dimension(1)));
+ build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(_input->info()->dimension(2)));
+ build_opts.add_option("-DDST_WIDTH=" + support::cpp11::to_string(_output->info()->dimension(1)));
+ build_opts.add_option("-DDST_HEIGHT=" + support::cpp11::to_string(_output->info()->dimension(2)));
+ build_opts.add_option("-DWEI_WIDTH=" + support::cpp11::to_string(_weights->info()->dimension(1)));
+ build_opts.add_option("-DWEI_HEIGHT=" + support::cpp11::to_string(_weights->info()->dimension(2)));
+ build_opts.add_option("-DWEI_DATA_TYPE=" + get_cl_type_from_data_type(_weights->info()->data_type()));
+ build_opts.add_option("-DPAD_TOP=" + support::cpp11::to_string(conv_info.pad_stride_info.pad_top()));
+ build_opts.add_option("-DPAD_LEFT=" + support::cpp11::to_string(conv_info.pad_stride_info.pad_left()));
+ build_opts.add_option("-DSTRIDE_X=" + support::cpp11::to_string(conv_info.pad_stride_info.stride().first));
+ build_opts.add_option("-DSTRIDE_Y=" + support::cpp11::to_string(conv_info.pad_stride_info.stride().second));
+ build_opts.add_option("-DDILATION_X=" + support::cpp11::to_string(conv_info.dilation.x()));
+ build_opts.add_option("-DDILATION_Y=" + support::cpp11::to_string(conv_info.dilation.y()));
build_opts.add_option("-DN0=" + support::cpp11::to_string(n0));
- build_opts.add_option("-DSRC_DIM1=" + support::cpp11::to_string(_input->info()->dimension(1)));
- build_opts.add_option("-DSRC_DIM2=" + support::cpp11::to_string(_input->info()->dimension(2)));
- build_opts.add_option("-DKERNEL_WIDTH=" + support::cpp11::to_string(weights->info()->dimension(1)));
- build_opts.add_option("-DKERNEL_HEIGHT=" + support::cpp11::to_string(weights->info()->dimension(2)));
- build_opts.add_option("-DCONV_PAD_TOP=" + support::cpp11::to_string(conv_info.pad_top()));
- build_opts.add_option("-DCONV_PAD_LEFT=" + support::cpp11::to_string(conv_info.pad_left()));
- build_opts.add_option("-DCONV_STRIDE_X=" + support::cpp11::to_string(conv_info.stride().first));
- build_opts.add_option("-DCONV_STRIDE_Y=" + support::cpp11::to_string(conv_info.stride().second));
- build_opts.add_option("-DDILATION_X=" + support::cpp11::to_string(dilation.x()));
- build_opts.add_option("-DDILATION_Y=" + support::cpp11::to_string(dilation.y()));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(_input->info()->dimension(0) % n0));
-
- std::string kernel_name = (_is_quantized) ? "dwc_MxN_native_quantized8_nhwc" : "dwc_MxN_native_fp_nhwc";
-
- if(_is_quantized)
+ build_opts.add_option("-DM0=" + support::cpp11::to_string(m0));
+ build_opts.add_option("-DM0_A=" + support::cpp11::to_string(_weights->info()->dimension(1) + m0 - 1));
+ build_opts.add_option_if_else(conv_info.depth_multiplier > 1, "-DN0_A=1",
+ "-DN0_A=" + support::cpp11::to_string(n0));
+ build_opts.add_option("-DPARTIAL_N0=" + support::cpp11::to_string(_output->info()->dimension(0) % n0));
+ build_opts.add_option_if(_input->info()->num_dimensions() > 3, "-DBATCHED_EXECUTION");
+
+    // Force unrolling with a pragma when any of the following values exceeds the maximum manual unroll count
+ set_unroll_with_pragma(build_opts, {static_cast<int>(_weights->info()->dimension(1) + m0 - 1),
+ static_cast<int>(_weights->info()->dimension(1)),
+ static_cast<int>(_weights->info()->dimension(2))});
+
+ if (biases != nullptr)
{
- const UniformQuantizationInfo iq_info = _input->info()->quantization_info().uniform();
- const UniformQuantizationInfo wq_info = _weights->info()->quantization_info().uniform();
- const UniformQuantizationInfo oq_info = _output->info()->quantization_info().uniform();
+ build_opts.add_option(std::string("-DHAS_BIAS"));
+ build_opts.add_option(
+ std::string("-DBIA_DATA_TYPE=" + get_cl_type_from_data_type(biases->info()->data_type())));
+ }
- build_opts.add_option("-DINPUT_OFFSET=" + support::cpp11::to_string(-iq_info.offset));
- build_opts.add_option("-DWEIGHTS_OFFSET=" + support::cpp11::to_string(-wq_info.offset));
- build_opts.add_option("-DOUTPUT_OFFSET=" + support::cpp11::to_string(oq_info.offset));
- build_opts.add_option_if(is_data_type_quantized_per_channel(weights->info()->data_type()), "-DPER_CHANNEL_QUANTIZATION");
+ if (_is_quantized)
+ {
+ kernel_name = "dwc_native_quantized_nhwc";
+ const UniformQuantizationInfo iqinfo = input->info()->quantization_info().uniform();
+ const UniformQuantizationInfo wqinfo = weights->info()->quantization_info().uniform();
+ const UniformQuantizationInfo oqinfo = output->info()->quantization_info().uniform();
- // Compute non-per-channel multiplier and shift anyway to make OpenCL kernel simpler
- float multiplier = iq_info.scale * wq_info.scale / oq_info.scale;
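+        // Quantized zero point of the input (as a signed 32-bit value), passed to the kernel as -DZERO_VALUE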
+ PixelValue zero_value = PixelValue(0, input->info()->data_type(), input->info()->quantization_info());
+ int zero_value_s32;
+ zero_value.get(zero_value_s32);
+
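+        // Requantization: fold the input, weights and output scales into a fixed-point multiplier and shift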
+ float multiplier = iqinfo.scale * wqinfo.scale / oqinfo.scale;
int output_multiplier = 0;
int output_shift = 0;
quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
- build_opts.add_option("-DOUTPUT_MULTIPLIER=" + support::cpp11::to_string(output_multiplier));
- build_opts.add_option("-DOUTPUT_SHIFT=" + support::cpp11::to_string(output_shift));
-
- if(dwc_info.activation_info.enabled())
- {
- int a_val{};
- int b_val{};
- std::tie(b_val, a_val) = get_quantized_activation_min_max(dwc_info.activation_info, input->info()->data_type(), oq_info);
-
- const int o1 = oq_info.offset;
-
- build_opts.add_option("-DA_VAL=" + support::cpp11::to_string(a_val));
- build_opts.add_option("-DB_VAL=" + support::cpp11::to_string(b_val));
- build_opts.add_option("-DCONST_0=" + support::cpp11::to_string(o1));
-
- const float s1 = iq_info.scale;
- build_opts.add_option("-DS1_VAL=" + float_to_string_with_full_precision(s1));
- build_opts.add_option("-DO1_VAL=" + support::cpp11::to_string(o1));
- }
-
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
- build_opts.add_option("-DWEIGHTS_TYPE=" + get_cl_type_from_data_type(weights->info()->data_type()));
+ build_opts.add_option("-DDST_MULTIPLIER=" + support::cpp11::to_string(output_multiplier));
+ build_opts.add_option("-DDST_SHIFT=" + support::cpp11::to_string(output_shift));
+ build_opts.add_option("-DSRC_OFFSET=" + support::cpp11::to_string(-iqinfo.offset));
+ build_opts.add_option("-DWEI_OFFSET=" + support::cpp11::to_string(-wqinfo.offset));
+ build_opts.add_option("-DDST_OFFSET=" + support::cpp11::to_string(oqinfo.offset));
+ build_opts.add_option("-DZERO_VALUE=" + support::cpp11::to_string(zero_value_s32));
+ build_opts.add_option("-DACC_DATA_TYPE=" + get_cl_type_from_data_type(DataType::S32));
+ build_opts.add_option("-DDST_MULTIPLIERS_DATA_TYPE=" +
+ get_cl_type_from_data_type(_output_multipliers->info()->data_type()));
+ build_opts.add_option("-DDST_SHIFTS_DATA_TYPE=" +
+ get_cl_type_from_data_type(_output_shifts->info()->data_type()));
+ build_opts.add_option_if_else(weights->info()->data_type() == DataType::QSYMM8_PER_CHANNEL,
+ "-DQUANTIZATION_TYPE=PER_CHANNEL", "-DQUANTIZATION_TYPE=PER_TENSOR");
+ // Note: We expect the input and output tensors to always adopt a per-tensor quantization approach
+ int a_val{};
+ int b_val{};
+ std::tie(b_val, a_val) =
+ get_quantized_activation_min_max(conv_info.act_info, input->info()->data_type(), oqinfo);
+
+ build_opts.add_option_if(conv_info.act_info.enabled(), "-DA_VAL=" + support::cpp11::to_string(a_val));
+ build_opts.add_option_if(conv_info.act_info.enabled(), "-DB_VAL=" + support::cpp11::to_string(b_val));
}
else
{
- build_opts.add_option_if(dwc_info.activation_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(dwc_info.activation_info.a()));
- build_opts.add_option_if(dwc_info.activation_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(dwc_info.activation_info.b()));
+ kernel_name = "dwc_native_fp_nhwc";
+ build_opts.add_option("-DACC_DATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+ build_opts.add_option_if(conv_info.act_info.enabled(),
+ "-DA_VAL=" + float_to_string_with_full_precision(conv_info.act_info.a()));
+ build_opts.add_option_if(conv_info.act_info.enabled(),
+ "-DB_VAL=" + float_to_string_with_full_precision(conv_info.act_info.b()));
}
- Window win = calculate_max_window(*(output->info()), Steps(n0));
+ Window win = calculate_max_window(*(output->info()), Steps(n0, m0));
ICLKernel::configure_internal(win);
_kernel = create_kernel(compile_context, kernel_name, build_opts.options());
@@ -261,11 +395,17 @@ void CLDepthwiseConvolutionLayerNativeKernel::configure(const CLCompileContext &
_config_id += string_from_data_type(input->info()->data_type());
}
-Status CLDepthwiseConvolutionLayerNativeKernel::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
- const DWCWeightsKernelInfo &dwc_weights_info, const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier, const Size2D &dilation, const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
+Status CLDepthwiseConvolutionLayerNativeKernel::validate(const ITensorInfo *input,
+ const ITensorInfo *weights,
+ const ITensorInfo *biases,
+ const ITensorInfo *output,
+ const DWCComputeKernelInfo &dwc_info,
+ const ConvolutionInfo &conv_info,
+ const ITensorInfo *output_multipliers,
+ const ITensorInfo *output_shifts)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, dwc_weights_info, dwc_info, conv_info, depth_multiplier, dilation, output_multipliers, output_shifts));
+ ARM_COMPUTE_RETURN_ON_ERROR(
+ validate_arguments(input, weights, biases, output, dwc_info, conv_info, output_multipliers, output_shifts));
return Status{};
}
@@ -276,37 +416,61 @@ void CLDepthwiseConvolutionLayerNativeKernel::run(const Window &window, cl::Comm
// Collapse window
Window window_collapsed = window.collapse(ICLKernel::window(), Window::DimZ);
- Window slice_in = window.first_slice_window_4D();
- Window slice_out = window_collapsed.first_slice_window_4D();
- if(_depth_multiplier != 1)
- {
- ARM_COMPUTE_ERROR_ON(slice_out.x().step() != 1);
- slice_out.set(Window::DimX, Window::Dimension(0, _input->info()->tensor_shape()[0], 1));
- }
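+    // The kernel is enqueued once over the collapsed window; no per-slice loop is required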
+ Window slice = window_collapsed.first_slice_window_4D();
- unsigned int idx = 2 * num_arguments_per_4D_tensor() + num_arguments_per_3D_tensor();
+ cl::Image2D input_cl_image;
+ cl::Image2D weights_cl_image;
- // Set output multipliers in case of quantized data type
- if(_is_quantized)
+ if (_export_input_to_cl_image || _export_weights_to_cl_image)
{
- add_1D_tensor_argument(idx, _output_multipliers, slice_in);
- add_1D_tensor_argument(idx, _output_shifts, slice_in);
+ // Export cl_buffer to cl_image
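+        // The 2D image view packs 4 tensor elements per texel, hence width / 4; the height flattens the remaining dimensions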
+ if (_export_input_to_cl_image)
+ {
+ const size_t image_w = _input->info()->dimension(0) / 4;
+ const size_t image_h =
+ _input->info()->dimension(1) * _input->info()->dimension(2) * _input->info()->dimension(3);
+ const TensorShape shape2d(image_w, image_h);
+ const size_t image_row_pitch = _input->info()->strides_in_bytes()[1];
+ input_cl_image =
+ create_image2d_from_buffer(CLKernelLibrary::get().context(), _input->cl_buffer(), shape2d,
+ _input->info()->data_type(), image_row_pitch, CLImage2DType::ReadOnly);
+ }
+
+ if (_export_weights_to_cl_image)
+ {
+ const size_t image_w = _weights->info()->dimension(0) / 4;
+ const size_t image_h =
+ _weights->info()->dimension(1) * _weights->info()->dimension(2) * _weights->info()->dimension(3);
+ const TensorShape shape2d(image_w, image_h);
+ const size_t image_row_pitch = _weights->info()->strides_in_bytes()[1];
+ weights_cl_image =
+ create_image2d_from_buffer(CLKernelLibrary::get().context(), _weights->cl_buffer(), shape2d,
+ _weights->info()->data_type(), image_row_pitch, CLImage2DType::ReadOnly);
+ }
}
- if(_biases != nullptr)
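+    // Kernel argument order: [optional input image], input, output, [optional weights image], weights,
+    // [optional multipliers and shifts], [optional biases]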
+ unsigned int idx = 0;
+ if (_export_input_to_cl_image)
{
- add_1D_tensor_argument(idx, _biases, slice_in);
+ _kernel.setArg(idx++, input_cl_image);
}
-
- do
+ add_4d_tensor_nhwc_argument(idx, _input);
+ add_4d_tensor_nhwc_argument(idx, _output);
+ if (_export_weights_to_cl_image)
+ {
+ _kernel.setArg(idx++, weights_cl_image);
+ }
+ add_4d_tensor_nhwc_argument(idx, _weights);
+ if (_is_quantized)
+ {
+ add_1D_tensor_argument(idx, _output_multipliers, slice);
+ add_1D_tensor_argument(idx, _output_shifts, slice);
+ }
+ if (_biases != nullptr)
{
- idx = 0;
- add_4D_tensor_argument(idx, _input, slice_in);
- add_4D_tensor_argument(idx, _output, slice_out);
- add_3D_tensor_argument(idx, _weights, slice_out);
- enqueue(queue, *this, slice_out, lws_hint());
+ add_1D_tensor_argument(idx, _biases, slice);
}
- while(window_collapsed.slide_window_slice_4D(slice_out) && window.slide_window_slice_4D(slice_in));
+ enqueue(queue, *this, slice, lws_hint());
}
} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h b/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h
index 325f4e7067..d34a662966 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h
+++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,9 +24,10 @@
#ifndef ARM_COMPUTE_CLDEPTHWISECONVOLUTIONLAYERNATIVEKERNEL_H
#define ARM_COMPUTE_CLDEPTHWISECONVOLUTIONLAYERNATIVEKERNEL_H
-#include "src/core/CL/ICLKernel.h"
-
#include "arm_compute/core/KernelDescriptors.h"
+#include "arm_compute/function_info/ConvolutionInfo.h"
+
+#include "src/core/CL/ICLKernel.h"
namespace arm_compute
{
@@ -47,85 +48,84 @@ public:
CLDepthwiseConvolutionLayerNativeKernel(CLDepthwiseConvolutionLayerNativeKernel &&) = default;
/** Allow instances of this class to be moved */
CLDepthwiseConvolutionLayerNativeKernel &operator=(CLDepthwiseConvolutionLayerNativeKernel &&) = default;
+
/** Initialize the function's source, destination and parameters
*
+ * @param[in] compile_context The compile context to be used.
* @param[in] input Source tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/FP32/FP16. Data layout supported: NHWC
* @param[in] weights Weights tensor. A 3D tensor with dimensions [IFM, N, M].
* Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8.
* @param[in] biases Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
* Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
- * @param[out] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] dwc_weights_info Depthwise convolution layer weights info to retrieve the number of output elements processed by each thread
+ * @param[out] output Destination tensor. Pass in nullptr or @p input for in-place operation. Data type supported: Same as @p input.
* @param[in] dwc_info Depthwise convolution layer info
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
+ * @param[in] conv_info Convolution info (padding, stride, dilation, ...)
* @param[in] output_multipliers (Optional) Output multipliers tensor for quantized computations. In case of per-channel quantization,
* the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
* @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
* the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
+ *
+ * @note: In-place is only supported when
+ * * data layout: NHWC
+ * * filter: 1x1
+ * * @p depth_multiplier: 1
+ * * strides: 1
+ * * dilation: 1
+ * * no padding
+ * * no change of data layout after configure
*/
- void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const DWCWeightsKernelInfo &dwc_weights_info,
- const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, const Size2D &dilation = Size2D(1U, 1U),
- const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr);
- /** Initialize the function's source, destination and parameters
+ void configure(const CLCompileContext &compile_context,
+ ICLTensor *input,
+ const ICLTensor *weights,
+ const ICLTensor *biases,
+ ICLTensor *output,
+ const DWCComputeKernelInfo &dwc_info,
+ const ConvolutionInfo &conv_info,
+ const ICLTensor *output_multipliers = nullptr,
+ const ICLTensor *output_shifts = nullptr);
+
+    /** Initialize the function's source, destination and parameters
*
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Source tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/FP32/FP16. Data layout supported: NHWC
- * @param[in] weights Weights tensor. A 3D tensor with dimensions [IFM, N, M].
- * Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8.
- * @param[in] biases Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
- * @param[out] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] dwc_weights_info Depthwise convolution layer weights info to retrieve the number of output elements processed by each thread
- * @param[in] dwc_info Depthwise convolution layer info
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- * @param[in] output_multipliers (Optional) Output multipliers tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- * @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
+ * Similar to @ref CLDepthwiseConvolutionLayerNativeKernel::configure()
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const DWCWeightsKernelInfo &dwc_weights_info,
- const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, const Size2D &dilation = Size2D(1U, 1U),
- const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr);
+ void configure(ICLTensor *input,
+ const ICLTensor *weights,
+ const ICLTensor *biases,
+ ICLTensor *output,
+ const DWCComputeKernelInfo &dwc_info,
+ const ConvolutionInfo &conv_info,
+ const ICLTensor *output_multipliers = nullptr,
+ const ICLTensor *output_shifts = nullptr);
+
/** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayerNativeKernel
*
- * @param[in] input Source tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/FP32/FP16. Data layout supported: NHWC
- * @param[in] weights Weights tensor info. A 3D tensor with dimensions [IFM, N, M].
- * Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8.
- * @param[in] biases Biases tensor info. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
- * @param[in] output Destination tensor info. Data type supported: Same as @p input.
- * @param[in] dwc_weights_info Depthwise convolution layer weights info to retrieve the number of output elements processed by each thread
- * @param[in] dwc_info Depthwise convolution layer info
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- * @param[in] output_multipliers (Optional) Output multipliers tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- * @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
+ * Similar to @ref CLDepthwiseConvolutionLayerNativeKernel::configure()
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const DWCWeightsKernelInfo &dwc_weights_info,
- const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, const Size2D &dilation = Size2D(1U, 1U),
- const ITensorInfo *output_multipliers = nullptr, const ITensorInfo *output_shifts = nullptr);
+ static Status validate(const ITensorInfo *input,
+ const ITensorInfo *weights,
+ const ITensorInfo *biases,
+ const ITensorInfo *output,
+ const DWCComputeKernelInfo &dwc_info,
+ const ConvolutionInfo &conv_info,
+ const ITensorInfo *output_multipliers = nullptr,
+ const ITensorInfo *output_shifts = nullptr);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
private:
- const ICLTensor *_input;
- const ICLTensor *_weights;
- const ICLTensor *_biases;
- ICLTensor *_output;
- unsigned int _depth_multiplier;
- const ICLTensor *_output_multipliers;
- const ICLTensor *_output_shifts;
- bool _is_quantized;
+ const ICLTensor *_input{};
+ const ICLTensor *_weights{};
+ const ICLTensor *_biases{};
+ ICLTensor *_output{};
+ unsigned int _depth_multiplier{0};
+ const ICLTensor *_output_multipliers{};
+ const ICLTensor *_output_shifts{};
+ bool _export_input_to_cl_image{false};
+ bool _export_weights_to_cl_image{true};
+ bool _is_quantized{false};
};
} // namespace arm_compute
#endif /*ARM_COMPUTE_CLDEPTHWISECONVOLUTIONLAYERNATIVEKERNEL_H */
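
For reference, a minimal usage sketch (not part of the patch) of the reworked configure()/validate() entry points shown above, which are normally driven by the CLDepthwiseConvolutionLayer runtime function. It assumes src and weights are already-allocated CLTensor objects in NHWC layout with a 1x1 depthwise filter, so the in-place restrictions listed in the @note are satisfied; the tensor names and literal values are illustrative only.

    // Illustrative sketch only: drive the native depthwise kernel through the new descriptors.
    DWCComputeKernelInfo dwc_info{};
    dwc_info.n0                         = 4;     // channels processed per work-item
    dwc_info.m0                         = 1;     // output columns processed per work-item
    dwc_info.export_input_to_cl_image   = false;
    dwc_info.export_weights_to_cl_image = false;

    ConvolutionInfo conv_info{};
    conv_info.pad_stride_info  = PadStrideInfo(1, 1, 0, 0); // stride 1, no padding (required for in-place)
    conv_info.depth_multiplier = 1;
    conv_info.dilation         = Size2D(1U, 1U);

    CLDepthwiseConvolutionLayerNativeKernel kernel;
    ARM_COMPUTE_ERROR_THROW_ON(CLDepthwiseConvolutionLayerNativeKernel::validate(
        src.info(), weights.info(), nullptr /* biases */, nullptr /* in-place */, dwc_info, conv_info));
    kernel.configure(&src, &weights, nullptr /* biases */, nullptr /* output: nullptr selects in-place */,
                     dwc_info, conv_info);
    CLScheduler::get().enqueue(kernel); // runs over the window configured above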
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.cpp
deleted file mode 100644
index b10c23bde9..0000000000
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.cpp
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/CL/ICLKernel.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const DepthwiseConvolutionReshapeInfo &info)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
- const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
-
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(input, DataLayout::NHWC);
- ARM_COMPUTE_RETURN_ERROR_ON(info.c0 != 4);
- ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(idx_h) != 3);
- ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(idx_w) != 3);
- ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
-
- if(output->total_size() != 0)
- {
- auto reshaped_weights_shape = arm_compute::misc::shape_calculator::compute_reshaped_depthwise_weights_shape(*input, info);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), reshaped_weights_shape);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const DepthwiseConvolutionReshapeInfo &info)
-{
- auto reshaped_input_shape = arm_compute::misc::shape_calculator::compute_reshaped_depthwise_weights_shape(*input, info);
- auto_init_if_empty(*output, reshaped_input_shape, 1, input->data_type(), input->quantization_info());
-
- Window win = calculate_max_window(*input, Steps(info.c0));
- AccessWindowHorizontal weights_access(input, 0, info.c0);
- const bool window_changed = update_window_and_padding(win, weights_access);
-
- output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
-}
-} // namespace
-
-CLDepthwiseConvolutionLayerReshapeWeightsKernel::CLDepthwiseConvolutionLayerReshapeWeightsKernel()
- : _input(nullptr), _output(nullptr)
-{
-}
-
-void CLDepthwiseConvolutionLayerReshapeWeightsKernel::configure(const ICLTensor *input, ICLTensor *output, const DepthwiseConvolutionReshapeInfo &info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, output, info);
-}
-
-void CLDepthwiseConvolutionLayerReshapeWeightsKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const DepthwiseConvolutionReshapeInfo &info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), info));
- auto win_config = validate_and_configure_window(input->info(), output->info(), info);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
-
- ICLKernel::configure_internal(win_config.second);
-
- _input = input;
- _output = output;
-
- // Build the kernel
- CLBuildOptions build_opts;
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(info.c0));
- build_opts.add_option("-DDST_WIDTH=" + support::cpp11::to_string(_output->info()->dimension(0)));
- build_opts.add_option_if(info.transpose, "-DTRANSPOSE");
- build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(input->info()->element_size()));
-
- _kernel = create_kernel(compile_context, "depthwise_convolution_reshape_weights", build_opts.options());
-}
-
-Status CLDepthwiseConvolutionLayerReshapeWeightsKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const DepthwiseConvolutionReshapeInfo &info)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), info).first);
- return Status{};
-}
-
-void CLDepthwiseConvolutionLayerReshapeWeightsKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
-
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, window);
- add_2D_tensor_argument(idx, _output, window);
- enqueue(queue, *this, window, lws_hint());
-}
-} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h b/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h
deleted file mode 100644
index 650fe9a11b..0000000000
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLDEPTHWISECONVOLUTIONLAYERRESHAPEWEIGHTSKERNEL_H
-#define ARM_COMPUTE_CLDEPTHWISECONVOLUTIONLAYERRESHAPEWEIGHTSKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the kernel to reshape the weights of depthwise convolution. */
-class CLDepthwiseConvolutionLayerReshapeWeightsKernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLDepthwiseConvolutionLayerReshapeWeightsKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLDepthwiseConvolutionLayerReshapeWeightsKernel(const CLDepthwiseConvolutionLayerReshapeWeightsKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLDepthwiseConvolutionLayerReshapeWeightsKernel &operator=(const CLDepthwiseConvolutionLayerReshapeWeightsKernel &) = delete;
- /** Default Move Constructor. */
- CLDepthwiseConvolutionLayerReshapeWeightsKernel(CLDepthwiseConvolutionLayerReshapeWeightsKernel &&) = default;
- /** Default move assignment operator */
- CLDepthwiseConvolutionLayerReshapeWeightsKernel &operator=(CLDepthwiseConvolutionLayerReshapeWeightsKernel &&) = default;
-
- /** Initialize the function's source and destination.
- *
- * @param[in] input The input tensor of dimension [IFM, W, H]. Data types supported: All. Data layouts supported: NHWC
- * @param[out] output The output tensor of dimension [W*H*C0, ceil(IFM/C0)]. C0 is the number of channels read by each thread. Data types supported: same as @p weights.
- * @param[in] info Depthwise convolution information to reshape the input tensor.
- */
- void configure(const ICLTensor *input, ICLTensor *output, const DepthwiseConvolutionReshapeInfo &info);
- /** Initialize the function's source and destination.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input The input tensor of dimension [IFM, W, H]. Data types supported: All. Data layouts supported: NHWC
- * @param[out] output The output tensor of dimension [W*H*C0, ceil(IFM/C0)]. C0 is the number of channels read by each thread. Data types supported: same as @p weights.
- * @param[in] info Depthwise convolution information to reshape the input tensor.
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const DepthwiseConvolutionReshapeInfo &info);
-
- /** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayer3x3NHWCKernel
- *
- * @param[in] input The input tensor info of dimension [IFM, W, H]. Data types supported: All. Data layouts supported: NHWC
- * @param[in] output The output tensor info of dimension [W*H*C0, ceil(IFM/C0)]. C0 is the number of channels read by each thread. Data types supported: same as @p weights.
- * @param[in] info Depthwise convolution information to reshape the input tensor.
- *
- * @return a Status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const DepthwiseConvolutionReshapeInfo &info);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_input;
- ICLTensor *_output;
-
- void configure_dot_product(const DepthwiseConvolutionReshapeInfo &info);
- void configure_generic(const DepthwiseConvolutionReshapeInfo &info);
-};
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CLDEPTHWISECONVOLUTIONLAYERRESHAPEWEIGHTSKERNEL_H */
diff --git a/src/core/CL/kernels/CLDequantizationLayerKernel.cpp b/src/core/CL/kernels/CLDequantizationLayerKernel.cpp
deleted file mode 100644
index 3723c651fe..0000000000
--- a/src/core/CL/kernels/CLDequantizationLayerKernel.cpp
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLDequantizationLayerKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL, DataType::QSYMM8, DataType::QSYMM16);
-
- if(output->tensor_shape().total_size() > 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
- }
-
- return Status{};
-}
-} // namespace
-
-CLDequantizationLayerKernel::CLDequantizationLayerKernel()
- : _input(nullptr), _output(nullptr)
-{
-}
-
-void CLDequantizationLayerKernel::configure(const ICLTensor *input, ICLTensor *output)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, output);
-}
-
-void CLDequantizationLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- // Output tensor auto initialization if not yet initialized
- auto_init_if_empty(*output->info(), input->info()->tensor_shape(), 1, DataType::F32);
-
- auto padding_info = get_padding_info({ input, output });
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
-
- _input = input;
- _output = output;
-
- const int vec_size_x = 16 / output->info()->element_size();
- const int output_width_x = output->info()->tensor_shape().x();
- const bool multi_access_x = (output_width_x / vec_size_x > 0);
-
- const bool is_quantized_per_channel = is_data_type_quantized_per_channel(input->info()->data_type());
- std::string kernel_name = "dequantization_layer";
-
- // Create kernel
- CLBuildOptions build_opts;
- if(!is_quantized_per_channel)
- {
- const UniformQuantizationInfo qinfo = input->info()->quantization_info().uniform();
- const int qoffset = is_data_type_quantized_asymmetric(input->info()->data_type()) ? qinfo.offset : 0;
- build_opts.add_option("-DSCALE=" + float_to_string_with_full_precision(qinfo.scale));
- build_opts.add_option("-DOFFSET=" + support::cpp11::to_string(qoffset));
- }
- else
- {
- kernel_name += "_per_channel";
- kernel_name += input->info()->data_layout() == DataLayout::NCHW ? "_nchw" : "_nhwc";
- }
-
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
- build_opts.add_option("-DDATA_TYPE_SRC=" + get_cl_type_from_data_type(input->info()->data_type()));
- build_opts.add_option("-DDATA_TYPE_DST=" + get_cl_type_from_data_type(output->info()->data_type()));
- build_opts.add_option_if(multi_access_x, "-DLAST_ACCESSED_X=" + support::cpp11::to_string(std::max<int>(output_width_x - vec_size_x, 0)));
-
- // Create kernel name
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Configure kernel window
- Window win = calculate_max_window(*output->info());
- if(multi_access_x)
- {
- win.set(Window::DimX,
- Window::Dimension(win.x().start(), ceil_to_multiple(win.x().end(), vec_size_x), vec_size_x));
- }
- ICLKernel::configure_internal(win);
-
- // Set output valid region
- output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape()));
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status CLDequantizationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output));
- return Status{};
-}
-
-void CLDequantizationLayerKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- const bool is_quantized_per_channel = is_data_type_quantized_per_channel(_input->info()->data_type());
-
-    // Collapse window
- Window new_window = is_quantized_per_channel ? window.collapse_if_possible(ICLKernel::window(), 4) : window.collapse_if_possible(ICLKernel::window(), 3);
- Window slice = new_window.first_slice_window_3D();
-
- if(is_quantized_per_channel)
- {
- unsigned int idx = num_arguments_per_3D_tensor() * 2; //Skip the input and output parameters
- _kernel.setArg(idx++, _input->quantization().scale->cl_buffer());
- }
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, slice);
- add_3D_tensor_argument(idx, _output, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(new_window.slide_window_slice_3D(slice));
-}
-} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLDequantizationLayerKernel.h b/src/core/CL/kernels/CLDequantizationLayerKernel.h
deleted file mode 100644
index 5579b5bc71..0000000000
--- a/src/core/CL/kernels/CLDequantizationLayerKernel.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLDEQUANTIZATIONLAYERKERNEL_H
-#define ARM_COMPUTE_CLDEQUANTIZATIONLAYERKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the dequantization layer kernel. */
-class CLDequantizationLayerKernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLDequantizationLayerKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLDequantizationLayerKernel(const CLDequantizationLayerKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLDequantizationLayerKernel &operator=(const CLDequantizationLayerKernel &) = delete;
- /** Default Move Constructor. */
- CLDequantizationLayerKernel(CLDequantizationLayerKernel &&) = default;
- /** Default move assignment operator */
- CLDequantizationLayerKernel &operator=(CLDequantizationLayerKernel &&) = default;
- /** Default destructor */
- ~CLDequantizationLayerKernel() = default;
- /** Set the input, output, min and max.
- *
- * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
- * @param[out] output Destination tensor. Data types supported: F16/F32.
- */
- void configure(const ICLTensor *input, ICLTensor *output);
- /** Set the input, output, min and max.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
- * @param[out] output Destination tensor. Data types supported: F16/F32.
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
- /** Static function to check if given info will lead to a valid configuration of @ref CLDequantizationLayerKernel
- *
- * @param[in] input Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
- * @param[in] output Output tensor info. Data types supported: F16/F32.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *output);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_input;
- ICLTensor *_output;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLDEQUANTIZATIONLAYERKERNEL_H */
diff --git a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
deleted file mode 100644
index 2fc3c60f67..0000000000
--- a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
+++ /dev/null
@@ -1,585 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLDirectConvolutionLayerKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/PixelValue.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
-
- const DataLayout data_layout = input->data_layout();
- const int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const int height_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
- const int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != weights->dimension(height_idx), "Weights should have same width and height");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(channel_idx) != input->dimension(channel_idx),
- "Weights feature map dimension should match the respective input's one");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->num_dimensions() > 4, "Weights can be at most 4 dimensional");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(width_idx) == 1) && std::get<0>(conv_info.stride()) > 3, "Strides larger than 3 not supported for 1x1 convolution.");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(width_idx) == 3 || weights->dimension(width_idx) == 5 || weights->dimension(width_idx) == 9)
- && std::get<0>(conv_info.stride()) > 2,
- "Strides larger than 2 not supported for 3x3, 5x5, 9x9 convolution.");
-
- if(data_layout == DataLayout::NCHW)
- {
- if(is_data_type_quantized(input->data_type()))
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != 1 && weights->dimension(width_idx) != 3 && weights->dimension(width_idx) != 5 && weights->dimension(width_idx) != 9,
- "Kernel sizes other than 1x1, 3x3, 5x5 or 9x9 are not supported with quantized data types");
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != 1 && weights->dimension(width_idx) != 3 && weights->dimension(width_idx) != 5,
- "Kernel sizes other than 1x1, 3x3 or 5x5 are not supported with float data types");
- }
- }
-
- if(biases != nullptr)
- {
- if(is_data_type_quantized_asymmetric(input->data_type()))
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
- }
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(biases->dimension(0) != weights->dimension(3),
- "Biases size and number of input feature maps should match");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(biases->num_dimensions() > 1,
- "Biases should be one dimensional");
- }
-
- // Checks performed when output is configured
- if(output->total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(),
- misc::shape_calculator::compute_deep_convolution_shape(*input, *weights, conv_info));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- }
-
- const auto data_type = input->data_type();
- if(is_data_type_quantized(data_type))
- {
- const UniformQuantizationInfo iqinfo = input->quantization_info().uniform();
- const UniformQuantizationInfo wqinfo = weights->quantization_info().uniform();
- const UniformQuantizationInfo oqinfo = output->quantization_info().uniform();
-
- float multiplier = iqinfo.scale * wqinfo.scale / oqinfo.scale;
- int output_multiplier = 0;
- int output_shift = 0;
- ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));
- }
- return Status{};
-}
-
-inline bool can_run_optimized_kernel_for_bifrost_nchw(GPUTarget gpu_target, unsigned int conv_stride_x, unsigned int conv_stride_y, unsigned int kernel_size,
- DataType data_type, DataLayout data_layout)
-{
- return gpu_target_is_in(gpu_target,
- GPUTarget::G71, GPUTarget::G72, GPUTarget::G76,
- GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT,
- GPUTarget::G52, GPUTarget::G52LIT)
- && (kernel_size <= 5)
- && (conv_stride_x == 1) && (conv_stride_y == 1)
- && (data_type == DataType::F32)
- && (data_layout == DataLayout::NCHW);
-}
-
-inline void setup_num_elems_nchw(unsigned int &num_elems_read_per_iteration_x, unsigned int &num_elems_read_per_iteration_y,
- unsigned int &num_elems_written_per_iteration_x, unsigned int &num_elems_written_per_iteration_y,
- unsigned int kernel_size, const PadStrideInfo &conv_info, const GPUTarget target, ITensorInfo *input)
-{
- const DataType data_type = input->data_type();
- const DataLayout data_layout = input->data_layout();
- unsigned int conv_stride_x = std::get<0>(conv_info.stride());
- unsigned int conv_stride_y = std::get<1>(conv_info.stride());
-
- const bool run_optimized_bifrost = can_run_optimized_kernel_for_bifrost_nchw(target, conv_stride_x, conv_stride_y, kernel_size, data_type, data_layout);
-
- if(run_optimized_bifrost)
- {
- // Configure kernel window
- switch(kernel_size)
- {
- case 1:
- {
- num_elems_read_per_iteration_x = 4;
- num_elems_read_per_iteration_y = 4;
- num_elems_written_per_iteration_x = 4;
- num_elems_written_per_iteration_y = 4;
- break;
- }
- case 3:
- {
- num_elems_read_per_iteration_x = 6;
- num_elems_read_per_iteration_y = 5;
- num_elems_written_per_iteration_x = 4;
- num_elems_written_per_iteration_y = 3;
- break;
- }
- case 5:
- {
- num_elems_read_per_iteration_x = 8;
- num_elems_read_per_iteration_y = 6;
- num_elems_written_per_iteration_x = 4;
- num_elems_written_per_iteration_y = 2;
- break;
- }
- default:
- {
- ARM_COMPUTE_ERROR("Kernel size not optimized for Bifrost");
- }
- }
- }
- else
- {
- num_elems_read_per_iteration_y = kernel_size;
- num_elems_written_per_iteration_x = 8;
- num_elems_written_per_iteration_y = 1;
- switch(kernel_size)
- {
- case 1:
- switch(conv_stride_x)
- {
- case 1:
- num_elems_read_per_iteration_x = 8;
- break;
- case 2:
- num_elems_read_per_iteration_x = 16;
- break;
- case 3:
- switch(input->element_size())
- {
- case 1:
- num_elems_read_per_iteration_x = 28;
- break;
- case 2:
- num_elems_read_per_iteration_x = 24;
- break;
- case 4:
- num_elems_read_per_iteration_x = 22;
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid data size");
- }
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid convolution stride X");
- }
- break;
- case 3:
- switch(conv_stride_x)
- {
- case 1:
- num_elems_read_per_iteration_x = 10;
- break;
- case 2:
- num_elems_read_per_iteration_x = 17;
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid convolution stride X");
- }
- break;
- case 5:
- switch(conv_stride_x)
- {
- case 1:
- num_elems_read_per_iteration_x = 12;
- break;
- case 2:
- num_elems_read_per_iteration_x = 20;
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid convolution stride X");
- }
- break;
- case 9:
- switch(conv_stride_x)
- {
- case 1:
- num_elems_read_per_iteration_x = 16;
- break;
- case 2:
- num_elems_read_per_iteration_x = 24;
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid convolution stride X");
- }
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid direct convolution size");
- }
- }
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *output, const PadStrideInfo &conv_info, const GPUTarget target)
-{
- const DataLayout data_layout = input->data_layout();
-
- // Get output shape
- TensorShape output_shape = misc::shape_calculator::compute_deep_convolution_shape(*input, *weights, conv_info);
-
- // Output auto inizialitation if not yet initialized
- auto_init_if_empty(*output, output_shape,
- 1,
- input->data_type(),
- input->quantization_info());
-
- if(data_layout == DataLayout::NHWC)
- {
- const unsigned int vec_size = std::min(static_cast<unsigned int>(output->tensor_shape()[0]), 4u);
-
- // Create window and update padding
- Window win = calculate_max_window(*output, Steps(vec_size, 1U));
- output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
- Status err = Status{};
- return std::make_pair(err, win);
- }
- else if(data_layout == DataLayout::NCHW)
- {
- const int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const unsigned int kernel_size = weights->dimension(width_idx);
-
- unsigned int num_elems_read_per_iteration_x = 0;
- unsigned int num_elems_read_per_iteration_y = 0;
- unsigned int num_elems_written_per_iteration_x = 0;
- unsigned int num_elems_written_per_iteration_y = 0;
-
- unsigned int conv_pad_left = conv_info.pad_left();
- unsigned int conv_pad_top = conv_info.pad_top();
- unsigned int conv_stride_x = std::get<0>(conv_info.stride());
- unsigned int conv_stride_y = std::get<1>(conv_info.stride());
-
- setup_num_elems_nchw(num_elems_read_per_iteration_x, num_elems_read_per_iteration_y,
- num_elems_written_per_iteration_x, num_elems_written_per_iteration_y,
- kernel_size, conv_info, target, input);
-
- // Create window and update padding
- bool window_changed = false;
- Window win = calculate_max_window(*output, Steps(num_elems_written_per_iteration_x, num_elems_written_per_iteration_y));
-
- AccessWindowRectangle input_access(input, -conv_pad_left, -conv_pad_top, num_elems_read_per_iteration_x, num_elems_read_per_iteration_y, conv_stride_x, conv_stride_y);
- AccessWindowStatic weights_access(weights, 0, 0, kernel_size, kernel_size);
- AccessWindowRectangle output_access(output, 0, 0, num_elems_written_per_iteration_x, num_elems_written_per_iteration_y);
- window_changed = update_window_and_padding(win, input_access, weights_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
- }
- else
- {
- ARM_COMPUTE_ERROR("Not supported");
- }
-}
-} // namespace
-
-CLDirectConvolutionLayerKernel::CLDirectConvolutionLayerKernel()
- : _input(nullptr), _biases(nullptr), _weights(nullptr), _output(nullptr), _data_layout(DataLayout::UNKNOWN), _border_size(0), _conv_stride_x(0), _conv_stride_y(0), _conv_info()
-{
-}
-
-BorderSize CLDirectConvolutionLayerKernel::border_size() const
-{
- return _border_size;
-}
-
-void CLDirectConvolutionLayerKernel::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info);
-}
-
-void CLDirectConvolutionLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
- const PadStrideInfo &conv_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
-
- // Perform validation
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(),
- weights->info(),
- (biases != nullptr) ? biases->info() : nullptr,
- output->info(),
- conv_info));
-
- _conv_stride_x = std::get<0>(conv_info.stride());
- _conv_stride_y = std::get<1>(conv_info.stride());
- _data_layout = input->info()->data_layout();
- _input = input;
- _weights = weights;
- _output = output;
- _biases = biases;
- _conv_info = conv_info;
-
- const unsigned int width_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
- const unsigned int height_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
- const unsigned int channel_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::CHANNEL);
- const unsigned int kernel_size = weights->info()->dimension(width_idx);
- const DataType data_type = input->info()->data_type();
-
- const GPUTarget gpu_target = get_target();
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), weights->info(), output->info(), conv_info, gpu_target);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
-
- std::stringstream kernel_name;
- CLBuildOptions build_options;
-
- if(_data_layout == DataLayout::NHWC)
- {
- _border_size = BorderSize();
-
- kernel_name << "direct_convolution_nhwc";
-
- const unsigned int n0 = win_config.second.x().step();
- const unsigned int m0 = win_config.second.y().step();
- const unsigned int k0 = adjust_vec_size(16u, _input->info()->dimension(channel_idx));
- const unsigned int partial_store_n0 = _output->info()->dimension(channel_idx) % n0;
- const unsigned int partial_store_m0 = (_output->info()->dimension(width_idx) * _output->info()->dimension(height_idx)) % m0;
- const unsigned int pad_left = conv_info.pad_left();
- const unsigned int pad_top = conv_info.pad_top();
-
- if(_biases != nullptr)
- {
- build_options.add_option(std::string("-DHAS_BIAS"));
- build_options.add_option(std::string("-DBIA_DATA_TYPE=" + get_cl_type_from_data_type(_biases->info()->data_type())));
- }
- build_options.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(_input->info()->dimension(width_idx)));
- build_options.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(_input->info()->dimension(height_idx)));
- build_options.add_option("-DSRC_CHANNELS=" + support::cpp11::to_string(_input->info()->dimension(channel_idx)));
- build_options.add_option("-DSRC_DATA_TYPE=" + get_cl_type_from_data_type(_input->info()->data_type()));
- build_options.add_option("-DDST_WIDTH=" + support::cpp11::to_string(_output->info()->dimension(width_idx)));
- build_options.add_option("-DDST_HEIGHT=" + support::cpp11::to_string(_output->info()->dimension(height_idx)));
- build_options.add_option("-DDST_CHANNELS=" + support::cpp11::to_string(_output->info()->dimension(channel_idx)));
- build_options.add_option("-DDST_DATA_TYPE=" + get_cl_type_from_data_type(_output->info()->data_type()));
- build_options.add_option("-DWEI_WIDTH=" + support::cpp11::to_string(_weights->info()->dimension(width_idx)));
- build_options.add_option("-DWEI_HEIGHT=" + support::cpp11::to_string(_weights->info()->dimension(height_idx)));
- build_options.add_option("-DWEI_DATA_TYPE=" + get_cl_type_from_data_type(_weights->info()->data_type()));
- build_options.add_option("-DSTRIDE_X=" + support::cpp11::to_string(_conv_stride_x));
- build_options.add_option("-DSTRIDE_Y=" + support::cpp11::to_string(_conv_stride_y));
- build_options.add_option("-DPAD_LEFT=" + support::cpp11::to_string(pad_left));
- build_options.add_option("-DPAD_TOP=" + support::cpp11::to_string(pad_top));
- build_options.add_option("-DN0=" + support::cpp11::to_string(n0));
- build_options.add_option("-DM0=" + support::cpp11::to_string(m0));
- build_options.add_option("-DK0=" + support::cpp11::to_string(k0));
- build_options.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
- build_options.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
-
- if(is_data_type_quantized(data_type))
- {
- const UniformQuantizationInfo iqinfo = _input->info()->quantization_info().uniform();
- const UniformQuantizationInfo wqinfo = _weights->info()->quantization_info().uniform();
- const UniformQuantizationInfo oqinfo = _output->info()->quantization_info().uniform();
-
- PixelValue zero_value = PixelValue(0, input->info()->data_type(), input->info()->quantization_info());
- int zero_value_s32;
- zero_value.get(zero_value_s32);
-
- float multiplier = iqinfo.scale * wqinfo.scale / oqinfo.scale;
- int output_multiplier = 0;
- int output_shift = 0;
- quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
- build_options.add_option("-DIS_QUANTIZED");
- build_options.add_option("-DDST_MULTIPLIER=" + support::cpp11::to_string(output_multiplier));
- build_options.add_option("-DDST_SHIFT=" + support::cpp11::to_string(output_shift));
- build_options.add_option("-DSRC_OFFSET=" + support::cpp11::to_string(-iqinfo.offset));
- build_options.add_option("-DWEI_OFFSET=" + support::cpp11::to_string(-wqinfo.offset));
- build_options.add_option("-DDST_OFFSET=" + support::cpp11::to_string(oqinfo.offset));
- build_options.add_option("-DZERO_VALUE=" + support::cpp11::to_string(zero_value_s32));
- build_options.add_option("-DACC_DATA_TYPE=" + get_cl_type_from_data_type(DataType::S32));
- }
- else
- {
- build_options.add_option("-DACC_DATA_TYPE=" + get_cl_type_from_data_type(data_type));
- build_options.add_option("-DSRC_OFFSET=" + support::cpp11::to_string(0));
- build_options.add_option("-DWEI_OFFSET=" + support::cpp11::to_string(0));
- build_options.add_option("-DDST_OFFSET=" + support::cpp11::to_string(0));
- }
- }
- else
- {
- _border_size = BorderSize(_input->info()->padding());
-
- kernel_name << "direct_convolution" << kernel_size << "x" << kernel_size;
-
- build_options.add_option_if(_biases != nullptr, std::string("-DHAS_BIAS"));
-
- const bool run_optimized_for_bifrost = can_run_optimized_kernel_for_bifrost_nchw(gpu_target, _conv_stride_x, _conv_stride_y, kernel_size, data_type, _data_layout);
-
- if(run_optimized_for_bifrost)
- {
- build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(_weights->info()->dimension(channel_idx))));
-
- kernel_name << "_f32_bifrost";
- }
- else
- {
- build_options.add_option(std::string("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type)));
- build_options.add_option(std::string("-DDATA_SIZE=" + get_data_size_from_data_type(data_type)));
- build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(_weights->info()->dimension(channel_idx))));
- build_options.add_option(std::string("-DSTRIDE_X=" + support::cpp11::to_string(_conv_stride_x)));
- build_options.add_option(std::string("-DDATA_TYPE_PROMOTED=" + get_cl_type_from_data_type(data_type)));
-
- if(is_data_type_quantized(data_type))
- {
- const UniformQuantizationInfo iqinfo = _input->info()->quantization_info().uniform();
- const UniformQuantizationInfo wqinfo = _weights->info()->quantization_info().uniform();
- const UniformQuantizationInfo oqinfo = _output->info()->quantization_info().uniform();
-
- float multiplier = iqinfo.scale * wqinfo.scale / oqinfo.scale;
- int output_multiplier = 0;
- int output_shift = 0;
- quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
- build_options.add_option("-DOUTPUT_MULTIPLIER=" + support::cpp11::to_string(output_multiplier));
- build_options.add_option("-DOUTPUT_SHIFT=" + support::cpp11::to_string(output_shift));
- build_options.add_option("-DKERNEL_SIZE=" + support::cpp11::to_string(kernel_size));
- build_options.add_option("-DINPUT_OFFSET=" + support::cpp11::to_string(-iqinfo.offset));
- build_options.add_option("-DWEIGHTS_OFFSET=" + support::cpp11::to_string(-wqinfo.offset));
- build_options.add_option("-DOUTPUT_OFFSET=" + support::cpp11::to_string(oqinfo.offset));
-
- kernel_name.str("direct_convolution_quantized");
- }
- }
- }
-
- _kernel = create_kernel(compile_context, kernel_name.str(), build_options.options());
-
- // Set config_id for enabling LWS tuning
- _config_id = kernel_name.str();
- _config_id += "_";
- _config_id += lower_string(string_from_data_type(data_type));
- _config_id += "_";
- _config_id += support::cpp11::to_string(kernel_size);
- _config_id += "_";
- _config_id += support::cpp11::to_string(border_size().left);
- _config_id += "_";
- _config_id += support::cpp11::to_string(border_size().top);
- _config_id += "_";
- _config_id += support::cpp11::to_string(border_size().right);
- _config_id += "_";
- _config_id += support::cpp11::to_string(border_size().bottom);
- _config_id += "_";
- _config_id += support::cpp11::to_string(_conv_stride_x);
- _config_id += "_";
- _config_id += support::cpp11::to_string(_conv_stride_y);
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(width_idx));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(height_idx));
- _config_id += "_";
- _config_id += lower_string(string_from_data_layout(_data_layout));
-}
-
-Status CLDirectConvolutionLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
- const GPUTarget target)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, conv_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), weights->clone().get(), output->clone().get(), conv_info, target).first);
-
- return Status{};
-}
-
-void CLDirectConvolutionLayerKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
-
- // Get initial windows
- Window slice = window.first_slice_window_3D();
-
- if(_data_layout == DataLayout::NHWC)
- {
- slice.set(Window::DimY, Window::Dimension(0, _output->info()->dimension(1) * _output->info()->dimension(2), 1));
- slice.set(Window::DimZ, Window::Dimension(0, _output->info()->dimension(3), 1));
-
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, slice);
- add_3D_tensor_argument(idx, _output, slice);
- add_3D_tensor_argument(idx, _weights, slice);
- if(_biases != nullptr)
- {
- add_1D_tensor_argument(idx, _biases, slice);
- }
- _kernel.setArg(idx++, static_cast<unsigned int>(_weights->info()->strides_in_bytes()[3]));
- enqueue(queue, *this, slice, lws_hint());
- }
- else
- {
- Window win_in = window;
-
- win_in.adjust(Window::DimX, -_conv_info.pad_left(), true);
- win_in.adjust(Window::DimY, -_conv_info.pad_top(), true);
-
- const int width_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
- const int height_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
-
- win_in.set_dimension_step(width_idx, window[width_idx].step() * _conv_stride_x);
- win_in.set_dimension_step(height_idx, window[height_idx].step() * _conv_stride_y);
-
- Window slice_in = win_in.first_slice_window_3D();
- unsigned int idx1 = 2 * num_arguments_per_3D_tensor();
- add_3D_tensor_argument(idx1, _weights, slice);
-
- if(_biases != nullptr)
- {
- Window slice_biases;
- slice_biases.use_tensor_dimensions(_biases->info()->tensor_shape());
- add_1D_tensor_argument(idx1, _biases, slice_biases);
- }
-
- _kernel.setArg(idx1++, static_cast<unsigned int>(_weights->info()->strides_in_bytes()[3]));
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, slice_in);
- add_3D_tensor_argument(idx, _output, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_3D(slice) && win_in.slide_window_slice_3D(slice_in));
- }
-}
-} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.h b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.h
deleted file mode 100644
index 0257d0c2dd..0000000000
--- a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLDIRECTCONVOLUTIONLAYERKERNEL_H
-#define ARM_COMPUTE_CLDIRECTCONVOLUTIONLAYERKERNEL_H
-
-#include "arm_compute/core/Types.h"
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the direct convolution kernel.
- */
-class CLDirectConvolutionLayerKernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLDirectConvolutionLayerKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLDirectConvolutionLayerKernel(const CLDirectConvolutionLayerKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLDirectConvolutionLayerKernel &operator=(const CLDirectConvolutionLayerKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLDirectConvolutionLayerKernel(CLDirectConvolutionLayerKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLDirectConvolutionLayerKernel &operator=(CLDirectConvolutionLayerKernel &&) = default;
- /** Default destructor */
- ~CLDirectConvolutionLayerKernel() = default;
- /** Set the input, weights, biases and output tensors.
- *
- * @note: DirectConvolution only works in the following configurations:
- * 1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3
- * 3x3 convolution with stride_x = 1/2, stride_y = 1/2
- * 5x5 convolution with stride_x = 1/2, stride_y = 1/2
- * 9x9 convolution with stride_x = 1/2, stride_y = 1/2
- *
- * @param[in] input The input tensor to convolve. 3 lower dimensions represent a single input [width, height, IFM],
- * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
- * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
- * The 3rd dimension must be the same as the input's volume 3rd dimension.
- * Data type supported:Same as @p input.
- * @param[in] biases Biases tensor. Biases are 1D tensor with dimension [OFM].
- * Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type
- * @param[out] output Output tensor.
- * The 3rd dimensions must be equal to the 4th dimension of the @p kernels tensor. Data types supported: Same as @p input.
- * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
- */
- void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info);
- /** Set the input, weights, biases and output tensors.
- *
- * @note: DirectConvolution only works in the following configurations:
- * 1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3
- * 3x3 convolution with stride_x = 1/2, stride_y = 1/2
- * 5x5 convolution with stride_x = 1/2, stride_y = 1/2
- * 9x9 convolution with stride_x = 1/2, stride_y = 1/2
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input The input tensor to convolve. 3 lower dimensions represent a single input [width, height, IFM],
- * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
- * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
- * The 3rd dimension must be the same as the input's volume 3rd dimension.
- * Data type supported:Same as @p input.
- * @param[in] biases Biases tensor. Biases are 1D tensor with dimension [OFM].
- * Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type
- * @param[out] output Output tensor.
- * The 3rd dimensions must be equal to the 4th dimension of the @p kernels tensor. Data types supported: Same as @p input.
- * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info);
- /** Static function to check if given info will lead to a valid configuration of @ref CLDirectConvolutionLayerKernel
- *
- * @param[in] input The input tensor to convolve. 3 lower dimensions represent a single input [width, height, IFM],
- * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
- * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
- * The 3rd dimension must be the same as the input's volume 3rd dimension.
- * Data type supported:Same as @p input.
- * @param[in] biases Biases tensor. Biases are 1D tensor with dimension [OFM].
- * Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type.
- * @param[in] output Output tensor.
- * The 3rd dimensions must be equal to the 4th dimension of the @p kernels tensor. Data types supported: Same as @p input.
- * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
- * @param[in] target Target GPU architecture.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, const GPUTarget target);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
- BorderSize border_size() const override;
-
-public:
- const ICLTensor *_input;
- const ICLTensor *_biases;
- const ICLTensor *_weights;
- ICLTensor *_output;
- DataLayout _data_layout;
- BorderSize _border_size;
- int _conv_stride_x;
- int _conv_stride_y;
- PadStrideInfo _conv_info;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLDIRECTCONVOLUTIONLAYERKERNEL_H */
diff --git a/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp b/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp
index 22eee11c8a..3d8f875ef7 100644
--- a/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp
+++ b/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,6 +26,9 @@
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
@@ -35,17 +38,20 @@ namespace arm_compute
{
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *idx, const FFTDigitReverseKernelInfo &config)
+Status validate_arguments(const ITensorInfo *input,
+ const ITensorInfo *output,
+ const ITensorInfo *idx,
+ const FFTDigitReverseKernelInfo &config)
{
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON(input->num_channels() != 1 && input->num_channels() != 2);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(idx, 1, DataType::U32);
- ARM_COMPUTE_RETURN_ERROR_ON(std::set<unsigned int>({ 0, 1 }).count(config.axis) == 0);
+ ARM_COMPUTE_RETURN_ERROR_ON(std::set<unsigned int>({0, 1}).count(config.axis) == 0);
ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[config.axis] != idx->tensor_shape().x());
// Checks performed when output is configured
- if((output != nullptr) && (output->total_size() != 0))
+ if ((output != nullptr) && (output->total_size() != 0))
{
ARM_COMPUTE_RETURN_ERROR_ON(output->num_channels() != 2);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
@@ -55,33 +61,42 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
return Status{};
}
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, ITensorInfo *idx, const FFTDigitReverseKernelInfo &config)
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input,
+ ITensorInfo *output,
+ ITensorInfo *idx,
+ const FFTDigitReverseKernelInfo &config)
{
ARM_COMPUTE_UNUSED(idx, config);
auto_init_if_empty(*output, input->clone()->set_num_channels(2));
Window win = calculate_max_window(*output, Steps());
- output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
return std::make_pair(Status{}, win);
}
} // namespace
-CLFFTDigitReverseKernel::CLFFTDigitReverseKernel()
- : _input(nullptr), _output(nullptr), _idx(nullptr)
+CLFFTDigitReverseKernel::CLFFTDigitReverseKernel() : _input(nullptr), _output(nullptr), _idx(nullptr)
{
+ _type = CLKernelType::ELEMENTWISE;
}
-void CLFFTDigitReverseKernel::configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *idx, const FFTDigitReverseKernelInfo &config)
+void CLFFTDigitReverseKernel::configure(const ICLTensor *input,
+ ICLTensor *output,
+ const ICLTensor *idx,
+ const FFTDigitReverseKernelInfo &config)
{
configure(CLKernelLibrary::get().get_compile_context(), input, output, idx, config);
}
-void CLFFTDigitReverseKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *idx, const FFTDigitReverseKernelInfo &config)
+void CLFFTDigitReverseKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ const ICLTensor *idx,
+ const FFTDigitReverseKernelInfo &config)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, idx);
- auto padding_info = get_padding_info({ input, output, idx });
+ auto padding_info = get_padding_info({input, output, idx});
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), idx->info(), config));
_input = input;
@@ -112,10 +127,14 @@ void CLFFTDigitReverseKernel::configure(const CLCompileContext &compile_context,
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-Status CLFFTDigitReverseKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *idx, const FFTDigitReverseKernelInfo &config)
+Status CLFFTDigitReverseKernel::validate(const ITensorInfo *input,
+ const ITensorInfo *output,
+ const ITensorInfo *idx,
+ const FFTDigitReverseKernelInfo &config)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, idx, config));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), idx->clone().get(), config).first);
+ ARM_COMPUTE_RETURN_ON_ERROR(
+ validate_and_configure_window(input->clone().get(), output->clone().get(), idx->clone().get(), config).first);
return Status{};
}
@@ -135,7 +154,6 @@ void CLFFTDigitReverseKernel::run(const Window &window, cl::CommandQueue &queue)
add_3D_tensor_argument(idx, _output, slice);
add_1D_tensor_argument(idx, _idx, slice);
enqueue(queue, *this, slice, lws_hint());
- }
- while(collapsed.slide_window_slice_3D(slice));
+ } while (collapsed.slide_window_slice_3D(slice));
}
} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLFFTDigitReverseKernel.h b/src/core/CL/kernels/CLFFTDigitReverseKernel.h
index e5583a4c22..fdd1bcc3d3 100644
--- a/src/core/CL/kernels/CLFFTDigitReverseKernel.h
+++ b/src/core/CL/kernels/CLFFTDigitReverseKernel.h
@@ -24,10 +24,10 @@
#ifndef ARM_COMPUTE_CLFFTDIGITREVERSEKERNEL_H
#define ARM_COMPUTE_CLFFTDIGITREVERSEKERNEL_H
-#include "src/core/CL/ICLKernel.h"
-
#include "arm_compute/core/KernelDescriptors.h"
+#include "src/core/CL/ICLKernel.h"
+
namespace arm_compute
{
// Forward declarations
@@ -56,7 +56,8 @@ public:
* @param[in] idx Digit reverse index tensor. Data type supported: U32
* @param[in] config Kernel configuration.
*/
- void configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *idx, const FFTDigitReverseKernelInfo &config);
+ void
+ configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *idx, const FFTDigitReverseKernelInfo &config);
/** Set the input and output tensors.
*
* @param[in] compile_context The compile context to be used.
@@ -65,7 +66,11 @@ public:
* @param[in] idx Digit reverse index tensor. Data type supported: U32
* @param[in] config Kernel configuration.
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *idx, const FFTDigitReverseKernelInfo &config);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ const ICLTensor *idx,
+ const FFTDigitReverseKernelInfo &config);
/** Static function to check if given info will lead to a valid configuration of @ref CLFFTDigitReverseKernel
*
* @param[in] input Source tensor info. Data types supported: F16/F32.
@@ -75,7 +80,10 @@ public:
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *idx, const FFTDigitReverseKernelInfo &config);
+ static Status validate(const ITensorInfo *input,
+ const ITensorInfo *output,
+ const ITensorInfo *idx,
+ const FFTDigitReverseKernelInfo &config);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/src/core/CL/kernels/CLFFTRadixStageKernel.cpp b/src/core/CL/kernels/CLFFTRadixStageKernel.cpp
index 5db3cb6bf2..3729e6b77d 100644
--- a/src/core/CL/kernels/CLFFTRadixStageKernel.cpp
+++ b/src/core/CL/kernels/CLFFTRadixStageKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -28,6 +28,8 @@
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
@@ -45,11 +47,11 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 2, DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON(CLFFTRadixStageKernel::supported_radix().count(config.radix) == 0);
- ARM_COMPUTE_RETURN_ERROR_ON(std::set<unsigned int>({ 0, 1 }).count(config.axis) == 0);
+ ARM_COMPUTE_RETURN_ERROR_ON(std::set<unsigned int>({0, 1}).count(config.axis) == 0);
ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[config.axis] % config.radix);
// Checks performed when output is configured
- if((output != nullptr) && (output->total_size() != 0))
+ if ((output != nullptr) && (output->total_size() != 0))
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
@@ -58,9 +60,10 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
return Status{};
}
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const FFTRadixStageKernelInfo &config)
+std::pair<Status, Window>
+validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const FFTRadixStageKernelInfo &config)
{
- if(output != nullptr)
+ if (output != nullptr)
{
auto_init_if_empty(*output, *input);
}
@@ -70,18 +73,14 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
steps.set(config.axis, config.radix);
Window win = calculate_max_window(*input, steps);
- if(output != nullptr)
- {
- output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
- }
return std::make_pair(Status{}, win);
}
} // namespace
-CLFFTRadixStageKernel::CLFFTRadixStageKernel()
- : _input(nullptr), _output(nullptr), _run_in_place(false)
+CLFFTRadixStageKernel::CLFFTRadixStageKernel() : _input(nullptr), _output(nullptr), _run_in_place(false)
{
+ _type = CLKernelType::ELEMENTWISE;
}
void CLFFTRadixStageKernel::configure(ICLTensor *input, ICLTensor *output, const FFTRadixStageKernelInfo &config)
@@ -89,11 +88,15 @@ void CLFFTRadixStageKernel::configure(ICLTensor *input, ICLTensor *output, const
configure(CLKernelLibrary::get().get_compile_context(), input, output, config);
}
-void CLFFTRadixStageKernel::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const FFTRadixStageKernelInfo &config)
+void CLFFTRadixStageKernel::configure(const CLCompileContext &compile_context,
+ ICLTensor *input,
+ ICLTensor *output,
+ const FFTRadixStageKernelInfo &config)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (output != nullptr) ? output->info() : nullptr, config));
- auto padding_info = get_padding_info({ input, output });
+ ARM_COMPUTE_ERROR_THROW_ON(
+ validate_arguments(input->info(), (output != nullptr) ? output->info() : nullptr, config));
+ auto padding_info = get_padding_info({input, output});
_input = input;
_output = output;
@@ -112,11 +115,12 @@ void CLFFTRadixStageKernel::configure(const CLCompileContext &compile_context, I
_kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Set static arguments if not the first stage
- if(!config.is_first_stage)
+ if (!config.is_first_stage)
{
const unsigned int Ni = config.Nx * config.radix;
const float exp_const = (-2.0 * M_PI) / static_cast<float>(Ni);
- unsigned int idx = (1 + (_run_in_place ? 0 : 1)) * num_arguments_per_3D_tensor(); // Skip the input and output parameters
+ unsigned int idx =
+ (1 + (_run_in_place ? 0 : 1)) * num_arguments_per_3D_tensor(); // Skip the input and output parameters
_kernel.setArg<cl_uint>(idx++, config.Nx);
_kernel.setArg<cl_uint>(idx++, Ni);
_kernel.setArg<cl_float>(idx, exp_const);
@@ -138,21 +142,22 @@ void CLFFTRadixStageKernel::configure(const CLCompileContext &compile_context, I
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-Status CLFFTRadixStageKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const FFTRadixStageKernelInfo &config)
+Status CLFFTRadixStageKernel::validate(const ITensorInfo *input,
+ const ITensorInfo *output,
+ const FFTRadixStageKernelInfo &config)
{
const bool run_in_place = (output == nullptr) || (output == input);
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, config));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(),
- (run_in_place) ? nullptr : output->clone().get(),
- config)
- .first);
+ ARM_COMPUTE_RETURN_ON_ERROR(
+ validate_and_configure_window(input->clone().get(), (run_in_place) ? nullptr : output->clone().get(), config)
+ .first);
return Status{};
}
std::set<unsigned int> CLFFTRadixStageKernel::supported_radix()
{
- return std::set<unsigned int> { 2, 3, 4, 5, 7, 8 };
+ return std::set<unsigned int>{2, 3, 4, 5, 7, 8};
}
void CLFFTRadixStageKernel::run(const Window &window, cl::CommandQueue &queue)
@@ -167,12 +172,11 @@ void CLFFTRadixStageKernel::run(const Window &window, cl::CommandQueue &queue)
{
unsigned int idx = 0;
add_3D_tensor_argument(idx, _input, slice);
- if(!_run_in_place)
+ if (!_run_in_place)
{
add_3D_tensor_argument(idx, _output, slice);
}
enqueue(queue, *this, slice, lws_hint());
- }
- while(collapsed.slide_window_slice_3D(slice));
+ } while (collapsed.slide_window_slice_3D(slice));
}
} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLFFTRadixStageKernel.h b/src/core/CL/kernels/CLFFTRadixStageKernel.h
index 9bb310db83..de80bfced3 100644
--- a/src/core/CL/kernels/CLFFTRadixStageKernel.h
+++ b/src/core/CL/kernels/CLFFTRadixStageKernel.h
@@ -24,10 +24,10 @@
#ifndef ARM_COMPUTE_CLFFTRADIXSTAGEKERNEL_H
#define ARM_COMPUTE_CLFFTRADIXSTAGEKERNEL_H
-#include "src/core/CL/ICLKernel.h"
-
#include "arm_compute/core/KernelDescriptors.h"
+#include "src/core/CL/ICLKernel.h"
+
#include <set>
namespace arm_compute
@@ -69,7 +69,10 @@ public:
* @param[out] output Destination tensor. Can be nullptr. Data type supported: same as @p input
* @param[in] config FFT descriptor metadata.
*/
- void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const FFTRadixStageKernelInfo &config);
+ void configure(const CLCompileContext &compile_context,
+ ICLTensor *input,
+ ICLTensor *output,
+ const FFTRadixStageKernelInfo &config);
/** Static function to check if given info will lead to a valid configuration of @ref CLFFTRadixStageKernel
*
* @param[in] input Source tensor info. Data types supported: F16/F32.
diff --git a/src/core/CL/kernels/CLFFTScaleKernel.cpp b/src/core/CL/kernels/CLFFTScaleKernel.cpp
index edcf5d5a5d..be6e16b074 100644
--- a/src/core/CL/kernels/CLFFTScaleKernel.cpp
+++ b/src/core/CL/kernels/CLFFTScaleKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,6 +26,9 @@
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
@@ -41,7 +44,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 2, DataType::F16, DataType::F32);
// Checks performed when output is configured
- if((output != nullptr) && (output->total_size() != 0))
+ if ((output != nullptr) && (output->total_size() != 0))
{
ARM_COMPUTE_RETURN_ERROR_ON(output->num_channels() != 1 && output->num_channels() != 2);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
@@ -50,30 +53,11 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
return Status{};
}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
-{
- // Configure kernel window
- Window win = calculate_max_window(*input, Steps());
-
- if(output != nullptr)
- {
- // Output auto inizialitation if not yet initialized
- auto_init_if_empty(*output, *input->clone());
-
- // CLFFTScaleKernel doesn't need padding so update_window_and_padding() can be skipped
- Coordinates coord;
- coord.set_num_dimensions(output->num_dimensions());
- output->set_valid_region(ValidRegion(coord, output->tensor_shape()));
- }
-
- return std::make_pair(Status{}, win);
-}
} // namespace
-CLFFTScaleKernel::CLFFTScaleKernel()
- : _input(nullptr), _output(nullptr), _run_in_place(false)
+CLFFTScaleKernel::CLFFTScaleKernel() : _input(nullptr), _output(nullptr), _run_in_place(false)
{
+ _type = CLKernelType::ELEMENTWISE;
}
void CLFFTScaleKernel::configure(ICLTensor *input, ICLTensor *output, const FFTScaleKernelInfo &config)
@@ -81,11 +65,14 @@ void CLFFTScaleKernel::configure(ICLTensor *input, ICLTensor *output, const FFTS
configure(CLKernelLibrary::get().get_compile_context(), input, output, config);
}
-void CLFFTScaleKernel::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const FFTScaleKernelInfo &config)
+void CLFFTScaleKernel::configure(const CLCompileContext &compile_context,
+ ICLTensor *input,
+ ICLTensor *output,
+ const FFTScaleKernelInfo &config)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (output != nullptr) ? output->info() : nullptr));
- auto padding_info = get_padding_info({ input, output });
+ auto padding_info = get_padding_info({input, output});
_input = input;
_output = output;
@@ -94,20 +81,28 @@ void CLFFTScaleKernel::configure(const CLCompileContext &compile_context, ICLTen
// Create kernel
CLBuildOptions build_opts;
build_opts.add_option_if(_run_in_place, "-DIN_PLACE");
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(output != nullptr ? output->info()->num_channels() : input->info()->num_channels()));
+ build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(output != nullptr ? output->info()->num_channels()
+ : input->info()->num_channels()));
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
build_opts.add_option_if(config.conjugate, "-DCONJ");
std::string kernel_name = "fft_scale_conj";
_kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Set static arguments
- unsigned int idx = (1 + (_run_in_place ? 0 : 1)) * num_arguments_per_3D_tensor(); // Skip the input and output parameters
+ unsigned int idx =
+ (1 + (_run_in_place ? 0 : 1)) * num_arguments_per_3D_tensor(); // Skip the input and output parameters
_kernel.setArg<cl_float>(idx, config.scale);
// Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), _run_in_place ? nullptr : output->info());
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
+ Window win = calculate_max_window(*input->info(), Steps());
+
+ if (output != nullptr)
+ {
+ // Output auto inizialitation if not yet initialized
+ auto_init_if_empty(*output->info(), *input->info()->clone());
+ }
+
+ ICLKernel::configure_internal(win);
// Set config_id for enabling LWS tuning
_config_id = kernel_name;
@@ -124,7 +119,6 @@ Status CLFFTScaleKernel::validate(const ITensorInfo *input, const ITensorInfo *o
{
ARM_COMPUTE_UNUSED(config);
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first);
return Status{};
}
@@ -141,12 +135,11 @@ void CLFFTScaleKernel::run(const Window &window, cl::CommandQueue &queue)
{
unsigned int idx = 0;
add_3D_tensor_argument(idx, _input, slice);
- if(!_run_in_place)
+ if (!_run_in_place)
{
add_3D_tensor_argument(idx, _output, slice);
}
enqueue(queue, *this, slice, lws_hint());
- }
- while(collapsed.slide_window_slice_3D(slice));
+ } while (collapsed.slide_window_slice_3D(slice));
}
} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLFFTScaleKernel.h b/src/core/CL/kernels/CLFFTScaleKernel.h
index cc518be193..b995282e02 100644
--- a/src/core/CL/kernels/CLFFTScaleKernel.h
+++ b/src/core/CL/kernels/CLFFTScaleKernel.h
@@ -24,10 +24,10 @@
#ifndef ARM_COMPUTE_CLFFTSCALEKERNEL_H
#define ARM_COMPUTE_CLFFTSCALEKERNEL_H
-#include "src/core/CL/ICLKernel.h"
-
#include "arm_compute/core/KernelDescriptors.h"
+#include "src/core/CL/ICLKernel.h"
+
namespace arm_compute
{
// Forward declarations
@@ -63,7 +63,10 @@ public:
* @param[out] output Destination tensor. Data type supported: same as @p input
* @param[in] config Kernel configuration
*/
- void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const FFTScaleKernelInfo &config);
+ void configure(const CLCompileContext &compile_context,
+ ICLTensor *input,
+ ICLTensor *output,
+ const FFTScaleKernelInfo &config);
/** Static function to check if given info will lead to a valid configuration of @ref CLFFTScaleKernel
*
* @param[in] input Source tensor info. Data types supported: F16/F32.
diff --git a/src/core/CL/kernels/CLFillBorderKernel.cpp b/src/core/CL/kernels/CLFillBorderKernel.cpp
index 840ed0ca2f..86bb502da3 100644
--- a/src/core/CL/kernels/CLFillBorderKernel.cpp
+++ b/src/core/CL/kernels/CLFillBorderKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,16 +29,18 @@
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/StringUtils.h"
#include "arm_compute/core/Validate.h"
+
#include "src/core/helpers/WindowHelpers.h"
#include "support/Cast.h"
#include "support/StringSupport.h"
namespace arm_compute
{
-CLFillBorderKernel::CLFillBorderKernel()
- : ICLKernel(), _tensor(nullptr)
+CLFillBorderKernel::CLFillBorderKernel() : ICLKernel(), _tensor(nullptr)
{
+ _type = CLKernelType::ELEMENTWISE;
}
bool CLFillBorderKernel::is_parallelisable() const
@@ -54,27 +56,38 @@ void CLFillBorderKernel::set_constant_border(unsigned int idx, const PixelValue
ICLKernel::add_argument<T>(idx, static_cast<T>(value));
}
-void CLFillBorderKernel::configure(ICLTensor *tensor, BorderSize border_size, BorderMode border_mode, const PixelValue &constant_border_value)
+void CLFillBorderKernel::configure(ICLTensor *tensor,
+ BorderSize border_size,
+ BorderMode border_mode,
+ const PixelValue &constant_border_value)
{
configure(CLKernelLibrary::get().get_compile_context(), tensor, border_size, border_mode, constant_border_value);
}
-void CLFillBorderKernel::configure(const CLCompileContext &compile_context, ICLTensor *tensor, BorderSize border_size, BorderMode border_mode, const PixelValue &constant_border_value)
+void CLFillBorderKernel::configure(const CLCompileContext &compile_context,
+ ICLTensor *tensor,
+ BorderSize border_size,
+ BorderMode border_mode,
+ const PixelValue &constant_border_value)
{
_tensor = tensor;
configure(compile_context, tensor->info(), border_size, border_mode, constant_border_value);
}
-void CLFillBorderKernel::configure(const CLCompileContext &compile_context, ITensorInfo *tensor, BorderSize border_size, BorderMode border_mode, const PixelValue &constant_border_value)
+void CLFillBorderKernel::configure(const CLCompileContext &compile_context,
+ ITensorInfo *tensor,
+ BorderSize border_size,
+ BorderMode border_mode,
+ const PixelValue &constant_border_value)
{
ARM_COMPUTE_ERROR_ON(tensor == nullptr);
ARM_COMPUTE_ERROR_ON(tensor->num_channels() != 1);
- auto padding_info = get_padding_info({ tensor });
+ auto padding_info = get_padding_info({tensor});
border_size.limit(tensor->padding());
// If there is no border: early exit
- if(border_size.empty() || border_mode == BorderMode::UNDEFINED)
+ if (border_size.empty() || border_mode == BorderMode::UNDEFINED)
{
return;
}
@@ -96,25 +109,22 @@ void CLFillBorderKernel::configure(const CLCompileContext &compile_context, ITen
_kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Create static kernel arguments
- const unsigned int valid_width = tensor->valid_region().shape[0];
- const unsigned int valid_height = tensor->valid_region().shape[1];
- const cl_int2 valid_region_coords =
- {
- {
- static_cast<cl_int>(tensor->valid_region().anchor[0]),
- static_cast<cl_int>(tensor->valid_region().anchor[1]),
- }
- };
- const unsigned int total_valid_width = border_size.left + valid_width + border_size.right;
+ const unsigned int valid_width = tensor->valid_region().shape[0];
+ const unsigned int valid_height = tensor->valid_region().shape[1];
+ const cl_int2 valid_region_coords = {{
+ static_cast<cl_int>(tensor->valid_region().anchor[0]),
+ static_cast<cl_int>(tensor->valid_region().anchor[1]),
+ }};
+ const unsigned int total_valid_width = border_size.left + valid_width + border_size.right;
// Set static kernel arguments
unsigned int idx = num_arguments_per_3D_tensor(); // Skip the tensor parameters
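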
ICLKernel::add_argument<cl_uint>(idx, valid_width);
ICLKernel::add_argument<cl_uint>(idx, valid_height);
ICLKernel::add_argument<cl_int2>(idx, valid_region_coords);
- if(BorderMode::CONSTANT == border_mode)
+ if (BorderMode::CONSTANT == border_mode)
{
- switch(dt)
+ switch (dt)
{
case DataType::U8:
case DataType::QASYMM8:
@@ -173,12 +183,13 @@ void CLFillBorderKernel::configure(const CLCompileContext &compile_context, ITen
void CLFillBorderKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
{
// Border mode undefined or border width == 0
- if(_kernel() == nullptr)
+ if (_kernel() == nullptr)
{
return;
}
- const auto tensor = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
+ const auto tensor =
+ utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(ICLKernel::window(), window);
@@ -191,14 +202,13 @@ void CLFillBorderKernel::run_op(ITensorPack &tensors, const Window &window, cl::
unsigned int idx = 0;
add_3D_tensor_argument(idx, tensor, slice);
enqueue(queue, *this, slice, lws_hint());
- }
- while(collapsed.slide_window_slice_3D(slice));
+ } while (collapsed.slide_window_slice_3D(slice));
}
void CLFillBorderKernel::run(const Window &window, cl::CommandQueue &queue)
{
// Border mode undefined or border width == 0
- if(_kernel() == nullptr)
+ if (_kernel() == nullptr)
{
return;
}
@@ -214,7 +224,6 @@ void CLFillBorderKernel::run(const Window &window, cl::CommandQueue &queue)
unsigned int idx = 0;
add_3D_tensor_argument(idx, _tensor, slice);
enqueue(queue, *this, slice, lws_hint());
- }
- while(collapsed.slide_window_slice_3D(slice));
+ } while (collapsed.slide_window_slice_3D(slice));
}
} // namespace arm_compute
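In the configure() hunk above, the valid-region anchor is packed into a cl_int2 and passed, together with the valid width and height, as static kernel arguments. When the border mode is CONSTANT, the border value is also appended as a typed argument through set_constant_border<T>() (defined earlier in this file), with T selected from the tensor's data type. An abridged sketch of that dispatch follows; only two of the supported cases are shown and the fallback wording is assumed:

    switch (dt)
    {
        case DataType::U8:
        case DataType::QASYMM8:
            set_constant_border<uint8_t>(idx, constant_border_value); // forwarded via ICLKernel::add_argument<uint8_t>()
            break;
        case DataType::F32:
            set_constant_border<float>(idx, constant_border_value);
            break;
        default:
            ARM_COMPUTE_ERROR("Not handled"); // assumed fallback
    }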
diff --git a/src/core/CL/kernels/CLFillBorderKernel.h b/src/core/CL/kernels/CLFillBorderKernel.h
index 7951f48171..5782143cf9 100644
--- a/src/core/CL/kernels/CLFillBorderKernel.h
+++ b/src/core/CL/kernels/CLFillBorderKernel.h
@@ -26,6 +26,7 @@
#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Types.h"
+
#include "src/core/CL/ICLKernel.h"
namespace arm_compute
@@ -57,7 +58,11 @@ public:
* @param[in] border_mode Border mode to use for the convolution.
* @param[in] constant_border_value (Optional) Constant value to use for borders if border_mode is set to CONSTANT.
*/
- void configure(const CLCompileContext &compile_context, ICLTensor *tensor, BorderSize border_size, BorderMode border_mode, const PixelValue &constant_border_value = PixelValue());
+ void configure(const CLCompileContext &compile_context,
+ ICLTensor *tensor,
+ BorderSize border_size,
+ BorderMode border_mode,
+ const PixelValue &constant_border_value = PixelValue());
/** Initialise the kernel's input, output and border mode.
*
* @param[in,out] tensor              Tensor to process. Data types supported: U8/QASYMM8/S8/QASYMM8_SIGNED/U16/S16/U32/S32/F16/F32.
@@ -65,7 +70,10 @@ public:
* @param[in] border_mode Border mode to use for the convolution.
* @param[in] constant_border_value (Optional) Constant value to use for borders if border_mode is set to CONSTANT.
*/
- void configure(ICLTensor *tensor, BorderSize border_size, BorderMode border_mode, const PixelValue &constant_border_value = PixelValue());
+ void configure(ICLTensor *tensor,
+ BorderSize border_size,
+ BorderMode border_mode,
+ const PixelValue &constant_border_value = PixelValue());
/** Initialise the kernel's input, output and border mode.
*
* @param[in] compile_context The compile context to be used.
@@ -74,7 +82,11 @@ public:
* @param[in] border_mode Border mode to use for the convolution.
* @param[in] constant_border_value (Optional) Constant value to use for borders if border_mode is set to CONSTANT.
*/
- void configure(const CLCompileContext &compile_context, ITensorInfo *tensor, BorderSize border_size, BorderMode border_mode, const PixelValue &constant_border_value = PixelValue());
+ void configure(const CLCompileContext &compile_context,
+ ITensorInfo *tensor,
+ BorderSize border_size,
+ BorderMode border_mode,
+ const PixelValue &constant_border_value = PixelValue());
/** Function to set the constant value on fill border kernel depending on type.
*
diff --git a/src/core/CL/kernels/CLFuseBatchNormalizationKernel.cpp b/src/core/CL/kernels/CLFuseBatchNormalizationKernel.cpp
index 2116239080..7da0679ae4 100644
--- a/src/core/CL/kernels/CLFuseBatchNormalizationKernel.cpp
+++ b/src/core/CL/kernels/CLFuseBatchNormalizationKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,20 +29,27 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
-
#include "support/StringSupport.h"
namespace arm_compute
{
namespace
{
-Status validate_arguments(const ITensorInfo *input_weights, const ITensorInfo *bn_mean, const ITensorInfo *bn_var,
- const ITensorInfo *fused_weights, const ITensorInfo *fused_bias,
- const ITensorInfo *input_bias, const ITensorInfo *bn_beta, const ITensorInfo *bn_gamma,
- float epsilon, FuseBatchNormalizationType fbn_type)
+Status validate_arguments(const ITensorInfo *input_weights,
+ const ITensorInfo *bn_mean,
+ const ITensorInfo *bn_var,
+ const ITensorInfo *fused_weights,
+ const ITensorInfo *fused_bias,
+ const ITensorInfo *input_bias,
+ const ITensorInfo *bn_beta,
+ const ITensorInfo *bn_gamma,
+ float epsilon,
+ FuseBatchNormalizationType fbn_type)
{
ARM_COMPUTE_UNUSED(epsilon);
ARM_COMPUTE_ERROR_ON_NULLPTR(input_weights, bn_mean, bn_var);
@@ -53,43 +60,44 @@ Status validate_arguments(const ITensorInfo *input_weights, const ITensorInfo *b
ARM_COMPUTE_RETURN_ERROR_ON(input_bias == nullptr && fused_bias == nullptr);
ARM_COMPUTE_RETURN_ERROR_ON(bn_mean->num_dimensions() > 1);
- if(fbn_type == FuseBatchNormalizationType::CONVOLUTION)
+ if (fbn_type == FuseBatchNormalizationType::CONVOLUTION)
{
ARM_COMPUTE_RETURN_ERROR_ON(input_weights->dimension(3) != bn_mean->dimension(0));
}
else
{
- const size_t channel_idx = get_data_layout_dimension_index(input_weights->data_layout(), DataLayoutDimension::CHANNEL);
+ const size_t channel_idx =
+ get_data_layout_dimension_index(input_weights->data_layout(), DataLayoutDimension::CHANNEL);
ARM_COMPUTE_RETURN_ERROR_ON(input_weights->dimension(channel_idx) != bn_mean->dimension(0));
}
// Validate bias
- if(input_bias != nullptr)
+ if (input_bias != nullptr)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(bn_mean, input_bias);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_weights, input_bias);
}
// Validate beta
- if(bn_beta != nullptr)
+ if (bn_beta != nullptr)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(bn_mean, bn_beta);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_weights, bn_beta);
}
// Validate gamma
- if(bn_gamma != nullptr)
+ if (bn_gamma != nullptr)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(bn_mean, bn_gamma);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_weights, bn_gamma);
}
// Validate output weights
- if(fused_weights != nullptr && fused_weights->total_size() != 0)
+ if (fused_weights != nullptr && fused_weights->total_size() != 0)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input_weights, fused_weights);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input_weights, fused_weights);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_weights, fused_weights);
}
// Validate output bias
- if(fused_bias != nullptr && fused_bias->total_size() != 0)
+ if (fused_bias != nullptr && fused_bias->total_size() != 0)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(bn_mean, fused_bias);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_weights, fused_bias);
@@ -100,27 +108,52 @@ Status validate_arguments(const ITensorInfo *input_weights, const ITensorInfo *b
} // namespace
CLFuseBatchNormalizationKernel::CLFuseBatchNormalizationKernel()
- : _input_weights(nullptr), _input_bias(nullptr), _bn_mean(nullptr), _bn_var(nullptr), _bn_gamma(nullptr), _bn_beta(nullptr), _fused_weights(nullptr), _fused_bias(nullptr), _epsilon(),
- _run_in_place_weights(false), _run_in_place_bias(false)
+ : _input_weights(nullptr),
+ _input_bias(nullptr),
+ _bn_mean(nullptr),
+ _bn_var(nullptr),
+ _bn_gamma(nullptr),
+ _bn_beta(nullptr),
+ _fused_weights(nullptr),
+ _fused_bias(nullptr),
+ _epsilon(),
+ _run_in_place_weights(false),
+ _run_in_place_bias(false)
{
+ _type = CLKernelType::ELEMENTWISE;
}
-void CLFuseBatchNormalizationKernel::configure(const ICLTensor *input_weights, const ICLTensor *bn_mean, const ICLTensor *bn_var,
- ICLTensor *fused_weights, ICLTensor *fused_bias,
- const ICLTensor *input_bias, const ICLTensor *bn_beta, const ICLTensor *bn_gamma,
- float epsilon, FuseBatchNormalizationType fbn_type)
+void CLFuseBatchNormalizationKernel::configure(const ICLTensor *input_weights,
+ const ICLTensor *bn_mean,
+ const ICLTensor *bn_var,
+ ICLTensor *fused_weights,
+ ICLTensor *fused_bias,
+ const ICLTensor *input_bias,
+ const ICLTensor *bn_beta,
+ const ICLTensor *bn_gamma,
+ float epsilon,
+ FuseBatchNormalizationType fbn_type)
{
- configure(CLKernelLibrary::get().get_compile_context(), input_weights, bn_mean, bn_var, fused_weights, fused_bias, input_bias, bn_beta, bn_gamma, epsilon, fbn_type);
+ configure(CLKernelLibrary::get().get_compile_context(), input_weights, bn_mean, bn_var, fused_weights, fused_bias,
+ input_bias, bn_beta, bn_gamma, epsilon, fbn_type);
}
-void CLFuseBatchNormalizationKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input_weights, const ICLTensor *bn_mean, const ICLTensor *bn_var,
- ICLTensor *fused_weights, ICLTensor *fused_bias,
- const ICLTensor *input_bias, const ICLTensor *bn_beta, const ICLTensor *bn_gamma,
- float epsilon, FuseBatchNormalizationType fbn_type)
+void CLFuseBatchNormalizationKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input_weights,
+ const ICLTensor *bn_mean,
+ const ICLTensor *bn_var,
+ ICLTensor *fused_weights,
+ ICLTensor *fused_bias,
+ const ICLTensor *input_bias,
+ const ICLTensor *bn_beta,
+ const ICLTensor *bn_gamma,
+ float epsilon,
+ FuseBatchNormalizationType fbn_type)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input_weights, bn_mean, bn_var);
- auto padding_info = get_padding_info({ input_weights, bn_mean, bn_var, fused_weights, fused_bias, input_bias, bn_beta, bn_gamma });
+ auto padding_info =
+ get_padding_info({input_weights, bn_mean, bn_var, fused_weights, fused_bias, input_bias, bn_beta, bn_gamma});
_input_weights = input_weights;
_input_bias = input_bias;
@@ -133,28 +166,28 @@ void CLFuseBatchNormalizationKernel::configure(const CLCompileContext &compile_c
_epsilon = epsilon;
_run_in_place_weights = (fused_weights == nullptr) || (fused_weights == input_weights);
- _run_in_place_bias = (input_bias != nullptr && fused_bias == nullptr) || (input_bias != nullptr && fused_bias == input_bias);
+ _run_in_place_bias =
+ (input_bias != nullptr && fused_bias == nullptr) || (input_bias != nullptr && fused_bias == input_bias);
// Auto initialize outputs
- if(_fused_weights != nullptr)
+ if (_fused_weights != nullptr)
{
// Output tensor auto initialization if not yet initialized
auto_init_if_empty(*_fused_weights->info(), *_input_weights->info()->clone());
}
- if(_fused_bias != nullptr)
+ if (_fused_bias != nullptr)
{
// Output tensor auto initialization if not yet initialized
auto_init_if_empty(*_fused_bias->info(), *_bn_mean->info()->clone());
}
// Validate arguments
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input_weights->info(), bn_mean->info(), bn_var->info(),
- (fused_weights != nullptr) ? fused_weights->info() : nullptr,
- (fused_bias != nullptr) ? fused_bias->info() : nullptr,
- (input_bias != nullptr) ? input_bias->info() : nullptr,
- (bn_beta != nullptr) ? bn_beta->info() : nullptr,
- (bn_gamma != nullptr) ? bn_gamma->info() : nullptr,
- epsilon, fbn_type));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(
+ input_weights->info(), bn_mean->info(), bn_var->info(),
+ (fused_weights != nullptr) ? fused_weights->info() : nullptr,
+ (fused_bias != nullptr) ? fused_bias->info() : nullptr, (input_bias != nullptr) ? input_bias->info() : nullptr,
+ (bn_beta != nullptr) ? bn_beta->info() : nullptr, (bn_gamma != nullptr) ? bn_gamma->info() : nullptr, epsilon,
+ fbn_type));
// Configure kernel window
Window win = calculate_max_window(*input_weights->info());
@@ -163,7 +196,8 @@ void CLFuseBatchNormalizationKernel::configure(const CLCompileContext &compile_c
// Set build options
CLBuildOptions build_opts;
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input_weights->info()->data_type()));
- build_opts.add_option_if(fbn_type == FuseBatchNormalizationType::CONVOLUTION, "-DDIM2=" + support::cpp11::to_string(input_weights->info()->dimension(2)));
+ build_opts.add_option_if(fbn_type == FuseBatchNormalizationType::CONVOLUTION,
+ "-DDIM2=" + support::cpp11::to_string(input_weights->info()->dimension(2)));
build_opts.add_option("-DEPSILON=" + float_to_string_with_full_precision(epsilon));
build_opts.add_option_if(_input_weights->info()->data_layout() == DataLayout::NHWC, "-DNHWC");
build_opts.add_option_if(_run_in_place_weights, "-DIN_PLACE_W");
@@ -178,12 +212,19 @@ void CLFuseBatchNormalizationKernel::configure(const CLCompileContext &compile_c
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-Status CLFuseBatchNormalizationKernel::validate(const ITensorInfo *input_weights, const ITensorInfo *bn_mean, const ITensorInfo *bn_var,
- const ITensorInfo *fused_weights, const ITensorInfo *fused_bias,
- const ITensorInfo *input_bias, const ITensorInfo *bn_beta, const ITensorInfo *bn_gamma,
- float epsilon, FuseBatchNormalizationType fbn_type)
+Status CLFuseBatchNormalizationKernel::validate(const ITensorInfo *input_weights,
+ const ITensorInfo *bn_mean,
+ const ITensorInfo *bn_var,
+ const ITensorInfo *fused_weights,
+ const ITensorInfo *fused_bias,
+ const ITensorInfo *input_bias,
+ const ITensorInfo *bn_beta,
+ const ITensorInfo *bn_gamma,
+ float epsilon,
+ FuseBatchNormalizationType fbn_type)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input_weights, bn_mean, bn_var, fused_weights, fused_bias, input_bias, bn_beta, bn_gamma, epsilon, fbn_type));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input_weights, bn_mean, bn_var, fused_weights, fused_bias,
+ input_bias, bn_beta, bn_gamma, epsilon, fbn_type));
return Status{};
}
@@ -200,25 +241,25 @@ void CLFuseBatchNormalizationKernel::run(const arm_compute::Window &window, cl::
// Add kernel arguments
unsigned int idx = 0;
add_3D_tensor_argument(idx, _input_weights, slice_3d);
- if(_input_bias != nullptr)
+ if (_input_bias != nullptr)
{
add_1D_tensor_argument(idx, _input_bias, slice_1d);
}
add_1D_tensor_argument(idx, _bn_mean, slice_1d);
add_1D_tensor_argument(idx, _bn_var, slice_1d);
- if(!_run_in_place_weights)
+ if (!_run_in_place_weights)
{
add_3D_tensor_argument(idx, _fused_weights, slice_3d);
}
- if(!_run_in_place_bias)
+ if (!_run_in_place_bias)
{
add_1D_tensor_argument(idx, _fused_bias, slice_1d);
}
- if(_bn_beta != nullptr)
+ if (_bn_beta != nullptr)
{
add_1D_tensor_argument(idx, _bn_beta, slice_1d);
}
- if(_bn_gamma != nullptr)
+ if (_bn_gamma != nullptr)
{
add_1D_tensor_argument(idx, _bn_gamma, slice_1d);
}
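For context on what the reformatted kernel computes: fusing a batch-normalization layer into the preceding convolution or depthwise convolution amounts to rescaling each output channel's weights and rewriting its bias. The sketch below states the standard folding identity rather than quoting the CL source, so treat it as a hedged host-side reference; absent optional inputs behave as bias = 0, beta = 0, gamma = 1, matching the nullptr defaults in the interface.

    #include <cmath>

    // Per-channel folding factors: every weight of channel c is multiplied by 'scale'
    // and 'bias' becomes the new bias of channel c.
    struct FusedChannel
    {
        float scale;
        float bias;
    };

    FusedChannel fuse_batchnorm_channel(float bias, float mean, float var,
                                        float beta, float gamma, float epsilon)
    {
        const float inv_std = 1.0f / std::sqrt(var + epsilon);
        return {gamma * inv_std, (bias - mean) * gamma * inv_std + beta};
    }

In the public API epsilon defaults to 0.001f, as the header diff below shows.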
diff --git a/src/core/CL/kernels/CLFuseBatchNormalizationKernel.h b/src/core/CL/kernels/CLFuseBatchNormalizationKernel.h
index 78b1e74cab..76ec7a759f 100644
--- a/src/core/CL/kernels/CLFuseBatchNormalizationKernel.h
+++ b/src/core/CL/kernels/CLFuseBatchNormalizationKernel.h
@@ -62,9 +62,16 @@ public:
* @param[in] epsilon (Optional) Batch normalization layer epsilon parameter. Defaults to 0.001f.
* @param[in] fbn_type (Optional) Fused batch normalization type. Defaults to CONVOLUTION.
*/
- void configure(const ICLTensor *input_weights, const ICLTensor *bn_mean, const ICLTensor *bn_var, ICLTensor *fused_weights, ICLTensor *fused_bias,
- const ICLTensor *input_bias = nullptr, const ICLTensor *bn_beta = nullptr, const ICLTensor *bn_gamma = nullptr,
- float epsilon = 0.001f, FuseBatchNormalizationType fbn_type = FuseBatchNormalizationType::CONVOLUTION);
+ void configure(const ICLTensor *input_weights,
+ const ICLTensor *bn_mean,
+ const ICLTensor *bn_var,
+ ICLTensor *fused_weights,
+ ICLTensor *fused_bias,
+ const ICLTensor *input_bias = nullptr,
+ const ICLTensor *bn_beta = nullptr,
+ const ICLTensor *bn_gamma = nullptr,
+ float epsilon = 0.001f,
+ FuseBatchNormalizationType fbn_type = FuseBatchNormalizationType::CONVOLUTION);
/** Set the source, destination of the kernel
*
* @param[in] compile_context The compile context to be used.
@@ -81,9 +88,17 @@ public:
* @param[in] epsilon (Optional) Batch normalization layer epsilon parameter. Defaults to 0.001f.
* @param[in] fbn_type (Optional) Fused batch normalization type. Defaults to CONVOLUTION.
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input_weights, const ICLTensor *bn_mean, const ICLTensor *bn_var, ICLTensor *fused_weights, ICLTensor *fused_bias,
- const ICLTensor *input_bias = nullptr, const ICLTensor *bn_beta = nullptr, const ICLTensor *bn_gamma = nullptr,
- float epsilon = 0.001f, FuseBatchNormalizationType fbn_type = FuseBatchNormalizationType::CONVOLUTION);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input_weights,
+ const ICLTensor *bn_mean,
+ const ICLTensor *bn_var,
+ ICLTensor *fused_weights,
+ ICLTensor *fused_bias,
+ const ICLTensor *input_bias = nullptr,
+ const ICLTensor *bn_beta = nullptr,
+ const ICLTensor *bn_gamma = nullptr,
+ float epsilon = 0.001f,
+ FuseBatchNormalizationType fbn_type = FuseBatchNormalizationType::CONVOLUTION);
/** Static function to check if given info will lead to a valid configuration of @ref CLFuseBatchNormalizationKernel
*
* @param[in] input_weights Input weights tensor info for convolution or depthwise convolution layer. Data type supported: F16/F32. Data layout supported: NCHW, NHWC
@@ -101,10 +116,16 @@ public:
*
* @return a status
*/
- static Status validate(const ITensorInfo *input_weights, const ITensorInfo *bn_mean, const ITensorInfo *bn_var,
- const ITensorInfo *fused_weights, const ITensorInfo *fused_bias,
- const ITensorInfo *input_bias = nullptr, const ITensorInfo *bn_beta = nullptr, const ITensorInfo *bn_gamma = nullptr,
- float epsilon = 0.001f, FuseBatchNormalizationType fbn_type = FuseBatchNormalizationType::CONVOLUTION);
+ static Status validate(const ITensorInfo *input_weights,
+ const ITensorInfo *bn_mean,
+ const ITensorInfo *bn_var,
+ const ITensorInfo *fused_weights,
+ const ITensorInfo *fused_bias,
+ const ITensorInfo *input_bias = nullptr,
+ const ITensorInfo *bn_beta = nullptr,
+ const ITensorInfo *bn_gamma = nullptr,
+ float epsilon = 0.001f,
+ FuseBatchNormalizationType fbn_type = FuseBatchNormalizationType::CONVOLUTION);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp
deleted file mode 100644
index 9215fd602d..0000000000
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-using namespace misc::shape_calculator;
-
-namespace
-{
-using ElementsProcessed = Steps;
-
-Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
- const GEMMReshapeInfo &gemm_info)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input0, input1, output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
- if(input0->data_type() == DataType::QASYMM8)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::QASYMM8, DataType::QSYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL);
- }
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input0->num_dimensions() > 4, "The number of dimensions for the LHS matrix must be <= 4");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input1->num_dimensions() > 3, "The number of dimensions for the RHS matrix must be <= 3");
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.k0 != rhs_info.k0);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((lhs_info.k0 & (lhs_info.k0 - 1)) && lhs_info.k0 != 3), "Only 2,3,4,8,16 are supported for k0");
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.k0 > 16);
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.m0 < 1 || lhs_info.m0 > 8);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((rhs_info.n0 & (rhs_info.n0 - 1)) && rhs_info.n0 != 3), "Only 2,3,4,8,16 are supported for n0");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(rhs_info.export_to_cl_image, "Export to CLImage not supported for quantized GEMM");
-
- const int m = gemm_info.m();
- const int n = gemm_info.n();
- const int k = gemm_info.k();
-
- ARM_COMPUTE_UNUSED(m);
- ARM_COMPUTE_UNUSED(n);
- ARM_COMPUTE_UNUSED(k);
-
- ARM_COMPUTE_RETURN_ERROR_ON(input0->dimension(0) != static_cast<unsigned int>(k));
- ARM_COMPUTE_RETURN_ERROR_ON(input1->dimension(0) != static_cast<unsigned int>(n));
- ARM_COMPUTE_RETURN_ERROR_ON(input1->dimension(1) != static_cast<unsigned int>(k));
- if(gemm_info.reinterpret_input_as_3d())
- {
- ARM_COMPUTE_RETURN_ERROR_ON(input0->dimension(1) * input0->dimension(2) != static_cast<unsigned int>(m));
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON(input0->dimension(1) != static_cast<unsigned int>(m));
- }
-
- if(output->total_size() != 0)
- {
- const TensorInfo tensor_info_output = output->clone()->set_tensor_shape(compute_mm_shape(*input0, *input1, gemm_info));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input0, ITensorInfo *input1, ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
- const GEMMReshapeInfo &gemm_info, ElementsProcessed &num_elements_processed)
-{
- unsigned int &num_elems_processed_per_iteration_x = num_elements_processed[0];
- unsigned int &num_elems_processed_per_iteration_y = num_elements_processed[1];
- bool reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d();
- bool reinterpret_output_as_3d = (gemm_info.depth_output_gemm3d() != 0);
-
- Window win{};
- bool window_changed = false;
-
- // In case both input and output have to be reinterpreted as 3D tensors,
- // force reinterpret_output_as_3d to be false.
- if(reinterpret_input_as_3d == reinterpret_output_as_3d)
- {
- reinterpret_output_as_3d = false;
- }
-
- // Output tensor auto initialization if not yet initialized
- auto_init_if_empty(*output, input0->clone()->set_tensor_shape(compute_mm_shape(*input0, *input1, gemm_info)).set_data_type(DataType::S32));
-
- TensorInfo tmp_info(*output);
-
- if(reinterpret_output_as_3d)
- {
- // Since the output tensor has to be reinterpreted as 3D and the execute window is based on a 2D GEMM,
- // the window needs to be constructed on the 2D collapsed version of the tensor
- TensorShape tmp_shape(output->tensor_shape());
- tmp_shape.collapse(2U, 1U);
- tmp_info.set_tensor_shape(tmp_shape);
- }
-
- // Configure kernel window
- num_elems_processed_per_iteration_x = rhs_info.n0;
- num_elems_processed_per_iteration_y = lhs_info.m0;
-
- win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
-
- // RHS matrix still needs padding on the X
- AccessWindowStatic input1_access(input1, 0, 0,
- ceil_to_multiple(input1->dimension(0), num_elems_processed_per_iteration_x),
- input1->dimension(1));
-
- window_changed = update_window_and_padding(win, input1_access); // window used by the execute_window_loop
-
- // Collapse along the Z direction
- // This collapse needs to be here in order to tune the Z dimension of LWS
- Window collapsed = win;
- const unsigned int dimension_to_collapse = std::min(static_cast<unsigned int>(output->num_dimensions()), 2u);
- collapsed = win.collapse(win, dimension_to_collapse);
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, collapsed);
-}
-} // namespace
-
-CLGEMMLowpMatrixMultiplyNativeKernel::CLGEMMLowpMatrixMultiplyNativeKernel()
- : _input0(nullptr), _input1(nullptr), _output(nullptr), _slide_matrix_b(true), _reinterpret_input_as_3d(false), _reinterpret_output_as_3d(false), _use_dummy_work_items(false)
-{
-}
-
-void CLGEMMLowpMatrixMultiplyNativeKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
- const GEMMReshapeInfo &gemm_info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input0, input1, output, lhs_info, rhs_info, gemm_info);
-}
-
-void CLGEMMLowpMatrixMultiplyNativeKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMReshapeInfo &gemm_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output);
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), output->info(), lhs_info, rhs_info, gemm_info));
-
- _input0 = input0;
- _input1 = input1;
- _output = output;
- _reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d();
- _reinterpret_output_as_3d = (gemm_info.depth_output_gemm3d() != 0);
- _use_dummy_work_items = preferred_dummy_work_items_support(CLKernelLibrary::get().get_device());
-
- // We still need padding on the X dimension for the RHS matrix
- auto padding_info = get_padding_info({ input0, output });
-
- // In case both input and output have to be reinterpreted as 3D tensors,
- // force reinterpret_input_as_3d and reinterpret_output_as_3d to be false.
- if(_reinterpret_input_as_3d == _reinterpret_output_as_3d)
- {
- _reinterpret_input_as_3d = false;
- _reinterpret_output_as_3d = false;
- }
-
- // Check if we need to slide the matrix B
- const unsigned int num_dimensions_input0 = _input0->info()->num_dimensions();
- _slide_matrix_b = (_input1->info()->num_dimensions() >= num_dimensions_input0);
-
- ElementsProcessed num_elements_processed{};
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(input0->info(), input1->info(), output->info(), lhs_info, rhs_info, gemm_info, num_elements_processed);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
-
- // If _reinterpret_input_as_3d = _reinterpret_output_as_3d = true,
- // we will dispatch a batched-GEMM to reduce the complexity of the address calculation within the OpenCL kernel.
- // This means that the actual m used by the kernel is given by output->info()->dimension(1) and not by gemm_info.m
- const unsigned int internal_m = _reinterpret_output_as_3d ? gemm_info.m() : output->info()->dimension(1);
- // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding.
- const unsigned int partial_store_m0 = internal_m % lhs_info.m0;
- const unsigned int partial_store_n0 = gemm_info.n() % rhs_info.n0;
-
- // Shrink M0 to be always <= M (internal_m) to prevent out-of-bounds reads.
- // NOTE: This might have implications on heuristics and performance
- const unsigned int internal_m0 = std::min(internal_m, lhs_info.m0);
-
- // Create build options
- CLBuildOptions build_opts;
- build_opts.add_option_if(_reinterpret_input_as_3d, "-DREINTERPRET_INPUT_AS_3D");
- build_opts.add_option_if(_reinterpret_output_as_3d, "-DREINTERPRET_OUTPUT_AS_3D");
- build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, "-DHEIGHT_GEMM3D=" + support::cpp11::to_string(output->info()->dimension(1)));
- build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, "-DDEPTH_GEMM3D=" + support::cpp11::to_string(output->info()->dimension(2)));
- build_opts.add_option_if(!_slide_matrix_b, "-DMATRIX_B_DEPTH=" + support::cpp11::to_string(input1->info()->dimension(2)));
- build_opts.add_option_if(_use_dummy_work_items, "-DDUMMY_WORK_ITEMS");
- build_opts.add_option("-DM=" + support::cpp11::to_string(input0->info()->dimension(1)));
- build_opts.add_option("-DN=" + support::cpp11::to_string(gemm_info.n()));
- build_opts.add_option("-DK=" + support::cpp11::to_string(gemm_info.k()));
- build_opts.add_option("-DM0=" + support::cpp11::to_string(internal_m0));
- build_opts.add_option("-DN0=" + support::cpp11::to_string(rhs_info.n0));
- build_opts.add_option("-DK0=" + support::cpp11::to_string(rhs_info.k0));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input0->info()->data_type()));
- build_opts.add_option("-DACC_DATA_TYPE=" + get_cl_dot8_acc_type_from_data_type(input0->info()->data_type()));
- build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
- build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
- std::string kernel_name("gemmlowp_mm_native");
-
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Set config_id for enabling LWS tuning
- _config_id = kernel_name;
- _config_id += "_";
- _config_id += dot8_supported(CLKernelLibrary::get().get_device()) ? "_dot8" : "";
- _config_id += "_";
- _config_id += (_reinterpret_input_as_3d ? "3di_" : "");
- _config_id += (_reinterpret_output_as_3d ? "3do_" : "");
- _config_id += support::cpp11::to_string(output->info()->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(gemm_info.k());
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.m0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.n0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.k0);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status CLGEMMLowpMatrixMultiplyNativeKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info, const GEMMReshapeInfo &gemm_info)
-{
- ElementsProcessed num_elements_processed{};
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input0, input1, output, lhs_info, rhs_info, gemm_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input0->clone().get(),
- input1->clone().get(),
- output->clone().get(),
- lhs_info,
- rhs_info,
- gemm_info,
- num_elements_processed)
- .first);
-
- return Status{};
-}
-
-void CLGEMMLowpMatrixMultiplyNativeKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- if(_input1->info()->num_dimensions() < 3)
- {
- // The stride_z for matrix B must be zero if we do not slice
- ARM_COMPUTE_ERROR_ON(_input1->info()->strides_in_bytes()[3] != 0);
- }
-
- Window slice = window.first_slice_window_3D();
- Window slice_matrix_b = slice;
-
- slice_matrix_b.set(Window::DimX, Window::Dimension(0, 1, 1));
- slice_matrix_b.set(Window::DimY, Window::Dimension(0, 1, 1));
-
- if(_reinterpret_input_as_3d)
- {
- // Pass bottom paddings to the kernel if the input has to be reinterpreted as 3D tensor
- const unsigned int idx0 = 3 * num_arguments_per_2D_tensor() + 3;
- const unsigned int total_cross_plane_pad = _input0->info()->padding().top + _input0->info()->padding().bottom;
- _kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
- }
-
- if(_reinterpret_output_as_3d)
- {
- // Pass bottom paddings to the kernel if the output has to be reinterpreted as 3D tensor
- const unsigned int idx0 = 3 * num_arguments_per_2D_tensor() + 3 + (_reinterpret_input_as_3d ? 1 : 0);
- const unsigned int total_cross_plane_pad = _output->info()->padding().top + _output->info()->padding().bottom;
- _kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
- }
-
- do
- {
- Window slice_b = slice;
- // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2
- // This scenario can happen when the matrix multiplication is used to perform a convolution operation
- if(!_slide_matrix_b)
- {
- slice_b = slice_matrix_b;
- }
-
- unsigned int idx = 0;
- add_2D_tensor_argument(idx, _input0, slice);
- add_2D_tensor_argument(idx, _input1, slice_b);
- add_2D_tensor_argument(idx, _output, slice);
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input0->info()->strides_in_bytes()[2]));
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input1->info()->strides_in_bytes()[2]));
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_output->info()->strides_in_bytes()[2]));
- enqueue(queue, *this, slice, lws_hint(), _use_dummy_work_items);
- }
- while(window.slide_window_slice_3D(slice));
-}
-} // namespace arm_compute
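Two details of the deleted native lowp kernel are worth keeping in view while reading the removal: partial tiles at the right/bottom edge are handled by storing a smaller block instead of padding the output, and M0 is clamped so the kernel never reads past the last row. Restated from the removed configure(), with values coming from lhs_info, rhs_info and gemm_info:

    const unsigned int internal_m       = _reinterpret_output_as_3d ? gemm_info.m() : output->info()->dimension(1);
    const unsigned int partial_store_m0 = internal_m % lhs_info.m0;          // leftover rows of the last tile
    const unsigned int partial_store_n0 = gemm_info.n() % rhs_info.n0;       // leftover columns of the last tile
    const unsigned int internal_m0      = std::min(internal_m, lhs_info.m0); // shrink M0 to avoid out-of-bounds reads

These values feed the -DM0, -DPARTIAL_STORE_M0 and -DPARTIAL_STORE_N0 build options of the OpenCL program.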
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h
deleted file mode 100644
index 125f0c6948..0000000000
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMLOWPMATRIXMULTIPLYNATIVEKERNEL_H
-#define ARM_COMPUTE_CLGEMMLOWPMATRIXMULTIPLYNATIVEKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** OpenCL kernel to multiply matrices with QASYMM8/QASYMM8_SIGNED data type */
-class CLGEMMLowpMatrixMultiplyNativeKernel : public ICLKernel
-{
-public:
- /** Default Constructor */
- CLGEMMLowpMatrixMultiplyNativeKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLGEMMLowpMatrixMultiplyNativeKernel(const CLGEMMLowpMatrixMultiplyNativeKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLGEMMLowpMatrixMultiplyNativeKernel &operator=(const CLGEMMLowpMatrixMultiplyNativeKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLGEMMLowpMatrixMultiplyNativeKernel(CLGEMMLowpMatrixMultiplyNativeKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLGEMMLowpMatrixMultiplyNativeKernel &operator=(CLGEMMLowpMatrixMultiplyNativeKernel &&) = default;
- /** Initialise the kernel's input and output.
- *
- * @param[in] input0 Input tensor containing the LHS matrix. Data type supported: QASYMM8/QASYMM8_SIGNED
- * @param[in] input1 Input tensor containing the RHS matrix. Data type supported: same as @p input0
- * @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: S32
- * @param[in] lhs_info LHS matrix information used to retrieve the number of rows to be processed by each thread
- * lhs_info.m0: 2,3,4,5,6,7,8
- * lhs_info.k0: 2,3,4,8,16
- * @param[in] rhs_info RHS matrix information used to retrieve the number of columns to be processed by each thread
- * rhs_info.n0: 2,3,4,8,16
- * rhs_info.k0: same as lhs_info.k0
- * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices
- */
- void configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, const GEMMReshapeInfo &gemm_info);
- /** Initialise the kernel's input and output.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input0 Input tensor containing the LHS matrix. Data type supported: QASYMM8/QASYMM8_SIGNED
- * @param[in] input1 Input tensor containing the RHS matrix. Data type supported: same as @p input0
- * @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: S32
- * @param[in] lhs_info LHS matrix information used to retrieve the number of rows to be processed by each thread
- * lhs_info.m0: 2,3,4,5,6,7,8
- * lhs_info.k0: 2,3,4,8,16
- * @param[in] rhs_info RHS matrix information used to retrieve the number of columns to be processed by each thread
- * rhs_info.n0: 2,3,4,8,16
- * rhs_info.k0: same as lhs_info.k0
- * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
- const GEMMReshapeInfo &gemm_info);
- /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpMatrixMultiplyNativeKernel
- *
- * @param[in] input0 Input tensor info for the LHS matrix. Data type supported: QASYMM8/QASYMM8_SIGNED
- * @param[in] input1 Input tensor info for the RHS matrix. Data type supported: same as @p input0
- * @param[in] output Output tensor info. Data type supported: S32
- * @param[in] lhs_info LHS matrix information used to retrieve the number of rows to be processed by each thread
- * lhs_info.m0: 2,3,4,5,6,7,8
- * lhs_info.k0: 2,3,4,8,16
- * @param[in] rhs_info RHS matrix information used to retrieve the number of columns to be processed by each thread
- * rhs_info.n0: 2,3,4,8,16
- * rhs_info.k0: same as lhs_info.k0
- * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
- const GEMMReshapeInfo &gemm_info);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_input0;
- const ICLTensor *_input1;
- ICLTensor *_output;
- bool _slide_matrix_b;
- bool _reinterpret_input_as_3d;
- bool _reinterpret_output_as_3d;
- bool _use_dummy_work_items;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLGEMMLOWPMATRIXMULTIPLYNATIVEKERNEL_H*/
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.cpp
deleted file mode 100644
index 848f272e50..0000000000
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.cpp
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-using namespace misc::shape_calculator;
-
-namespace
-{
-using ElementsProcessed = Steps;
-
-Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
- const GEMMReshapeInfo &gemm_info)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input0, input1, output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input0->num_dimensions() > 4, "The number of dimensions for the LHS matrix must be <= 4");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input1->num_dimensions() > 3, "The number of dimensions for the RHS matrix must be <= 3");
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.transpose);
- ARM_COMPUTE_RETURN_ERROR_ON(!rhs_info.transpose);
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.k0 != rhs_info.k0);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((lhs_info.k0 & (lhs_info.k0 - 1)) && lhs_info.k0 != 3), "Only 2,3,4,8,16 are supported for k0");
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.k0 > 16);
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.m0 < 2 || lhs_info.m0 > 8);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((rhs_info.n0 & (rhs_info.n0 - 1)) && rhs_info.n0 != 3), "Only 2,3,4,8,16 are supported for n0");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(rhs_info.export_to_cl_image, "Export to CLImage not supported for quantized GEMM");
-
- const int m = gemm_info.m();
- const int n = gemm_info.n();
- const int k = gemm_info.k();
-
- TensorShape tensor_shape0{ input0->tensor_shape() };
- tensor_shape0.set(0, k);
- tensor_shape0.set(1, m);
-
- TensorShape tensor_shape1{ input1->tensor_shape() };
- tensor_shape1.set(0, n);
- tensor_shape1.set(1, k);
-
- const TensorInfo tensor_info0 = input0->clone()->set_tensor_shape(tensor_shape0);
- const TensorInfo tensor_info1 = input1->clone()->set_tensor_shape(tensor_shape1);
-
- const TensorInfo tensor_info_reshaped0 = input0->clone()->set_tensor_shape(compute_lhs_reshaped_shape(tensor_info0, lhs_info));
- const TensorInfo tensor_info_reshaped1 = input1->clone()->set_tensor_shape(compute_rhs_reshaped_shape(tensor_info1, rhs_info));
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input0, &tensor_info_reshaped0);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, &tensor_info_reshaped1);
-
- if(output->total_size() != 0)
- {
- const TensorInfo tensor_info_output = output->clone()->set_tensor_shape(compute_mm_shape(*input0, *input1, gemm_info));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input0, ITensorInfo *input1, ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
- const GEMMReshapeInfo &gemm_info, ElementsProcessed &num_elements_processed)
-{
- unsigned int &num_elems_processed_per_iteration_x = num_elements_processed[0];
- unsigned int &num_elems_processed_per_iteration_y = num_elements_processed[1];
- bool reinterpret_output_as_3d = (gemm_info.depth_output_gemm3d() != 0);
-
- // Output tensor auto initialization if not yet initialized
- auto_init_if_empty(*output, input0->clone()->set_tensor_shape(compute_mm_shape(*input0, *input1, gemm_info)).set_data_type(DataType::S32));
-
- TensorInfo tmp_info(*output);
- if(reinterpret_output_as_3d)
- {
- // Since the output tensor has to be reinterpreted as 3D and the execute window is based on a 2D GEMM,
- // the window needs to be constructed on the 2D collapsed version of the tensor
- TensorShape tmp_shape(output->tensor_shape());
- tmp_shape.collapse(2U, 1U);
- tmp_info.set_tensor_shape(tmp_shape);
- }
-
- // Configure kernel window
- num_elems_processed_per_iteration_x = rhs_info.n0;
- num_elems_processed_per_iteration_y = lhs_info.m0;
- Window win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
-
- // Collapse along the Z direction
- // This collapse needs to be here in order to tune the Z dimension of LWS
- Window collapsed = win;
- const unsigned int dimension_to_collapse = std::min(static_cast<unsigned int>(output->num_dimensions()), 2u);
- collapsed = win.collapse(win, dimension_to_collapse);
-
- return std::make_pair(Status{}, collapsed);
-}
-} // namespace
-
-CLGEMMLowpMatrixMultiplyReshapedKernel::CLGEMMLowpMatrixMultiplyReshapedKernel()
- : _input0(nullptr), _input1(nullptr), _output(nullptr), _slide_matrix_b(true), _reinterpret_output_as_3d(false), _k(1), _use_dummy_work_items(false)
-{
-}
-
-void CLGEMMLowpMatrixMultiplyReshapedKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
- const GEMMReshapeInfo &gemm_info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input0, input1, output, lhs_info, rhs_info, gemm_info);
-}
-
-void CLGEMMLowpMatrixMultiplyReshapedKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMReshapeInfo &gemm_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output);
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), output->info(), lhs_info, rhs_info, gemm_info));
-
- _input0 = input0;
- _input1 = input1;
- _output = output;
- _reinterpret_output_as_3d = (gemm_info.depth_output_gemm3d() != 0);
- _k = gemm_info.k();
- _use_dummy_work_items = preferred_dummy_work_items_support(CLKernelLibrary::get().get_device());
-
- // Check if we need to slide the matrix B
- const unsigned int num_dimensions_input0 = _input0->info()->num_dimensions();
- _slide_matrix_b = (_input1->info()->num_dimensions() >= num_dimensions_input0);
-
- auto padding_info = get_padding_info({ input0, input1, output });
- ElementsProcessed num_elements_processed{};
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(input0->info(), input1->info(), output->info(), lhs_info, rhs_info, gemm_info, num_elements_processed);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
-
- // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding.
- const unsigned int internal_m = _reinterpret_output_as_3d ? gemm_info.m() : output->info()->dimension(1);
-
- const unsigned int partial_store_m0 = internal_m % lhs_info.m0;
- const unsigned int partial_store_n0 = gemm_info.n() % rhs_info.n0;
-
- // Create build options
- CLBuildOptions build_opts;
- build_opts.add_option_if(_reinterpret_output_as_3d, "-DREINTERPRET_OUTPUT_AS_3D");
- build_opts.add_option_if(_reinterpret_output_as_3d, "-DHEIGHT_GEMM3D=" + support::cpp11::to_string(output->info()->dimension(1)));
- build_opts.add_option_if(_reinterpret_output_as_3d, "-DDEPTH_GEMM3D=" + support::cpp11::to_string(output->info()->dimension(2)));
- build_opts.add_option_if(!_slide_matrix_b, "-DMATRIX_B_DEPTH=" + support::cpp11::to_string(input1->info()->dimension(2)));
- build_opts.add_option_if(lhs_info.interleave, "-DLHS_INTERLEAVE");
- build_opts.add_option_if(rhs_info.interleave, "-DRHS_INTERLEAVE");
- build_opts.add_option_if(_use_dummy_work_items, "-DDUMMY_WORK_ITEMS");
- build_opts.add_option("-DM=" + support::cpp11::to_string(gemm_info.m()));
- build_opts.add_option("-DN=" + support::cpp11::to_string(gemm_info.n()));
- build_opts.add_option("-DM0=" + support::cpp11::to_string(lhs_info.m0));
- build_opts.add_option("-DN0=" + support::cpp11::to_string(rhs_info.n0));
- build_opts.add_option("-DK0=" + support::cpp11::to_string(lhs_info.k0));
- build_opts.add_option("-DV0=" + support::cpp11::to_string(lhs_info.v0));
- build_opts.add_option("-DH0=" + support::cpp11::to_string(rhs_info.h0));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input0->info()->data_type()));
- build_opts.add_option("-DACC_DATA_TYPE=" + get_cl_dot8_acc_type_from_data_type(input0->info()->data_type()));
- build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
- build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
-
- std::string kernel_name("gemmlowp_mm_reshaped_");
- kernel_name += lhs_info.transpose ? "lhs_t_" : "lhs_nt_";
- kernel_name += rhs_info.transpose ? "rhs_t" : "rhs_nt";
-
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Set config_id for enabling LWS tuning
- _config_id = kernel_name;
- _config_id += "_";
- _config_id += dot8_supported(CLKernelLibrary::get().get_device()) ? "_dot8" : "";
- _config_id += "_";
- _config_id += (_reinterpret_output_as_3d ? "3do_" : "");
- _config_id += support::cpp11::to_string(output->info()->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(gemm_info.k());
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.m0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.n0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.k0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.v0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.h0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.interleave);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.interleave);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status CLGEMMLowpMatrixMultiplyReshapedKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info, const GEMMReshapeInfo &gemm_info)
-{
- ElementsProcessed num_elements_processed{};
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input0, input1, output, lhs_info, rhs_info, gemm_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input0->clone().get(),
- input1->clone().get(),
- output->clone().get(),
- lhs_info,
- rhs_info,
- gemm_info,
- num_elements_processed)
- .first);
-
- return Status{};
-}
-
-void CLGEMMLowpMatrixMultiplyReshapedKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- if(_input1->info()->num_dimensions() < 3)
- {
- // The stride_z for matrix B must be zero if we do not slice
- ARM_COMPUTE_ERROR_ON(_input1->info()->strides_in_bytes()[3] != 0);
- }
-
- Window slice = window.first_slice_window_3D();
- Window slice_matrix_b = slice;
-
- slice_matrix_b.set(Window::DimX, Window::Dimension(0, 1, 1));
- slice_matrix_b.set(Window::DimY, Window::Dimension(0, 1, 1));
-
- if(_reinterpret_output_as_3d)
- {
- // Pass bottom paddings to the kernel if the output has to be reinterpreted as 3D tensor
- const unsigned int idx0 = 3 * num_arguments_per_2D_tensor() + 4;
- const unsigned int total_cross_plane_pad = _output->info()->padding().top + _output->info()->padding().bottom;
- _kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
- }
-
- do
- {
- Window slice_b = slice;
- // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2
- // This scenario can happen when the matrix multiplication is used to perform a convolution operation
- if(!_slide_matrix_b)
- {
- slice_b = slice_matrix_b;
- }
-
- unsigned int idx = 0;
- add_2D_tensor_argument(idx, _input0, slice);
- add_2D_tensor_argument(idx, _input1, slice_b);
- add_2D_tensor_argument(idx, _output, slice);
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_k));
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input0->info()->strides_in_bytes()[2]));
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input1->info()->strides_in_bytes()[2]));
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_output->info()->strides_in_bytes()[2]));
- enqueue(queue, *this, slice, lws_hint(), _use_dummy_work_items);
- }
- while(window.slide_window_slice_3D(slice));
-}
-} // namespace arm_compute
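For reference, the fixed argument index used for the cross-plane padding in run() above follows from the argument layout established on every slice: three 2D tensor arguments followed by four scalar arguments (k and the three Z-strides). A hedged worked example; the per-tensor argument count of 6 is an assumption about ICLKernel, not something stated in this diff:

// Illustrative arithmetic only. ASSUMPTION: ICLKernel passes 6 OpenCL arguments
// per 2D tensor (buffer, per-dimension stride/step pairs, first-element offset);
// substitute the real value of num_arguments_per_2D_tensor() if it differs.
constexpr unsigned int args_per_2d_tensor  = 6; // assumed
constexpr unsigned int num_tensors         = 3; // input0, input1, output
constexpr unsigned int scalar_args         = 4; // k plus the three Z-strides
constexpr unsigned int cross_plane_pad_idx = num_tensors * args_per_2d_tensor + scalar_args; // 22 under the assumption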
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.h b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.h
deleted file mode 100644
index 100100b1b1..0000000000
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMLOWPMATRIXMULTIPLYRESHAPEDKERNEL_H
-#define ARM_COMPUTE_CLGEMMLOWPMATRIXMULTIPLYRESHAPEDKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** OpenCL kernel to multiply matrices when both the input matrices LHS (input0) and RHS (input1) have been reshaped
- *
- * @note The input matrices @p input0 and @p input1 must be reshaped through @ref CLGEMMReshapeLHSMatrixKernel and @ref CLGEMMReshapeRHSMatrixKernel
- */
-class CLGEMMLowpMatrixMultiplyReshapedKernel : public ICLKernel
-{
-public:
- /** Default Constructor */
- CLGEMMLowpMatrixMultiplyReshapedKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLGEMMLowpMatrixMultiplyReshapedKernel(const CLGEMMLowpMatrixMultiplyReshapedKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLGEMMLowpMatrixMultiplyReshapedKernel &operator=(const CLGEMMLowpMatrixMultiplyReshapedKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLGEMMLowpMatrixMultiplyReshapedKernel(CLGEMMLowpMatrixMultiplyReshapedKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLGEMMLowpMatrixMultiplyReshapedKernel &operator=(CLGEMMLowpMatrixMultiplyReshapedKernel &&) = default;
- /** Initialise the kernel's input and output.
- *
- * @param[in]  input0    Input tensor containing the LHS reshaped matrix. Data type supported: QASYMM8/QASYMM8_SIGNED. The number of dimensions for the LHS matrix must be less than or equal to 4.
- * @param[in]  input1    Input tensor containing the RHS reshaped matrix. Data type supported: same as @p input0. The number of dimensions for the RHS matrix must be less than or equal to 3.
- * @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: S32
- * @param[in] lhs_info LHS matrix information used for reshaping the input0 tensor. Only the following values are supported:
- * lhs_info.m0: 2,3,4,5,6,7,8
- * lhs_info.k0: 2,3,4,8,16
- * lhs_info.transpose: false
- * @param[in] rhs_info RHS matrix information used for reshaping the input1 tensor. Only the following values are supported:
- * rhs_info.n0: 2,3,4,8,16
- * rhs_info.k0: same as lhs_info.k0
- * rhs_info.transpose: true
- * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices
- *
- * @note lhs_info.k0 must be equal to rhs_info.k0
- */
- void configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, const GEMMReshapeInfo &gemm_info);
- /** Initialise the kernel's input and output.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in]  input0          Input tensor containing the LHS reshaped matrix. Data type supported: QASYMM8/QASYMM8_SIGNED. The number of dimensions for the LHS matrix must be less than or equal to 4.
- * @param[in]  input1          Input tensor containing the RHS reshaped matrix. Data type supported: same as @p input0. The number of dimensions for the RHS matrix must be less than or equal to 3.
- * @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: S32
- * @param[in] lhs_info LHS matrix information used for reshaping the input0 tensor. Only the following values are supported:
- * lhs_info.m0: 2,3,4,5,6,7,8
- * lhs_info.k0: 2,3,4,8,16
- * lhs_info.transpose: false
- * @param[in] rhs_info RHS matrix information used for reshaping the input1 tensor. Only the following values are supported:
- * rhs_info.n0: 2,3,4,8,16
- * rhs_info.k0: same as lhs_info.k0
- * rhs_info.transpose: true
- * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices
- *
- * @note lhs_info.k0 must be equal to rhs_info.k0
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
- const GEMMReshapeInfo &gemm_info);
- /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpMatrixMultiplyReshapedKernel
- *
- * @param[in] input0    Input tensor info containing the LHS reshaped matrix. Data type supported: QASYMM8/QASYMM8_SIGNED. The number of dimensions for the LHS matrix must be less than or equal to 4.
- * @param[in] input1    Input tensor info containing the RHS reshaped matrix. Data type supported: same as @p input0. The number of dimensions for the RHS matrix must be less than or equal to 3.
- * @param[in] output Output tensor info. Data type supported: S32
- * @param[in] lhs_info LHS matrix information used for reshaping the input0 tensor. Only the following values are supported:
- * lhs_info.m0: 2,3,4,5,6,7,8
- * lhs_info.k0: 2,3,4,8,16
- * lhs_info.transpose: false
- * @param[in] rhs_info RHS matrix information used for reshaping the input1 tensor. Only the following values are supported:
- * rhs_info.n0: 2,3,4,8,16
- * rhs_info.k0: 2,3,4,8,16
- * rhs_info.transpose: true
- * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices
- *
- * @note lhs_info.k0 must be equal to rhs_info.k0
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
- const GEMMReshapeInfo &gemm_info);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_input0;
- const ICLTensor *_input1;
- ICLTensor *_output;
- bool _slide_matrix_b;
- bool _reinterpret_output_as_3d;
- unsigned int _k;
- bool _use_dummy_work_items;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLGEMMLOWPMATRIXMULTIPLYRESHAPEDKERNEL_H*/
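The block-size constraints listed in the documentation above (m0 in 2..8, k0 and n0 in {2,3,4,8,16}, LHS not transposed, RHS transposed, matching k0 on both sides) can be collected into a small standalone checker. This is an illustrative sketch, not library code:

#include <cstdint>

// A documented block size is either a power of two no larger than 16, or exactly 3.
static bool is_supported_block_size(uint32_t v)
{
    return v == 3 || (v >= 2 && v <= 16 && (v & (v - 1)) == 0);
}

// Mirrors the constraints stated for configure()/validate() above.
static bool reshaped_gemmlowp_config_ok(uint32_t m0, uint32_t lhs_k0, uint32_t n0, uint32_t rhs_k0,
                                        bool lhs_transpose, bool rhs_transpose)
{
    return m0 >= 2 && m0 <= 8
           && is_supported_block_size(lhs_k0) && is_supported_block_size(n0)
           && lhs_k0 == rhs_k0   // lhs_info.k0 must be equal to rhs_info.k0
           && !lhs_transpose && rhs_transpose;
}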
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp
deleted file mode 100644
index d39900a561..0000000000
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp
+++ /dev/null
@@ -1,573 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-#include <cstddef>
-#include <cstdint>
-#include <tuple>
-
-using namespace arm_compute::misc::shape_calculator;
-
-namespace arm_compute
-{
-namespace
-{
-using ElementsProcessed = Steps;
-
-Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, const GEMMKernelInfo &gemm_info,
- const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias,
- const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input0, input1, output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
- if(input0->data_type() == DataType::QASYMM8)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::QASYMM8, DataType::QSYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL);
- }
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input0->num_dimensions() > 4, "The number of dimensions for the LHS matrix must be <= 4");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input1->num_dimensions() > 3, "The number of dimensions for the RHS matrix must be <= 3");
-
- const GEMMRHSMatrixInfo rhs_info = gemm_info.rhs_info;
- const GEMMLHSMatrixInfo lhs_info = gemm_info.lhs_info;
- const GEMMLowpOutputStageInfo output_stage = gemm_info.output_stage;
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((((rhs_info.k0 & (rhs_info.k0 - 1)) && rhs_info.k0 != 3) || (rhs_info.k0 > 16)), "Only 2,3,4,8,16 are supported for k0");
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.m0 < 1 || lhs_info.m0 > 8);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((((rhs_info.n0 & (rhs_info.n0 - 1)) && rhs_info.n0 != 3) || rhs_info.n0 > 16), "Only 2,3,4,8,16 are supported for n0");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(rhs_info.export_to_cl_image, "Export to CLImage not supported for quantized GEMM");
-
- const int m = gemm_info.m;
- const int n = gemm_info.n;
- const int k = gemm_info.k;
-
- TensorShape tensor_shape1{ input1->tensor_shape() };
- tensor_shape1.set(0, n);
- tensor_shape1.set(1, k);
-
- const TensorInfo tensor_info1 = input1->clone()->set_tensor_shape(tensor_shape1);
- const TensorInfo tensor_info_reshaped1 = input1->clone()->set_tensor_shape(compute_rhs_reshaped_shape(tensor_info1, rhs_info));
-
- ARM_COMPUTE_RETURN_ERROR_ON(input0->dimension(0) != static_cast<unsigned int>(k));
- if(gemm_info.reinterpret_input_as_3d)
- {
- ARM_COMPUTE_RETURN_ERROR_ON(input0->dimension(1) * input0->dimension(2) != static_cast<unsigned int>(m));
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON(input0->dimension(1) != static_cast<unsigned int>(m));
- }
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, &tensor_info_reshaped1);
-
- const TensorShape expected_output_shape = compute_mm_shape(*input0, *input1, gemm_info);
- if(output->total_size() != 0)
- {
- const TensorInfo tensor_info_output = output->clone()->set_tensor_shape(expected_output_shape);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
- if(output_stage.type == GEMMLowpOutputStageType::NONE)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, output);
- }
- }
-
- if(bias != nullptr)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
- ARM_COMPUTE_RETURN_ERROR_ON(expected_output_shape[0] != bias->dimension(0));
- }
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((output_stage.type == GEMMLowpOutputStageType::QUANTIZE_DOWN) || (output_stage.type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT),
- "Only GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT is supported");
-
- // Checks performed if the output stage needs to be fused
- if(output_stage.type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
- {
- // If a_offset == 0, vector_sum_col can be a nullptr
- if(gemm_info.a_offset != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != expected_output_shape[0]);
- }
-
- // If b_offset == 0, vector_sum_row can be a nullptr
- if(gemm_info.b_offset != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);
-
- // Check if mm result is a 3D reinterpretation
- const bool reinterpret_as_3d = expected_output_shape.num_dimensions() > 1 && expected_output_shape.y() != vector_sum_row->tensor_shape().x();
-
- // Validate input
- ARM_COMPUTE_RETURN_ERROR_ON(reinterpret_as_3d && vector_sum_row->dimension(0) != (expected_output_shape[1] * expected_output_shape[2]));
- ARM_COMPUTE_RETURN_ERROR_ON(!reinterpret_as_3d && vector_sum_row->dimension(0) != expected_output_shape[1]);
-
- if(expected_output_shape.num_dimensions() > 1)
- {
- const unsigned int output_batch_idx = reinterpret_as_3d ? 3 : 2;
-
- TensorShape vector_sum_row_shape = vector_sum_row->tensor_shape();
- vector_sum_row_shape.collapse_from(1);
- TensorShape collapsed_output_shape(expected_output_shape);
- collapsed_output_shape.collapse_from(output_batch_idx);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != collapsed_output_shape[output_batch_idx],
- "vector_sum_row must have the same number of batches of output tensor");
-
- if(gemm_info.a_offset != 0)
- {
- TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape();
- vector_sum_col_shape.collapse_from(1);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 && vector_sum_col_shape[1] != vector_sum_row_shape[1],
- "vector_sum_col tensor must have the same number of batches of vector_sum_row_shape or the number of batches must be set to 1");
- }
- }
- }
-
- if(output->total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON(output_stage.output_data_type != output->data_type());
- }
- ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound > output_stage.gemmlowp_max_bound);
-
- if(output_multipliers != nullptr && output_shifts != nullptr)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_multipliers, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON(output_multipliers->num_dimensions() > 1);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_shifts, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON(output_shifts->num_dimensions() > 1);
- if(output_stage.is_quantized_per_channel)
- {
- ARM_COMPUTE_RETURN_ERROR_ON(expected_output_shape[0] != output_shifts->dimension(0));
- ARM_COMPUTE_RETURN_ERROR_ON(expected_output_shape[0] != output_multipliers->dimension(0));
- }
- }
- }
- return Status{};
-}
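To make the vector-sum checks above concrete: vector_sum_col holds one per-column sum of matrix B and vector_sum_row one per-row sum of matrix A, so their lengths must track the output's N and M extents, with M spanning both inner dimensions when the result is reinterpreted as 3D. An illustrative sketch of the expected sizes, not library code:

// Expected lengths of the offset-correction vectors for an output of shape
// [n, m] (2D) or [n, h, d] with the output reinterpreted as 3D (m == h * d).
struct ExpectedSumSizes
{
    unsigned int vector_sum_col; // per-column sums of B; only required when a_offset != 0
    unsigned int vector_sum_row; // per-row sums of A;    only required when b_offset != 0
};

static ExpectedSumSizes expected_sum_sizes(unsigned int n, unsigned int h, unsigned int d, bool reinterpret_as_3d)
{
    return { n, reinterpret_as_3d ? h * d : h };
}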
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input0, ITensorInfo *input1, ITensorInfo *output, const GEMMKernelInfo &gemm_info,
- ITensorInfo *vector_sum_col, ITensorInfo *vector_sum_row, ITensorInfo *bias,
- ITensorInfo *output_multipliers, ITensorInfo *output_shifts, ElementsProcessed &num_elements_processed)
-{
- const GEMMLowpOutputStageInfo output_stage = gemm_info.output_stage;
-
- unsigned int &num_elems_processed_per_iteration_x = num_elements_processed[0];
- unsigned int &num_elems_processed_per_iteration_y = num_elements_processed[1];
- bool reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d;
- bool reinterpret_output_as_3d = (gemm_info.depth_output_gemm3d != 0);
-
- Window win{};
- Window win_out{};
- bool window_changed = false;
-
- // In case both input and output have to be reinterpreted as 3D tensors,
- // force reinterpret_input_as_3d and reinterpret_output_as_3d to be false.
- if(reinterpret_input_as_3d == reinterpret_output_as_3d)
- {
- reinterpret_output_as_3d = false;
- }
-
- // Output tensor auto initialization if not yet initialized
- const TensorShape expected_output_shape = compute_mm_shape(*input0, *input1, gemm_info);
- if(output_stage.type != GEMMLowpOutputStageType::NONE)
- {
- auto_init_if_empty(*output, input0->clone()->set_tensor_shape(expected_output_shape).set_data_type(output_stage.output_data_type));
- }
- else
- {
- auto_init_if_empty(*output, input0->clone()->set_tensor_shape(expected_output_shape).set_data_type(DataType::S32));
- }
-
- TensorInfo tmp_info(*output);
-
- if(reinterpret_output_as_3d)
- {
- // Since the output tensor has to be reinterpreted as 3D and the execute window is based on a 2D GEMM,
- // the window needs to be constructed on the 2D collapsed version of the tensor
- TensorShape tmp_shape(output->tensor_shape());
- tmp_shape.collapse(2U, 1U);
- tmp_info.set_tensor_shape(tmp_shape);
- }
-
- // Configure kernel window
- num_elems_processed_per_iteration_x = gemm_info.rhs_info.n0;
- num_elems_processed_per_iteration_y = gemm_info.lhs_info.m0;
-
- win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
- win_out = calculate_max_window(*output, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
-
- if(output_stage.type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
- {
- if(gemm_info.a_offset != 0)
- {
- AccessWindowHorizontal vector_sum_col_access(vector_sum_col, 0, num_elems_processed_per_iteration_x);
- window_changed = window_changed || update_window_and_padding(win_out, vector_sum_col_access);
- }
- // No access window needed for vector_sum_row
- ARM_COMPUTE_UNUSED(vector_sum_row);
-
- if(bias != nullptr)
- {
- AccessWindowHorizontal bias_access(bias, 0, num_elems_processed_per_iteration_x);
- window_changed = window_changed || update_window_and_padding(win_out, bias_access);
- }
-
- if(output_multipliers != nullptr && output_multipliers->dimension(0) > 1)
- {
- AccessWindowHorizontal output_multipliers_access(output_multipliers, 0, num_elems_processed_per_iteration_x);
- AccessWindowHorizontal output_shifts_access(output_shifts, 0, num_elems_processed_per_iteration_x);
- window_changed = window_changed || update_window_and_padding(win_out, output_multipliers_access, output_shifts_access);
- }
- }
-
- // Collapse along the Z direction
- // This collapse needs to be here in order to tune the Z dimension of LWS
- Window collapsed = win;
- const unsigned int dimension_to_collapse = std::min(static_cast<unsigned int>(output->num_dimensions()), 2u);
- collapsed = win.collapse(win, dimension_to_collapse);
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, collapsed);
-}
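The auto-initialisation above also fixes the output element type: with no fused output stage the kernel writes raw S32 accumulators, while a fused requantisation stage writes the data type requested by that stage. A minimal sketch of the rule, using stand-in enums rather than the library types (an assumption made purely for illustration):

// Stand-in types for illustration only; the library uses GEMMLowpOutputStageType
// and DataType instead.
enum class OutputStage { NONE, QUANTIZE_DOWN_FIXEDPOINT };
enum class DType { S32, QASYMM8, QASYMM8_SIGNED };

static DType gemmlowp_output_type(OutputStage stage, DType stage_output_type)
{
    // No fused stage: raw 32-bit accumulators. Fused stage: the requantised type.
    return stage == OutputStage::NONE ? DType::S32 : stage_output_type;
}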
-} // namespace
-
-CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel()
- : _input0(nullptr),
- _input1(nullptr),
- _output(nullptr),
- _vector_sum_col(nullptr),
- _vector_sum_row(nullptr),
- _bias(nullptr),
- _output_multipliers(nullptr),
- _output_shifts(nullptr),
- _slide_matrix_b(true),
- _reinterpret_input_as_3d(false),
- _reinterpret_output_as_3d(false),
- _use_dummy_work_items(false),
- _is_quantized_per_channel(false),
- _fuse_output_stage(false)
-{
-}
-
-void CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMKernelInfo &gemm_info,
- const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias,
- const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input0, input1, output, gemm_info, vector_sum_col, vector_sum_row, bias, output_multipliers, output_shifts);
-}
-
-void CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output,
- const GEMMKernelInfo &gemm_info,
- const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias,
- const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(),
- input1->info(),
- output->info(),
- gemm_info,
- vector_sum_col != nullptr ? vector_sum_col->info() : nullptr,
- vector_sum_row != nullptr ? vector_sum_row->info() : nullptr,
- bias != nullptr ? bias->info() : nullptr,
- output_multipliers != nullptr ? output_multipliers->info() : nullptr,
- output_shifts != nullptr ? output_shifts->info() : nullptr));
-
- auto padding_info = get_padding_info({ input0, input1, output, vector_sum_row });
- const GEMMRHSMatrixInfo rhs_info = gemm_info.rhs_info;
- const GEMMLHSMatrixInfo lhs_info = gemm_info.lhs_info;
- const GEMMLowpOutputStageInfo output_stage = gemm_info.output_stage;
- const int32_t a_offset = gemm_info.a_offset;
- const int32_t b_offset = gemm_info.b_offset;
-
- _input0 = input0;
- _input1 = input1;
- _output = output;
- _vector_sum_col = vector_sum_col;
- _vector_sum_row = vector_sum_row;
- _bias = bias;
- _output_multipliers = output_multipliers;
- _output_shifts = output_shifts;
- _reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d;
- _reinterpret_output_as_3d = (gemm_info.depth_output_gemm3d != 0);
- _use_dummy_work_items = preferred_dummy_work_items_support(CLKernelLibrary::get().get_device());
- _is_quantized_per_channel = output_stage.is_quantized_per_channel;
-
- // In case both input and output have to be reinterpreted as 3D tensors,
- // force reinterpret_input_as_3d and reinterpret_output_as_3d to be false.
- if(_reinterpret_input_as_3d == _reinterpret_output_as_3d)
- {
- _reinterpret_input_as_3d = false;
- _reinterpret_output_as_3d = false;
- }
-
- // Check if we need to slide the matrix B
- const unsigned int num_dimensions_input0 = _input0->info()->num_dimensions();
- _slide_matrix_b = (_input1->info()->num_dimensions() >= num_dimensions_input0);
-
- ElementsProcessed num_elements_processed{};
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(input0->info(),
- input1->info(),
- output->info(),
- gemm_info,
- vector_sum_col != nullptr ? vector_sum_col->info() : nullptr,
- vector_sum_row != nullptr ? vector_sum_row->info() : nullptr,
- bias != nullptr ? bias->info() : nullptr,
- output_multipliers != nullptr ? output_multipliers->info() : nullptr,
- output_shifts != nullptr ? output_shifts->info() : nullptr,
- num_elements_processed);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
-
- // If _reinterpret_input_as_3d = _reinterpret_output_as_3d = true,
- // we will dispatch a batched-GEMM to reduce the complexity of the address calculation within the OpenCL kernel.
- // This means that the actual m used by the kernel is given by output->info()->dimension(1) and not by gemm_info.m
- const unsigned int internal_m = _reinterpret_output_as_3d ? gemm_info.m : output->info()->dimension(1);
-
- // Shrink M0 to be always <= M (internal_m) to prevent out-of-bounds reads.
- // NOTE: This might have implications on heuristics and performance
- const unsigned int internal_m0 = std::min(internal_m, lhs_info.m0);
-
- // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding.
- const unsigned int partial_store_m0 = internal_m % internal_m0;
- const unsigned int partial_store_n0 = gemm_info.n % rhs_info.n0;
-
- // Create build options
- CLBuildOptions build_opts;
- build_opts.add_option_if(_reinterpret_input_as_3d, "-DREINTERPRET_INPUT_AS_3D");
- build_opts.add_option_if(_reinterpret_output_as_3d, "-DREINTERPRET_OUTPUT_AS_3D");
- build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, "-DHEIGHT_GEMM3D=" + support::cpp11::to_string(output->info()->dimension(1)));
- build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, "-DDEPTH_GEMM3D=" + support::cpp11::to_string(output->info()->dimension(2)));
- build_opts.add_option_if(!_slide_matrix_b, "-DMATRIX_B_DEPTH=" + support::cpp11::to_string(input1->info()->dimension(2)));
- build_opts.add_option_if(rhs_info.interleave, "-DRHS_INTERLEAVE");
- build_opts.add_option_if(_use_dummy_work_items, "-DDUMMY_WORK_ITEMS");
- build_opts.add_option("-DM=" + support::cpp11::to_string(internal_m));
- build_opts.add_option("-DN=" + support::cpp11::to_string(gemm_info.n));
- build_opts.add_option("-DK=" + support::cpp11::to_string(gemm_info.k));
- build_opts.add_option("-DM0=" + support::cpp11::to_string(internal_m0));
- build_opts.add_option("-DN0=" + support::cpp11::to_string(rhs_info.n0));
- build_opts.add_option("-DK0=" + support::cpp11::to_string(rhs_info.k0));
- build_opts.add_option("-DH0=" + support::cpp11::to_string(rhs_info.h0));
- build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
- build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input0->info()->data_type()));
- build_opts.add_option("-DACC_DATA_TYPE=" + get_cl_dot8_acc_type_from_data_type(input0->info()->data_type()));
-
- std::string kernel_name("gemmlowp_mm_reshaped_only_rhs_");
- kernel_name += rhs_info.transpose ? "t" : "nt";
-
- if(output_stage.type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
- {
- kernel_name += "_fused_output_stage_fixedpoint";
- _fuse_output_stage = true;
- // If a_offset == 0, vector_sum_col can be a nullptr
- if(a_offset != 0 && vector_sum_col != nullptr)
- {
- build_opts.add_option("-DA_OFFSET=" + support::cpp11::to_string(a_offset));
- build_opts.add_option_if(vector_sum_col->info()->tensor_shape().num_dimensions() > 1, "-DSUM_COL_HAS_BATCHES");
- }
- // If b_offset == 0, vector_sum_row can be a nullptr
- build_opts.add_option_if(b_offset != 0, "-DB_OFFSET=" + support::cpp11::to_string(b_offset));
- build_opts.add_option("-DK_OFFSET=" + support::cpp11::to_string(a_offset * b_offset * input0->info()->dimension(0)));
- build_opts.add_option_if(bias != nullptr, "-DADD_BIAS");
- build_opts.add_option("-DRESULT_OFFSET=" + support::cpp11::to_string(output_stage.gemmlowp_offset));
- build_opts.add_option("-DRESULT_MULTIPLIER=" + support::cpp11::to_string(output_stage.gemmlowp_multipliers[0]));
- build_opts.add_option("-DRESULT_SHIFT=" + support::cpp11::to_string(output_stage.gemmlowp_shifts[0]));
- build_opts.add_option_if(_is_quantized_per_channel, "-DPER_CHANNEL_QUANTIZATION");
-
- const int min = output_stage.gemmlowp_min_bound;
- const int max = output_stage.gemmlowp_max_bound;
-
- PixelValue min_val{};
- PixelValue max_val{};
- std::tie(min_val, max_val) = get_min_max(output->info()->data_type());
- build_opts.add_option_if(min != min_val.get<int32_t>(), "-DMIN_BOUND=" + support::cpp11::to_string(min));
- build_opts.add_option_if(max != max_val.get<int32_t>(), "-DMAX_BOUND=" + support::cpp11::to_string(max));
- }
-
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Set config_id for enabling LWS tuning
- _config_id = kernel_name;
- _config_id += "_";
- _config_id += dot8_supported(CLKernelLibrary::get().get_device()) ? "_dot8" : "";
- _config_id += "_";
- _config_id += (_reinterpret_input_as_3d ? "3di_" : "");
- _config_id += (_reinterpret_output_as_3d ? "3do_" : "");
- _config_id += support::cpp11::to_string(output->info()->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(gemm_info.k);
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.m0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.n0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.k0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.h0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.interleave);
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
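The partial-block sizes computed in configure() above let the kernel store a narrower block on the last row/column of blocks instead of requiring padded tensors. A worked sketch of the same arithmetic (values are illustrative):

#include <algorithm>

// For M = 100 rows and m0 = 8: the block stays 8 and the last row of blocks stores
// 100 % 8 = 4 rows. For M = 5 and m0 = 8: the block shrinks to 5 and no partial
// store is needed. The same rule is applied to N with n0.
static unsigned int partial_store_size(unsigned int extent, unsigned int block)
{
    const unsigned int internal_block = std::min(extent, block); // shrink the block if it exceeds the extent
    return extent % internal_block;
}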
-
-Status CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, const GEMMKernelInfo &gemm_info,
- const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias,
- const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
-{
- ElementsProcessed num_elements_processed{};
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input0, input1, output, gemm_info, vector_sum_col, vector_sum_row, bias, output_multipliers, output_shifts));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input0->clone().get(),
- input1->clone().get(),
- output->clone().get(),
- gemm_info,
- vector_sum_col != nullptr ? vector_sum_col->clone().get() : nullptr,
- vector_sum_row != nullptr ? vector_sum_row->clone().get() : nullptr,
- bias != nullptr ? bias->clone().get() : nullptr,
- output_multipliers != nullptr ? output_multipliers->clone().get() : nullptr,
- output_shifts != nullptr ? output_shifts->clone().get() : nullptr,
- num_elements_processed)
- .first);
-
- return Status{};
-}
-
-void CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- if(_input1->info()->num_dimensions() < 3)
- {
- // The stride_z for matrix B must be zero if we do not slice
- ARM_COMPUTE_ERROR_ON(_input1->info()->strides_in_bytes()[3] != 0);
- }
-
- Window slice = window.first_slice_window_3D();
- Window slice_matrix_b = slice;
-
- slice_matrix_b.set(Window::DimX, Window::Dimension(0, 1, 1));
- slice_matrix_b.set(Window::DimY, Window::Dimension(0, 1, 1));
-
- if(_reinterpret_input_as_3d)
- {
- // Pass bottom paddings to the kernel if the input has to be reinterpreted as 3D tensor
- const unsigned int idx0 = 3 * num_arguments_per_2D_tensor() + 3;
- const unsigned int total_cross_plane_pad = _input0->info()->padding().top + _input0->info()->padding().bottom;
- _kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
- }
-
- if(_reinterpret_output_as_3d)
- {
- // Pass bottom paddings to the kernel if the output has to be reinterpreted as 3D tensor
- const unsigned int idx0 = 3 * num_arguments_per_2D_tensor() + 3 + (_reinterpret_input_as_3d ? 1 : 0);
- const unsigned int total_cross_plane_pad = _output->info()->padding().top + _output->info()->padding().bottom;
- _kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
- }
-
- // Set window for vector_sum_col
- Window win_vector_sum_col = slice;
- win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0));
- win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));
-
- // Set window for vector_sum_row
- Window win_vector_sum_row = slice;
- win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
- win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0));
-    win_vector_sum_row.set(Window::DimZ, Window::Dimension(0, 0, 0));
-
- Window biases_slice = slice;
- biases_slice.set(Window::DimY, Window::Dimension(0, 1, 1));
- biases_slice.set(Window::DimZ, Window::Dimension(0, 1, 1));
-
- do
- {
- Window slice_b = slice;
- // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2
- // This scenario can happen when the matrix multiplication is used to perform a convolution operation
- if(!_slide_matrix_b)
- {
- slice_b = slice_matrix_b;
- }
-
- unsigned int idx = 0;
- add_2D_tensor_argument(idx, _input0, slice);
- add_2D_tensor_argument(idx, _input1, slice_b);
- add_2D_tensor_argument(idx, _output, slice);
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input0->info()->strides_in_bytes()[2]));
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input1->info()->strides_in_bytes()[2]));
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_output->info()->strides_in_bytes()[2]));
- if(_reinterpret_input_as_3d)
- {
- // Pass bottom paddings to the kernel if the input has to be reinterpreted as 3D tensor
- idx++;
- }
-
- if(_reinterpret_output_as_3d)
- {
- // Pass bottom paddings to the kernel if the output has to be reinterpreted as 3D tensor
- idx++;
- }
-
- if(_fuse_output_stage)
- {
- add_2D_tensor_argument_if((_vector_sum_col != nullptr), idx, _vector_sum_col, win_vector_sum_col);
- add_2D_tensor_argument_if((_vector_sum_row != nullptr), idx, _vector_sum_row, win_vector_sum_row);
- add_1D_tensor_argument_if((_bias != nullptr), idx, _bias, biases_slice);
- add_1D_tensor_argument_if(_is_quantized_per_channel, idx, _output_multipliers, biases_slice);
- add_1D_tensor_argument_if(_is_quantized_per_channel, idx, _output_shifts, biases_slice);
- }
- enqueue(queue, *this, slice, lws_hint(), _use_dummy_work_items);
- }
- while(window.slide_window_slice_3D(slice));
-}
-} // namespace arm_compute
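The MIN_BOUND/MAX_BOUND defines in the fused output stage above are only emitted when the requested clamp actually narrows the output type's natural range, which keeps the OpenCL code free of redundant clamping. A hedged sketch of that decision; the helper below is illustrative and not the library's CLBuildOptions API:

#include <cstdint>
#include <string>
#include <vector>

// Append -DMIN_BOUND/-DMAX_BOUND only when they differ from the output type's
// natural range (e.g. 0..255 for QASYMM8, -128..127 for QASYMM8_SIGNED).
static void add_bound_options(std::vector<std::string> &opts, int32_t min_bound, int32_t max_bound,
                              int32_t type_min, int32_t type_max)
{
    if(min_bound != type_min)
    {
        opts.push_back("-DMIN_BOUND=" + std::to_string(min_bound));
    }
    if(max_bound != type_max)
    {
        opts.push_back("-DMAX_BOUND=" + std::to_string(max_bound));
    }
}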
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h
deleted file mode 100644
index 222a8615e4..0000000000
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMLOWPMATRIXMULTIPLYRESHAPEDONLYRHSKERNEL_H
-#define ARM_COMPUTE_CLGEMMLOWPMATRIXMULTIPLYRESHAPEDONLYRHSKERNEL_H
-
-#include "arm_compute/core/KernelDescriptors.h"
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** OpenCL kernel to multiply matrices with QASYMM8 data type when only the input matrix RHS (input1) has been reshaped
- *
- * @note The input matrix input1 must be reshaped through @ref CLGEMMReshapeRHSMatrixKernel
- * @note For fused output stage, only GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT type is supported
- */
-class CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel : public ICLKernel
-{
-public:
- /** Default Constructor */
- CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel(const CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel &operator=(const CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel(CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel &operator=(CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel &&) = default;
- /** Initialise the kernel's input and output.
- *
- * @param[in] input0 Input tensor containing the LHS matrix. Data type supported: QASYMM8/QASYMM8_SIGNED
- * @param[in] input1 Input tensor containing the RHS reshaped matrix. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL
- * @param[out] output Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/S32.
- * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices, output stage information and RHS/LHS info.
- * Only the following values are supported for LHS info:
- * lhs_info.m0: 2,3,4,5,6,7,8
- * lhs_info.k0: 2,3,4,8,16
- * Only the following values are supported for RHS info:
- * rhs_info.n0: 2,3,4,8,16
- * rhs_info.k0: same as lhs_info.k0
- * rhs_info.transpose: true
- * @param[in] vector_sum_col (Optional) Input row-vector of sums of all the entries in each column of matrix B.
- * Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: S32
- * @param[in] vector_sum_row (Optional) Input row-vector of sums of all the entries in each row of matrix A.
- * Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: S32
- * @param[in] bias (Optional) Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
- * Biases are 1D tensor with dimensions [OFM]. Data type supported: S32.
- * @param[in] output_multipliers (Optional) Output multipliers tensor. In case of per-channel quantization, the number of multipliers must be equal to the number of filters (OFM).
- * Supported data types: S32.
- * @param[in] output_shifts (Optional) Output shifts tensor. In case of per-channel quantization, the number of multipliers must be equal to the number of filters (OFM).
- * Supported data types: S32.
- */
- void configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMKernelInfo &gemm_info, const ICLTensor *vector_sum_col = nullptr,
- const ICLTensor *vector_sum_row = nullptr, const ICLTensor *bias = nullptr, const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr);
- /** Initialise the kernel's input and output.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input0 Input tensor containing the LHS matrix. Data type supported: QASYMM8/QASYMM8_SIGNED
- * @param[in] input1 Input tensor containing the RHS reshaped matrix. Data type supported: same as @p input0
- * @param[out] output Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/S32.
- * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices, output stage information and RHS/LHS info.
- * Only the following values are supported for LHS info:
- * lhs_info.m0: 2,3,4,5,6,7,8
- * lhs_info.k0: 2,3,4,8,16
- * Only the following values are supported for RHS info:
- * rhs_info.n0: 2,3,4,8,16
- * rhs_info.k0: same as lhs_info.k0
- * rhs_info.transpose: true
- * @param[in] vector_sum_col (Optional) Input row-vector of sums of all the entries in each column of matrix B.
- * Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: S32
- * @param[in] vector_sum_row (Optional) Input row-vector of sums of all the entries in each row of matrix A.
- * Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: S32
- * @param[in] bias (Optional) Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
- * Biases are 1D tensor with dimensions [OFM]. Data type supported: S32.
- * @param[in] output_multipliers (Optional) Output multipliers tensor. In case of per-channel quantization, the number of multipliers must be equal to the number of filters (OFM).
- * Supported data types: S32.
- * @param[in] output_shifts (Optional) Output shifts tensor. In case of per-channel quantization, the number of multipliers must be equal to the number of filters (OFM).
- * Supported data types: S32.
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMKernelInfo &gemm_info, const ICLTensor *vector_sum_col = nullptr,
- const ICLTensor *vector_sum_row = nullptr, const ICLTensor *bias = nullptr, const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr);
- /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel
- *
- * @param[in] input0 Input tensor info for the LHS matrix. Data type supported: QASYMM8/QASYMM8_SIGNED
- * @param[in] input1 Input tensor info for the RHS reshaped matrix. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL
- * @param[in] output Output tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/S32.
- * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices, output stage information and RHS/LHS info.
- * Only the following values are supported for LHS info:
- * lhs_info.m0: 2,3,4,5,6,7,8
- * lhs_info.k0: 2,3,4,8,16
- * Only the following values are supported for RHS info:
- * rhs_info.n0: 2,3,4,8,16
- * rhs_info.k0: same as lhs_info.k0
- * rhs_info.transpose: true
- * @param[in] vector_sum_col (Optional) Input row-vector info of sums of all the entries in each column of matrix B.
- * Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: S32
- * @param[in] vector_sum_row (Optional) Input row-vector info of sums of all the entries in each row of matrix A.
- * Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: S32
- * @param[in] bias (Optional) Biases tensor info. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
- * Biases are 1D tensor with dimensions [OFM]. Data type supported: S32.
- * @param[in] output_multipliers (Optional) Output multipliers tensor info. In case of per-channel quantization, the number of multipliers must be equal to the number of filters (OFM).
- * Supported data types: S32.
- * @param[in] output_shifts (Optional) Output shifts tensor info. In case of per-channel quantization, the number of multipliers must be equal to the number of filters (OFM).
- * Supported data types: S32.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, const GEMMKernelInfo &gemm_info, const ITensorInfo *vector_sum_col = nullptr,
- const ITensorInfo *vector_sum_row = nullptr, const ITensorInfo *bias = nullptr, const ITensorInfo *output_multipliers = nullptr,
- const ITensorInfo *output_shifts = nullptr);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_input0;
- const ICLTensor *_input1;
- ICLTensor *_output;
- const ICLTensor *_vector_sum_col;
- const ICLTensor *_vector_sum_row;
- const ICLTensor *_bias;
- const ICLTensor *_output_multipliers;
- const ICLTensor *_output_shifts;
- bool _slide_matrix_b;
- bool _reinterpret_input_as_3d;
- bool _reinterpret_output_as_3d;
- bool _use_dummy_work_items;
- bool _is_quantized_per_channel;
- bool _fuse_output_stage;
-};
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CLGEMMLOWPMATRIXMULTIPLYRESHAPEDONLYRHSKERNEL_H */ \ No newline at end of file
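As the documentation above states, the offset-correction inputs are optional and tied to the quantization offsets: vector_sum_col is needed only when a_offset != 0 and vector_sum_row only when b_offset != 0; the unused ones can be passed as nullptr, matching the declared default arguments. A small standalone sketch of that rule, illustrative only:

#include <cstdint>

// Which optional offset-correction inputs a caller must provide; the others may
// stay nullptr as in the default arguments of configure()/validate() above.
struct RequiredSums
{
    bool need_vector_sum_col; // per-column sums of B, required when a_offset != 0
    bool need_vector_sum_row; // per-row sums of A,    required when b_offset != 0
};

static RequiredSums required_sums(int32_t a_offset, int32_t b_offset)
{
    return { a_offset != 0, b_offset != 0 };
}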
diff --git a/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp b/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp
deleted file mode 100644
index c7844b9c28..0000000000
--- a/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h"
-
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias,
- int32_t a_offset, int32_t b_offset)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32);
-
- if(bias != nullptr)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
- ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != bias->dimension(0));
- }
-
- // If a_offset == 0, vector_sum_col can be a nullptr
- if(a_offset != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != mm_result->dimension(0));
- }
-
- // If b_offset == 0, vector_sum_row can be a nullptr
- if(b_offset != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);
-
- // Check if input is a 3D reinterpretation
- const bool reinterpret_as_3d = mm_result->num_dimensions() > 1 && mm_result->tensor_shape().y() != vector_sum_row->tensor_shape().x();
-
- // Validate input
- ARM_COMPUTE_RETURN_ERROR_ON(reinterpret_as_3d && vector_sum_row->dimension(0) != (mm_result->dimension(1) * mm_result->dimension(2)));
- ARM_COMPUTE_RETURN_ERROR_ON(!reinterpret_as_3d && vector_sum_row->dimension(0) != mm_result->dimension(1));
-
- TensorShape output_shape = mm_result->tensor_shape();
- if(output_shape.num_dimensions() > 1)
- {
- const unsigned int output_batch_idx = reinterpret_as_3d ? 3 : 2;
-
- TensorShape vector_sum_row_shape = vector_sum_row->tensor_shape();
- vector_sum_row_shape.collapse_from(1);
- output_shape.collapse_from(output_batch_idx);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[output_batch_idx],
-                                            "vector_sum_row must have the same number of batches as the output tensor");
-
- if(a_offset != 0)
- {
- TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape();
- vector_sum_col_shape.collapse_from(1);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 && vector_sum_col_shape[1] != vector_sum_row_shape[1],
- "vector_sum_col tensor must have the same number of batches of vector_sum_row_shape or the number of batches must be set to 1");
- }
- }
- }
-
- return Status{};
-}
-} // namespace
-
-CLGEMMLowpOffsetContributionKernel::CLGEMMLowpOffsetContributionKernel()
- : _vector_sum_col(nullptr), _vector_sum_row(nullptr), _mm_result(nullptr), _bias(nullptr)
-{
-}
-
-void CLGEMMLowpOffsetContributionKernel::configure(ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias, int32_t k, int32_t a_offset,
- int32_t b_offset)
-{
- configure(CLKernelLibrary::get().get_compile_context(), mm_result, vector_sum_col, vector_sum_row, bias, k, a_offset, b_offset);
-}
-
-void CLGEMMLowpOffsetContributionKernel::configure(const CLCompileContext &compile_context, ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row,
- const ICLTensor *bias,
- int32_t k, int32_t a_offset,
- int32_t b_offset)
-{
- // Perform validate step
- ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(mm_result->info(),
- vector_sum_col != nullptr ? vector_sum_col->info() : nullptr,
- vector_sum_row != nullptr ? vector_sum_row->info() : nullptr,
- bias != nullptr ? bias->info() : nullptr,
- a_offset, b_offset)); // NOLINT
-
- auto padding_info = get_padding_info({ mm_result, vector_sum_col, vector_sum_row, bias });
-
- _vector_sum_col = vector_sum_col;
- _vector_sum_row = vector_sum_row;
- _mm_result = mm_result;
- _bias = bias;
-
- // Check if input is a 3D reinterpretation
- const bool reinterpret_as_3d = vector_sum_row != nullptr
- && mm_result->info()->num_dimensions() > 1
- && mm_result->info()->tensor_shape().y() != vector_sum_row->info()->tensor_shape().x();
-
- const unsigned int num_elems_processed_per_iteration = adjust_vec_size(4, mm_result->info()->dimension(0));
-
- // Set the arguments to pass at compile time
- CLBuildOptions build_opts;
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(mm_result->info()->dimension(0) % num_elems_processed_per_iteration));
-
- // If a_offset == 0, vector_sum_col can be a nullptr
- if(a_offset != 0)
- {
- build_opts.add_option("-DA_OFFSET=" + support::cpp11::to_string(a_offset));
- build_opts.add_option_if(vector_sum_col->info()->tensor_shape().num_dimensions() > 1, "-DSUM_COL_HAS_BATCHES");
- }
- // If b_offset == 0, vector_sum_row can be a nullptr
- build_opts.add_option_if(b_offset != 0, "-DB_OFFSET=" + support::cpp11::to_string(b_offset));
- build_opts.add_option("-DK_OFFSET=" + support::cpp11::to_string(a_offset * b_offset * k));
- build_opts.add_option_if(reinterpret_as_3d, "-DHEIGHT_INPUT3D=" + support::cpp11::to_string(mm_result->info()->dimension(1)));
- build_opts.add_option_if(reinterpret_as_3d, "-DDEPTH_INPUT3D=" + support::cpp11::to_string(mm_result->info()->dimension(2)));
- build_opts.add_option_if(bias != nullptr, "-DADD_BIAS");
-
- std::string kernel_name("gemmlowp_offset_contribution");
-
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Configure kernel window
- Window win = calculate_max_window(*mm_result->info(), Steps(num_elems_processed_per_iteration));
- ICLKernel::configure_internal(win);
-
- // Set config_id for enabling LWS tuning
- _config_id = kernel_name + "_";
- _config_id += support::cpp11::to_string(mm_result->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(mm_result->info()->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(mm_result->info()->dimension(2));
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
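The VEC_SIZE/VEC_SIZE_LEFTOVER pair above describes how work-items vectorise across the X dimension: a vector of up to four elements, plus a leftover tail of width % VEC_SIZE elements handled at the end of each row. The sketch below approximates adjust_vec_size() by simply clamping to the row width; the real helper may behave differently in edge cases (an assumption, not quoted from this diff):

#include <algorithm>

static void vec_size_options(unsigned int width, unsigned int &vec_size, unsigned int &leftover)
{
    vec_size = std::max(1u, std::min(4u, width)); // requested width of 4, clamped to the row width (approximation)
    leftover = width % vec_size;                  // elements handled by the partial vector at the row end
}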
-
-Status CLGEMMLowpOffsetContributionKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias,
- int32_t a_offset, int32_t b_offset)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, a_offset, b_offset));
- return Status{};
-}
-
-void CLGEMMLowpOffsetContributionKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
- Window slice = collapsed.first_slice_window_3D();
-
- // Set window for vector_sum_col
- Window win_vector_sum_col = slice;
- win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0));
- win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));
-
- // Set window for vector_sum_row
- Window win_vector_sum_row = slice;
- win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
- win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0));
-    win_vector_sum_row.set(Window::DimZ, Window::Dimension(0, 0, 0));
-
- Window biases_slice = slice;
- biases_slice.set(Window::DimY, Window::Dimension(0, 1, 1));
- biases_slice.set(Window::DimZ, Window::Dimension(0, 1, 1));
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, _mm_result, slice);
- add_2D_tensor_argument_if((_vector_sum_col != nullptr), idx, _vector_sum_col, win_vector_sum_col);
- add_2D_tensor_argument_if((_vector_sum_row != nullptr), idx, _vector_sum_row, win_vector_sum_row);
- add_1D_tensor_argument_if((_bias != nullptr), idx, _bias, biases_slice);
-
- enqueue(queue, *this, slice, lws_hint());
- }
- while(collapsed.slide_window_slice_3D(slice));
-}
-} // namespace arm_compute
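For reference, the in-place update this kernel performs (the formula documented in the header that follows) can be written as a plain scalar loop. This is an illustrative reference implementation, not the OpenCL kernel:

#include <cstdint>
#include <vector>

// mm_result[i][j] += vector_sum_col[j] * a_offset + vector_sum_row[i] * b_offset
//                    + a_offset * b_offset * k
// vector_sum_col / vector_sum_row may be empty when the corresponding offset is 0.
static void offset_contribution_reference(std::vector<int32_t> &mm_result, unsigned int rows, unsigned int cols,
                                          const std::vector<int32_t> &vector_sum_col,
                                          const std::vector<int32_t> &vector_sum_row,
                                          int32_t a_offset, int32_t b_offset, int32_t k)
{
    const int32_t k_offset = a_offset * b_offset * k;
    for(unsigned int i = 0; i < rows; ++i)
    {
        for(unsigned int j = 0; j < cols; ++j)
        {
            int32_t contribution = k_offset;
            if(a_offset != 0)
            {
                contribution += vector_sum_col[j] * a_offset;
            }
            if(b_offset != 0)
            {
                contribution += vector_sum_row[i] * b_offset;
            }
            mm_result[i * cols + j] += contribution;
        }
    }
}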
diff --git a/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h b/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h
deleted file mode 100644
index f8705595a0..0000000000
--- a/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMLOWPOFFSETCONTRIBUTIONKERNEL_H
-#define ARM_COMPUTE_CLGEMMLOWPOFFSETCONTRIBUTIONKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** OpenCL kernel used to add the offset contribution after the matrix multiplication. The computation is performed in-place
- *
- * This kernel takes a final int32 accumulator value (the output of the matrix multiplication),
- * and adds to it the offset contribution of matrix A and matrix B in-place.
- *
- * The final result is:
- *
- * mm_result[i][k] = mm_result[i][k] +
- * (vector_sum_col[k] * a_offset) +
- * (vector_sum_row[i] * b_offset) +
- * (a_offset * b_offset * k)
- *
- */
-class CLGEMMLowpOffsetContributionKernel : public ICLKernel
-{
-public:
- /** Constructor */
- CLGEMMLowpOffsetContributionKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers)*/
- CLGEMMLowpOffsetContributionKernel(const CLGEMMLowpOffsetContributionKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers)*/
- CLGEMMLowpOffsetContributionKernel &operator=(const CLGEMMLowpOffsetContributionKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLGEMMLowpOffsetContributionKernel(CLGEMMLowpOffsetContributionKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLGEMMLowpOffsetContributionKernel &operator=(CLGEMMLowpOffsetContributionKernel &&) = default;
- /** Initialise the kernel's input and output.
- *
- * @param[in, out] mm_result Input tensor containing the result of the matrix multiplication. Data type supported: S32
- * @param[in] vector_sum_col Input row-vector of sums of all the entries in each column of matrix B.
- * Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: same as @p mm_result
- * @param[in] vector_sum_row Input row-vector of sums of all the entries in each row of matrix A.
- * Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: same as @p mm_result
- * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
- * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
- * @param[in] k Number of matrix A columns or Matrix B rows
- * @param[in] a_offset Offset to be added to each element of the matrix A.
- * @param[in] b_offset Offset to be added to each element of the matrix B.
- */
- void configure(ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias, int32_t k, int32_t a_offset, int32_t b_offset);
- /** Initialise the kernel's input and output.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in, out] mm_result Input tensor containing the result of the matrix multiplication. Data type supported: S32
- * @param[in] vector_sum_col Input row-vector of sums of all the entries in each column of matrix B.
- * Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: same as @p mm_result
- * @param[in] vector_sum_row Input row-vector of sums of all the entries in each row of matrix A.
- * Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: same as @p mm_result
- * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
- * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
- * @param[in] k Number of matrix A columns or Matrix B rows
- * @param[in] a_offset Offset to be added to each element of the matrix A.
- * @param[in] b_offset Offset to be added to each element of the matrix B.
- */
- void configure(const CLCompileContext &compile_context, ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias, int32_t k, int32_t a_offset,
- int32_t b_offset);
- /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpOffsetContributionKernel
- *
-     * @param[in] mm_result      Input tensor containing the result of the matrix multiplication. Data type supported: S32
- * @param[in] vector_sum_col Input row-vector of sums of all the entries in each column of matrix B.
- * Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: same as @p mm_result
- * @param[in] vector_sum_row Input row-vector of sums of all the entries in each row of matrix A.
- * Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: same as @p mm_result
- * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
- * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
- * @param[in] a_offset Offset to be added to each element of the matrix A.
- * @param[in] b_offset Offset to be added to each element of the matrix B.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, int32_t a_offset, int32_t b_offset);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_vector_sum_col;
- const ICLTensor *_vector_sum_row;
- ICLTensor *_mm_result;
- const ICLTensor *_bias;
-};
-} // namespace arm_compute
-
-#endif /* ARM_COMPUTE_CLGEMMLOWPOFFSETCONTRIBUTIONKERNEL_H */
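
The offset contribution documented in the header above reduces to a simple per-element update of the int32 accumulators. The function below is a minimal reference sketch, not part of the library: the helper name is illustrative, it assumes a row-major M x N accumulator buffer, and plain std::vector containers stand in for the ICLTensor arguments (empty vectors cover the nullptr cases where a_offset == 0, b_offset == 0 or no bias is used).

#include <cstdint>
#include <vector>

// Reference sketch of the in-place offset contribution:
//   mm_result[i][j] += a_offset * vector_sum_col[j] + b_offset * vector_sum_row[i]
//                      + a_offset * b_offset * K (+ bias[j])
void offset_contribution_reference(std::vector<int32_t>       &mm_result,      // M x N accumulators, row-major
                                   const std::vector<int32_t> &vector_sum_col, // length N, sums of B columns (may be empty)
                                   const std::vector<int32_t> &vector_sum_row, // length M, sums of A rows (may be empty)
                                   const std::vector<int32_t> &bias,           // length N, shared bias (may be empty)
                                   int M, int N, int K, int32_t a_offset, int32_t b_offset)
{
    const int32_t k_offset = a_offset * b_offset * K; // corresponds to the -DK_OFFSET build option
    for(int i = 0; i < M; ++i)
    {
        for(int j = 0; j < N; ++j)
        {
            int32_t acc = mm_result[i * N + j];
            if(a_offset != 0) { acc += a_offset * vector_sum_col[j]; } // column sums of matrix B
            if(b_offset != 0) { acc += b_offset * vector_sum_row[i]; } // row sums of matrix A
            acc += k_offset;
            if(!bias.empty())  { acc += bias[j]; }                     // -DADD_BIAS path
            mm_result[i * N + j] = acc;
        }
    }
}
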
diff --git a/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp b/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp
deleted file mode 100644
index b41d8704bd..0000000000
--- a/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output,
- int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage, const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32);
-
- if(bias != nullptr)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
- ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != bias->dimension(0));
- }
-
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_multipliers, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON(output_multipliers->num_dimensions() > 1);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_shifts, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON(output_shifts->num_dimensions() > 1);
- if(output_stage.is_quantized_per_channel)
- {
- ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != output_shifts->dimension(0));
- ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != output_multipliers->dimension(0));
- }
-
- // If a_offset == 0, vector_sum_col can be a nullptr
- if(a_offset != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != mm_result->dimension(0));
- }
-
- // If b_offset == 0, vector_sum_row can be a nullptr
- if(b_offset != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);
-
- // Check if input is a 3D reinterpretation
- const bool reinterpret_as_3d = mm_result->num_dimensions() > 1 && mm_result->tensor_shape().y() != vector_sum_row->tensor_shape().x();
-
- // Validate input
- ARM_COMPUTE_RETURN_ERROR_ON(reinterpret_as_3d && vector_sum_row->dimension(0) != (mm_result->dimension(1) * mm_result->dimension(2)));
- ARM_COMPUTE_RETURN_ERROR_ON(!reinterpret_as_3d && vector_sum_row->dimension(0) != mm_result->dimension(1));
-
- TensorShape output_shape = mm_result->tensor_shape();
- if(output_shape.num_dimensions() > 1)
- {
- const unsigned int output_batch_idx = reinterpret_as_3d ? 3 : 2;
-
- TensorShape vector_sum_row_shape = vector_sum_row->tensor_shape();
- vector_sum_row_shape.collapse_from(1);
- output_shape.collapse_from(output_batch_idx);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[output_batch_idx],
-                                            "mm_result tensor must have the same number of batches as the output tensor");
-
- if(a_offset != 0)
- {
- TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape();
- vector_sum_col_shape.collapse_from(1);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 && vector_sum_col_shape[1] != vector_sum_row_shape[1],
-                                                "vector_sum_col tensor must have the same number of batches as vector_sum_row or the number of batches must be set to 1");
- }
- }
- }
-
- ARM_COMPUTE_RETURN_ERROR_ON(output_stage.type == GEMMLowpOutputStageType::NONE);
- // Checks performed when output is configured
- if((output != nullptr) && (output->total_size() != 0))
- {
- ARM_COMPUTE_RETURN_ERROR_ON(output_stage.output_data_type != output->data_type());
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mm_result, output);
- }
-
- ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound > output_stage.gemmlowp_max_bound);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(output_stage.gemmlowp_multipliers.size() != output_stage.gemmlowp_shifts.size(), "per channel quantization info is incorrect");
-
- return Status{};
-}
-} // namespace
-
-CLGEMMLowpOffsetContributionOutputStageKernel::CLGEMMLowpOffsetContributionOutputStageKernel()
- : _mm_result(nullptr),
- _vector_sum_col(nullptr),
- _vector_sum_row(nullptr),
- _bias(nullptr),
- _output(nullptr),
- _output_multipliers(nullptr),
- _output_shifts(nullptr),
- _is_quantized_per_channel(false)
-{
-}
-
-void CLGEMMLowpOffsetContributionOutputStageKernel::configure(const ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias, ICLTensor *output,
- int32_t k, int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage,
- const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
-{
- configure(CLKernelLibrary::get().get_compile_context(), mm_result, vector_sum_col, vector_sum_row, bias, output, k, a_offset, b_offset, output_stage, output_multipliers, output_shifts);
-}
-
-void CLGEMMLowpOffsetContributionOutputStageKernel::configure(const CLCompileContext &compile_context, const ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row,
- const ICLTensor *bias, ICLTensor *output,
- int32_t k, int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage,
- const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
-{
- // Perform validate step
- ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, output, output_multipliers, output_shifts);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(mm_result->info(),
- vector_sum_col != nullptr ? vector_sum_col->info() : nullptr,
- vector_sum_row != nullptr ? vector_sum_row->info() : nullptr,
- bias != nullptr ? bias->info() : nullptr,
- output->info(),
- a_offset, b_offset, output_stage,
- output_multipliers->info(), output_shifts->info())); // NOLINT
-
- auto padding_info = get_padding_info({ mm_result, vector_sum_col, vector_sum_row, bias, output, output_multipliers, output_shifts });
-
- const int min = output_stage.gemmlowp_min_bound;
- const int max = output_stage.gemmlowp_max_bound;
-
- _vector_sum_col = vector_sum_col;
- _vector_sum_row = vector_sum_row;
- _mm_result = mm_result;
- _bias = bias;
- _output = output;
- _output_multipliers = output_multipliers;
- _output_shifts = output_shifts;
- _is_quantized_per_channel = output_stage.is_quantized_per_channel;
-
- // Check if input is a 3D reinterpretation
- const bool reinterpret_as_3d = vector_sum_row != nullptr
- && mm_result->info()->num_dimensions() > 1
- && mm_result->info()->tensor_shape().y() != vector_sum_row->info()->tensor_shape().x();
-
- // Auto initialize the output
- auto_init_if_empty(*output->info(), mm_result->info()->clone()->set_data_type(output_stage.output_data_type));
-
- const unsigned int num_elems_processed_per_iteration = adjust_vec_size(4, mm_result->info()->dimension(0));
-
- // Set the arguments to pass at compile time
- CLBuildOptions build_opts;
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(mm_result->info()->dimension(0) % num_elems_processed_per_iteration));
-
- // If a_offset == 0, vector_sum_col can be a nullptr
- if(a_offset != 0)
- {
- build_opts.add_option("-DA_OFFSET=" + support::cpp11::to_string(a_offset));
- build_opts.add_option_if(vector_sum_col->info()->tensor_shape().num_dimensions() > 1, "-DSUM_COL_HAS_BATCHES");
- }
- // If b_offset == 0, vector_sum_row can be a nullptr
- build_opts.add_option_if(b_offset != 0, "-DB_OFFSET=" + support::cpp11::to_string(b_offset));
- build_opts.add_option("-DK_OFFSET=" + support::cpp11::to_string(a_offset * b_offset * k));
- build_opts.add_option_if(reinterpret_as_3d, "-DHEIGHT_INPUT3D=" + support::cpp11::to_string(mm_result->info()->dimension(1)));
- build_opts.add_option_if(reinterpret_as_3d, "-DDEPTH_INPUT3D=" + support::cpp11::to_string(mm_result->info()->dimension(2)));
- build_opts.add_option_if(bias != nullptr, "-DADD_BIAS");
- build_opts.add_option("-DRESULT_OFFSET=" + support::cpp11::to_string(output_stage.gemmlowp_offset));
- build_opts.add_option("-DRESULT_MULTIPLIER=" + support::cpp11::to_string(output_stage.gemmlowp_multipliers[0]));
- build_opts.add_option("-DRESULT_SHIFT=" + support::cpp11::to_string(output_stage.gemmlowp_shifts[0]));
- build_opts.add_option_if(_is_quantized_per_channel, "-DPER_CHANNEL_QUANTIZATION");
- build_opts.add_option("-DOUTPUT_DATA_TYPE=" + get_cl_type_from_data_type(output->info()->data_type()));
-
- PixelValue min_val{};
- PixelValue max_val{};
- std::tie(min_val, max_val) = get_min_max(output->info()->data_type());
- build_opts.add_option_if((min > min_val.get<int32_t>()), "-DMIN_BOUND=" + support::cpp11::to_string(min));
- build_opts.add_option_if((max < max_val.get<int32_t>()), "-DMAX_BOUND=" + support::cpp11::to_string(max));
-
- std::string kernel_name("gemmlowp_offset_contribution");
- kernel_name += "_" + string_from_gemmlowp_output_stage(output_stage.type);
-
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Configure kernel window
- Window win = calculate_max_window(*mm_result->info(), Steps(num_elems_processed_per_iteration));
- ICLKernel::configure_internal(win);
-
- // Set config_id for enabling LWS tuning
- _config_id = kernel_name + "_";
- _config_id += support::cpp11::to_string(mm_result->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(mm_result->info()->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(mm_result->info()->dimension(2));
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status CLGEMMLowpOffsetContributionOutputStageKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias,
- const ITensorInfo *output, int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage,
- const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, output, a_offset, b_offset, output_stage, output_multipliers, output_shifts));
- return Status{};
-}
-
-void CLGEMMLowpOffsetContributionOutputStageKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
- Window slice = collapsed.first_slice_window_3D();
-
- // Set window for vector_sum_col
- Window win_vector_sum_col = slice;
- win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0));
- win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));
-
- // Set window for vector_sum_row
- Window win_vector_sum_row = slice;
- win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
- win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0));
-    win_vector_sum_row.set(Window::DimZ, Window::Dimension(0, 0, 0));
-
- Window biases_slice = slice;
- biases_slice.set(Window::DimY, Window::Dimension(0, 1, 1));
- biases_slice.set(Window::DimZ, Window::Dimension(0, 1, 1));
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, _mm_result, slice);
- add_2D_tensor_argument_if((_vector_sum_col != nullptr), idx, _vector_sum_col, win_vector_sum_col);
- add_2D_tensor_argument_if((_vector_sum_row != nullptr), idx, _vector_sum_row, win_vector_sum_row);
- add_1D_tensor_argument_if((_bias != nullptr), idx, _bias, biases_slice);
- add_3D_tensor_argument(idx, _output, slice);
- add_1D_tensor_argument_if(_is_quantized_per_channel, idx, _output_multipliers, biases_slice);
- add_1D_tensor_argument_if(_is_quantized_per_channel, idx, _output_shifts, biases_slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(collapsed.slide_window_slice_3D(slice));
-}
-} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h b/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h
deleted file mode 100644
index 15f54d17a5..0000000000
--- a/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMLOWPOFFSETCONTRIBUTIONOUTPUTSTAGEKERNEL_H
-#define ARM_COMPUTE_CLGEMMLOWPOFFSETCONTRIBUTIONOUTPUTSTAGEKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** OpenCL kernel used to add the offset contribution after the matrix multiplication and perform the output stage.
- *
- * This kernel takes a final int32 accumulator value (the output of the matrix multiplication), adds to it the offset contribution
- * of matrix A and matrix B and performs the output stage defined by the output_stage argument
- *
- * @note For quantized computations the output data type for auto-initialization must be passed as part of the @ref GEMMLowpOutputStageInfo.
- */
-class CLGEMMLowpOffsetContributionOutputStageKernel : public ICLKernel
-{
-public:
- /** Constructor */
- CLGEMMLowpOffsetContributionOutputStageKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers)*/
- CLGEMMLowpOffsetContributionOutputStageKernel(const CLGEMMLowpOffsetContributionOutputStageKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers)*/
- CLGEMMLowpOffsetContributionOutputStageKernel &operator=(const CLGEMMLowpOffsetContributionOutputStageKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLGEMMLowpOffsetContributionOutputStageKernel(CLGEMMLowpOffsetContributionOutputStageKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLGEMMLowpOffsetContributionOutputStageKernel &operator=(CLGEMMLowpOffsetContributionOutputStageKernel &&) = default;
- /** Initialise the kernel's input and output.
- *
- * @param[in] mm_result Input tensor containing the result of the matrix multiplication. Data type supported: S32
- * @param[in] vector_sum_col Input row-vector of sums of all the entries in each column of matrix B.
- * Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: same as @p mm_result
- * @param[in] vector_sum_row Input row-vector of sums of all the entries in each row of matrix A.
- * Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: same as @p mm_result
- * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
- * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
- * @param[out] output Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED.
- * @param[in] k Number of matrix A columns or Matrix B rows
- * @param[in] a_offset Offset to be added to each element of the matrix A.
- * @param[in] b_offset Offset to be added to each element of the matrix B.
- * @param[in] output_stage GEMMLowp output stage info
- * @param[in] output_multipliers Output multipliers tensor. In case of per-channel quantization, the number of multipliers must be equal to the number of filters (OFM).
- * Supported data types: S32
- * @param[in] output_shifts Output shifts tensor. In case of per-channel quantization, the number of multipliers must be equal to the number of filters (OFM).
- * Supported data types: S32
- */
- void configure(const ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias, ICLTensor *output, int32_t k, int32_t a_offset, int32_t b_offset,
- const GEMMLowpOutputStageInfo &output_stage, const ICLTensor *output_multipliers, const ICLTensor *output_shifts);
- /** Initialise the kernel's input and output.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] mm_result Input tensor containing the result of the matrix multiplication. Data type supported: S32
- * @param[in] vector_sum_col Input row-vector of sums of all the entries in each column of matrix B.
- * Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: same as @p mm_result
- * @param[in] vector_sum_row Input row-vector of sums of all the entries in each row of matrix A.
- * Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: same as @p mm_result
- * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
- * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
- * @param[out] output Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED.
- * @param[in] k Number of matrix A columns or Matrix B rows
- * @param[in] a_offset Offset to be added to each element of the matrix A.
- * @param[in] b_offset Offset to be added to each element of the matrix B.
- * @param[in] output_stage GEMMLowp output stage info
- * @param[in] output_multipliers Output multipliers tensor. In case of per-channel quantization, the number of multipliers must be equal to the number of filters (OFM).
- * Supported data types: S32
- * @param[in] output_shifts Output shifts tensor. In case of per-channel quantization, the number of multipliers must be equal to the number of filters (OFM).
- * Supported data types: S32
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias, ICLTensor *output,
- int32_t k,
- int32_t a_offset, int32_t b_offset,
- const GEMMLowpOutputStageInfo &output_stage, const ICLTensor *output_multipliers, const ICLTensor *output_shifts);
-    /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpOffsetContributionOutputStageKernel
- *
-     * @param[in] mm_result          Input tensor containing the result of the matrix multiplication. Data type supported: S32
- * @param[in] vector_sum_col Input row-vector of sums of all the entries in each column of matrix B.
- * Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: same as @p mm_result
- * @param[in] vector_sum_row Input row-vector of sums of all the entries in each row of matrix A.
- * Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: same as @p mm_result
- * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
- * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
- * @param[in] output Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED.
- * @param[in] a_offset Offset to be added to each element of the matrix A.
- * @param[in] b_offset Offset to be added to each element of the matrix B.
- * @param[in] output_stage GEMMLowp output stage info
- * @param[in] output_multipliers Output multipliers tensor info. In case of per-channel quantization, the number of multipliers must be equal to the number of filters (OFM).
- * Supported data types: S32
- * @param[in] output_shifts Output shifts tensor info. In case of per-channel quantization, the number of multipliers must be equal to the number of filters (OFM).
- * Supported data types: S32
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output, int32_t a_offset,
- int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage, const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_mm_result;
- const ICLTensor *_vector_sum_col;
- const ICLTensor *_vector_sum_row;
- const ICLTensor *_bias;
- ICLTensor *_output;
- const ICLTensor *_output_multipliers;
- const ICLTensor *_output_shifts;
- bool _is_quantized_per_channel;
-};
-} // namespace arm_compute
-
-#endif /* ARM_COMPUTE_CLGEMMLOWPOFFSETCONTRIBUTIONOUTPUTSTAGEKERNEL_H */
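
One detail worth calling out from the header above is how the fused kernel picks its requantization parameters: with per-channel quantization there is one multiplier/shift per output column (OFM), otherwise only entry 0 of output_multipliers/output_shifts applies to the whole tensor (the RESULT_MULTIPLIER/RESULT_SHIFT defines versus PER_CHANNEL_QUANTIZATION). The sketch below is an assumption about that selection, not kernel source; the struct and function names are illustrative and std::vector stands in for the tensors.

#include <cstdint>
#include <vector>

struct RequantParams
{
    int32_t multiplier;
    int32_t shift;
};

// Sketch: choose the output-stage parameters for the accumulator in column 'col'.
// Per-channel quantization indexes by the output column; otherwise entry 0 is used.
RequantParams select_requant_params(const std::vector<int32_t> &output_multipliers,
                                    const std::vector<int32_t> &output_shifts,
                                    bool is_quantized_per_channel, int col)
{
    const int idx = is_quantized_per_channel ? col : 0;
    return RequantParams{ output_multipliers[idx], output_shifts[idx] };
}
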
diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.cpp
deleted file mode 100644
index 6a58d5e202..0000000000
--- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.cpp
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Copyright (c) 2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo *info)
-{
- ARM_COMPUTE_UNUSED(info);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::S32);
-
- // Check biases if exist
- if(bias != nullptr)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, bias);
- ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
- ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != bias->dimension(0));
- }
-
- if(output->total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() != info->output_data_type, "Mismatching output data type");
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
- }
-
- return Status{};
-}
-} // namespace
-
-CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel::CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel()
- : _input(nullptr), _bias(nullptr), _output(nullptr)
-{
-}
-
-Status CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output,
- const GEMMLowpOutputStageInfo *info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, bias, output, info));
-
- return Status{};
-}
-
-void CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output,
- const GEMMLowpOutputStageInfo *info)
-{
- // Perform validate step
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr) ? bias->info() : nullptr, output->info(), info));
-
- auto padding_info = get_padding_info({ input, bias, output });
-
-    // Output auto initialization if not yet initialized
- auto_init_if_empty(*output->info(), input->info()->clone()->set_data_type(info->output_data_type));
-
- _input = input;
- _bias = bias;
- _output = output;
-
- const unsigned int num_elems_processed_per_iteration = adjust_vec_size(4, input->info()->dimension(0));
-
- // Set the arguments to pass at compile time
- auto min = info->gemmlowp_min_bound;
- auto max = info->gemmlowp_max_bound;
- CLBuildOptions build_opts;
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(input->info()->dimension(0) % num_elems_processed_per_iteration));
- build_opts.add_option("-DRESULT_OFFSET_AFTER_SHIFT=" + support::cpp11::to_string(info->gemmlowp_offset));
- build_opts.add_option("-DRESULT_FIXEDPOINT_MULTIPLIER=" + support::cpp11::to_string(info->gemmlowp_multiplier));
- build_opts.add_option("-DRESULT_SHIFT=" + support::cpp11::to_string(info->gemmlowp_shift));
- build_opts.add_option("-DOUTPUT_DATA_TYPE=" + get_cl_type_from_data_type(output->info()->data_type()));
- build_opts.add_option_if((min > std::get<0>(quantization::get_min_max_values_from_quantized_data_type(info->output_data_type))) && (min != max),
- "-DMIN_BOUND=" + support::cpp11::to_string(min));
- build_opts.add_option_if((max < std::get<1>(quantization::get_min_max_values_from_quantized_data_type(info->output_data_type))) && (min != max),
- "-DMAX_BOUND=" + support::cpp11::to_string(max));
- build_opts.add_option_if(bias != nullptr, "-DADD_BIAS");
-
- // Create kernel
- const std::string kernel_name = (info->output_data_type == DataType::QSYMM16) ? "gemmlowp_output_stage_quantize_down_fixedpoint_qsymm16" : "gemmlowp_output_stage_quantize_down_fixedpoint";
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Configure kernel window
- auto win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration));
- ICLKernel::configure_internal(win);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-void CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- // Create input window
- Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
- Window slice = collapsed.first_slice_window_3D();
-
- // Setup bias slice
- unsigned int idx1 = num_arguments_per_3D_tensor();
- if(_bias != nullptr)
- {
- Window biases_slice(slice);
- biases_slice.set(Window::DimY, Window::Dimension(0, 1, 1));
- biases_slice.set(Window::DimZ, Window::Dimension(0, 1, 1));
- add_1D_tensor_argument(idx1, _bias, biases_slice);
- }
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, slice);
- add_3D_tensor_argument(idx1, _output, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(collapsed.slide_window_slice_3D(slice));
-}
-} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.h b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.h
deleted file mode 100644
index 8653102cd8..0000000000
--- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMLOWPQUANTIZEDOWNINT32SCALEBYFIXEDPOINTKERNEL_H
-#define ARM_COMPUTE_CLGEMMLOWPQUANTIZEDOWNINT32SCALEBYFIXEDPOINTKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** OpenCL kernel used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED/QSYMM16
- *
- * This kernel takes a final int32 accumulator value (the output of the matrix multiplication), and processes it to obtain the final quantized value.
- * The following computations will be performed by the kernel:
- *
- * -# Compute fixed point multiplication between each entry of input by gemmlowp_multiplier
- * -# Add bias to final result if bias tensor is not a nullptr
- * -# Round to nearest division by a power-of-two using result_shift
- * -# Add offset to each result
- * -# Clamp the value between the specified min and max bounds
- * -# Clamp the resulting int32 values to the proper quantized range and cast to QASYMM8/QASYMM8_SIGNED/QSYMM16.
- */
-class CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel : public ICLKernel
-{
-public:
- /** Constructor */
- CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers)*/
- CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel(const CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers)*/
- CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel &operator=(const CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel(CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel &operator=(CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel &&) = default;
- /** Initialise the kernel's input and output.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Input tensor. Data type supported: S32
- * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
- * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
-     * @param[out] output          Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM16.
- * @param[in] info Output stage info. Used to pass the quantized output data type
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo *info);
- /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel
- *
- * @param[in] input Input tensor. Data type supported: S32
- * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
- * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
-     * @param[in] output          Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM16.
- * @param[in] info Output stage info. Used to pass the quantized output data type
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo *info);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_input;
- const ICLTensor *_bias;
- ICLTensor *_output;
-};
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CLGEMMLOWPQUANTIZEDOWNINT32SCALEBYFIXEDPOINTKERNEL_H */
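
The step list in the header above maps onto gemmlowp-style fixed-point arithmetic. The following scalar sketch is one plausible interpretation based on the gemmlowp reference functions, not on the OpenCL kernel source; the function names are illustrative and negative (left) shifts are not handled.

#include <algorithm>
#include <cstdint>

// Saturating rounding doubling high multiply, following the gemmlowp reference.
int32_t saturating_rounding_doubling_high_mul(int32_t a, int32_t b)
{
    const bool    overflow = (a == b) && (a == INT32_MIN);
    const int64_t ab       = static_cast<int64_t>(a) * static_cast<int64_t>(b);
    const int32_t nudge    = (ab >= 0) ? (1 << 30) : (1 - (1 << 30));
    const int32_t result   = static_cast<int32_t>((ab + nudge) / (1ll << 31));
    return overflow ? INT32_MAX : result;
}

// Rounding right shift by a power of two (round to nearest).
int32_t rounding_divide_by_pot(int32_t x, int exponent)
{
    const int32_t mask      = (int32_t(1) << exponent) - 1;
    const int32_t remainder = x & mask;
    const int32_t threshold = (mask >> 1) + ((x < 0) ? 1 : 0);
    return (x >> exponent) + ((remainder > threshold) ? 1 : 0);
}

// One accumulator through the quantize-down pipeline described above (QASYMM8_SIGNED case).
int8_t quantize_down_fixedpoint(int32_t acc, int32_t bias, int32_t multiplier, int shift,
                                int32_t offset_after_shift, int32_t min_bound, int32_t max_bound)
{
    acc += bias;                                                   // ADD_BIAS
    acc  = saturating_rounding_doubling_high_mul(acc, multiplier); // RESULT_FIXEDPOINT_MULTIPLIER
    acc  = rounding_divide_by_pot(acc, shift);                     // RESULT_SHIFT
    acc += offset_after_shift;                                     // RESULT_OFFSET_AFTER_SHIFT
    acc  = std::min(std::max(acc, min_bound), max_bound);          // MIN_BOUND / MAX_BOUND
    return static_cast<int8_t>(acc);                               // cast to the quantized type
}
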
diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp
deleted file mode 100644
index a5888a5ded..0000000000
--- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output,
- const GEMMLowpOutputStageInfo *info)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON((info->output_data_type != DataType::QASYMM8) && (info->output_data_type != DataType::QASYMM8_SIGNED));
- ARM_COMPUTE_RETURN_ERROR_ON(info->gemmlowp_max_bound > std::get<1>(quantization::get_min_max_values_from_quantized_data_type(info->output_data_type)));
- ARM_COMPUTE_RETURN_ERROR_ON(info->gemmlowp_min_bound < std::get<0>(quantization::get_min_max_values_from_quantized_data_type(info->output_data_type))
- || info->gemmlowp_min_bound > info->gemmlowp_max_bound);
-
- // Check biases if exist
- if(bias != nullptr)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, bias);
- ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
- ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != bias->dimension(0));
- }
-
- if(output->total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() != info->output_data_type, "Mismatching output data type");
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
- }
-
- return Status{};
-}
-} // namespace
-
-class Coordinates;
-CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel()
- : _input(nullptr), _bias(nullptr), _output(nullptr)
-{
-}
-
-Status CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output,
- const GEMMLowpOutputStageInfo *info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, bias, output, info));
-
- return Status{};
-}
-
-void CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output,
- const GEMMLowpOutputStageInfo *info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, bias, output, info);
-}
-
-void CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output,
- const GEMMLowpOutputStageInfo *info)
-{
- // Perform validate step
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr) ? bias->info() : nullptr, output->info(), info));
-
- auto padding_info = get_padding_info({ input, bias, output });
-
-    // Output auto initialization if not yet initialized
- auto_init_if_empty(*output->info(), input->info()->clone()->set_data_type(info->output_data_type));
-
- _input = input;
- _bias = bias;
- _output = output;
-
- const unsigned int num_elems_processed_per_iteration = adjust_vec_size(4, input->info()->dimension(0));
-
- auto min = info->gemmlowp_min_bound;
- auto max = info->gemmlowp_max_bound;
-
- // Set the arguments to pass at compile time
- CLBuildOptions build_opts;
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(input->info()->dimension(0) % num_elems_processed_per_iteration));
- build_opts.add_option("-DREAL_MULTIPLIER=" + float_to_string_with_full_precision(info->gemmlowp_real_multiplier));
- build_opts.add_option("-DOUTPUT_OFFSET=" + support::cpp11::to_string(info->gemmlowp_offset));
- build_opts.add_option("-DOUTPUT_DATA_TYPE=" + get_cl_type_from_data_type(output->info()->data_type()));
- build_opts.add_option_if((min > 0), "-DMIN_BOUND=" + support::cpp11::to_string(min));
- build_opts.add_option_if((max < 255), "-DMAX_BOUND=" + support::cpp11::to_string(max));
- build_opts.add_option_if(bias != nullptr, "-DADD_BIAS");
-
- // Create kernel
- _kernel = create_kernel(compile_context, "gemmlowp_output_stage_quantize_down_float", build_opts.options());
-
- // Configure kernel window
- Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
- ICLKernel::configure_internal(win);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-void CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- // Create input window
- Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
- Window slice = collapsed.first_slice_window_3D();
-
- // Setup bias slice
- unsigned int idx1 = num_arguments_per_3D_tensor();
- if(_bias != nullptr)
- {
- Window biases_slice(slice);
- biases_slice.set(Window::DimY, Window::Dimension(0, 1, 1));
- biases_slice.set(Window::DimZ, Window::Dimension(0, 1, 1));
- add_1D_tensor_argument(idx1, _bias, biases_slice);
- }
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, slice);
- add_3D_tensor_argument(idx1, _output, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(collapsed.slide_window_slice_3D(slice));
-}
-} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.h b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.h
deleted file mode 100644
index 0a8d5e1942..0000000000
--- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (c) 2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMLOWPQUANTIZEDOWNINT32SCALEBYFLOATKERNEL_H
-#define ARM_COMPUTE_CLGEMMLOWPQUANTIZEDOWNINT32SCALEBYFLOATKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-// Forward declarations
-class ICLTensor;
-
-/** OpenCL kernel used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED
- *
- * This kernel takes a final int32 accumulator value (the output of the matrix multiplication), and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value.
- * The following computations will be performed by the kernel:
- *
- * -# Compute fixed point multiplication between each entry of input by result_fixedpoint_multiplier
- * -# Add bias to final result if bias tensor is not a nullptr
- * -# Requantize
- * -# Add offset to each result
- * -# Clamp the value between the specified min and max bounds
- * -# Clamp the resulting int32 values:
- * - to the [0..255] range and cast to QASYMM8.
- * - to the [-128..127] range and cast to QASYMM8_SIGNED.
- */
-class CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel : public ICLKernel
-{
-public:
- /** Constructor */
- CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers)*/
- CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel(const CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers)*/
- CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel &operator=(const CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel(CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel &operator=(CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel &&) = default;
- /** Initialise the kernel's input and output.
- *
- * @param[in] input Input tensor. Data type supported: S32
- * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
- * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
-     * @param[out] output  Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
- * @param[in] info Output stage info. Used to pass the quantized output data type
- */
- void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo *info);
- /** Initialise the kernel's input and output.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Input tensor. Data type supported: S32
- * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
- * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
-     * @param[out] output          Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
- * @param[in] info Output stage info. Used to pass the quantized output data type
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo *info);
- /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel
- *
- * @param[in] input Input tensor. Data type supported: S32
- * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
- * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
-     * @param[in] output Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
- * @param[in] info Output stage info. Used to pass the quantized output data type
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo *info);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_input;
- const ICLTensor *_bias;
- ICLTensor *_output;
-};
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CLGEMMLOWPQUANTIZEDOWNINT32SCALEBYFLOATKERNEL_H */
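
For the float-scaling variant the per-accumulator arithmetic is straightforward. The sketch below is an assumption based on the step list above rather than the OpenCL kernel source; the function name is illustrative and the rounding applied by the device-side convert may differ.

#include <algorithm>
#include <cmath>
#include <cstdint>

// One accumulator through the float-scaling output stage (QASYMM8 case).
uint8_t quantize_down_float(int32_t acc, int32_t bias, float real_multiplier,
                            int32_t output_offset, int32_t min_bound, int32_t max_bound)
{
    acc += bias;                                                            // ADD_BIAS
    int32_t res = static_cast<int32_t>(std::lround(acc * real_multiplier)); // REAL_MULTIPLIER
    res += output_offset;                                                   // OUTPUT_OFFSET
    res  = std::min(std::max(res, min_bound), max_bound);                   // MIN_BOUND / MAX_BOUND
    return static_cast<uint8_t>(res);                                       // cast to QASYMM8
}
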
diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp
deleted file mode 100644
index 7d4352479c..0000000000
--- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright (c) 2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo *output_stage)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON((output_stage->output_data_type != DataType::QASYMM8) && (output_stage->output_data_type != DataType::QASYMM8_SIGNED));
- ARM_COMPUTE_RETURN_ERROR_ON(output_stage->gemmlowp_max_bound > std::get<1>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type)));
- ARM_COMPUTE_RETURN_ERROR_ON(output_stage->gemmlowp_min_bound < std::get<0>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type))
- || output_stage->gemmlowp_min_bound > output_stage->gemmlowp_max_bound);
-
- // Check biases if they exist
- if(bias != nullptr)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, bias);
- ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
- ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != bias->dimension(0));
- }
-
- if(output->total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() != output_stage->output_data_type, "Mismatching output data type");
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
- }
-
- return Status{};
-}
-} //namespace
-
-CLGEMMLowpQuantizeDownInt32ScaleKernel::CLGEMMLowpQuantizeDownInt32ScaleKernel()
- : _input(nullptr), _bias(nullptr), _output(nullptr)
-{
-}
-
-Status CLGEMMLowpQuantizeDownInt32ScaleKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo *output_stage)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, bias, output, output_stage));
-
- return Status{};
-}
-
-void CLGEMMLowpQuantizeDownInt32ScaleKernel::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo *output_stage)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, bias, output, output_stage);
-}
-
-void CLGEMMLowpQuantizeDownInt32ScaleKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output,
- const GEMMLowpOutputStageInfo *output_stage)
-{
- // Perform validate step
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(),
- (bias != nullptr) ? bias->info() : nullptr,
- output->info(),
- output_stage));
-
- auto padding_info = get_padding_info({ input, bias, output });
-
- // Output auto initialization if not yet initialized
- auto_init_if_empty(*output->info(), input->info()->clone()->set_data_type(output_stage->output_data_type));
-
- _input = input;
- _bias = bias;
- _output = output;
-
- const unsigned int num_elems_processed_per_iteration = adjust_vec_size(4, input->info()->dimension(0));
-
- // Set the arguments to pass at compile time
- auto min = output_stage->gemmlowp_min_bound;
- auto max = output_stage->gemmlowp_max_bound;
- CLBuildOptions build_opts;
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(input->info()->dimension(0) % num_elems_processed_per_iteration));
- build_opts.add_option("-DRESULT_OFFSET=" + support::cpp11::to_string(output_stage->gemmlowp_offset));
- build_opts.add_option("-DRESULT_MULT_INT=" + support::cpp11::to_string(output_stage->gemmlowp_multiplier));
- build_opts.add_option("-DRESULT_SHIFT=" + support::cpp11::to_string(output_stage->gemmlowp_shift));
- build_opts.add_option_if((min > std::get<0>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type))) && (min != max),
- "-DMIN_BOUND=" + support::cpp11::to_string(min));
- build_opts.add_option_if((max < std::get<1>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type))) && (min != max),
- "-DMAX_BOUND=" + support::cpp11::to_string(max));
- build_opts.add_option("-DOUTPUT_DATA_TYPE=" + get_cl_type_from_data_type(output->info()->data_type()));
- build_opts.add_option_if(bias != nullptr, "-DADD_BIAS");
-
- // Create kernel
- _kernel = create_kernel(compile_context, "gemmlowp_output_stage_quantize_down", build_opts.options());
-
- // Configure kernel window
- Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
- ICLKernel::configure_internal(win);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-void CLGEMMLowpQuantizeDownInt32ScaleKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
- Window slice = collapsed.first_slice_window_3D();
-
- unsigned int idx1 = num_arguments_per_3D_tensor();
- if(_bias != nullptr)
- {
- Window biases_slice(slice);
- biases_slice.set(Window::DimY, Window::Dimension(0, 1, 1));
- biases_slice.set(Window::DimZ, Window::Dimension(0, 1, 1));
- add_1D_tensor_argument(idx1, _bias, biases_slice);
- }
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, slice);
- add_3D_tensor_argument(idx1, _output, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(collapsed.slide_window_slice_3D(slice));
-}
-} \ No newline at end of file
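
The configure step above passes the vector width and the leftover element count to the kernel as -DVEC_SIZE and -DVEC_SIZE_LEFTOVER so the last, partial vector of each row can be stored without requiring padding. A small sketch of that arithmetic, with adjust_vec_size_ref standing in for the library helper and assuming it simply clamps the preferred width to the row length:

#include <algorithm>
#include <cstdio>

// Clamp the preferred vector width to the tensor's row length (illustrative
// stand-in for the helper used in configure()).
unsigned int adjust_vec_size_ref(unsigned int preferred, unsigned int row_length)
{
    return std::min(preferred, row_length);
}

int main()
{
    const unsigned int row_length = 18;                    // e.g. input dimension 0
    const unsigned int vec_size   = adjust_vec_size_ref(4, row_length);
    const unsigned int leftover   = row_length % vec_size; // 18 % 4 == 2
    std::printf("-DVEC_SIZE=%u -DVEC_SIZE_LEFTOVER=%u\n", vec_size, leftover);
    return 0;
}
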
diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.h b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.h
deleted file mode 100644
index abdf33ea43..0000000000
--- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (c) 2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMLOWPQUANTIZEDOWNINT32SCALEKERNEL_H
-#define ARM_COMPUTE_CLGEMMLOWPQUANTIZEDOWNINT32SCALEKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** OpenCL kernel used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED
- *
- * This kernel takes a final int32 accumulator value (the output of the matrix multiplication), and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value.
- * The following computations will be performed by the kernel:
- *
- * -# Add offset terms to final result
- * -# Multiply each entry of result by result_mult_int
- * -# Add bias to final result if bias tensor is not a nullptr
- * -# Shift the int32 accumulator by result_shift
- * -# Clamp the value between the specified min and max bounds
- * -# Saturate the clamped int32 values to the output range and cast:
- *    - to [0..255] for QASYMM8,
- *    - to [-128..127] for QASYMM8_SIGNED.
- *
- */
-class CLGEMMLowpQuantizeDownInt32ScaleKernel : public ICLKernel
-{
-public:
- /** Constructor */
- CLGEMMLowpQuantizeDownInt32ScaleKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers)*/
- CLGEMMLowpQuantizeDownInt32ScaleKernel(const CLGEMMLowpQuantizeDownInt32ScaleKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers)*/
- CLGEMMLowpQuantizeDownInt32ScaleKernel &operator=(const CLGEMMLowpQuantizeDownInt32ScaleKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLGEMMLowpQuantizeDownInt32ScaleKernel(CLGEMMLowpQuantizeDownInt32ScaleKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLGEMMLowpQuantizeDownInt32ScaleKernel &operator=(CLGEMMLowpQuantizeDownInt32ScaleKernel &&) = default;
- /** Initialise the kernel's input and output.
- *
- * @param[in] input Input tensor. Data type supported: S32
- * @param[in] bias Biases tensor. Only shared biases are supported; it can be a nullptr if bias addition is not required.
- * Biases are a 1D tensor with dimensions [OFM]. Data type supported: same as @p input.
- * @param[out] output Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
- * @param[in] output_stage GEMMLowp output stage metadata.
- */
- void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo *output_stage);
- /** Initialise the kernel's input and output.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Input tensor. Data type supported: S32
- * @param[in] bias Biases tensor. Only shared biases are supported; it can be a nullptr if bias addition is not required.
- * Biases are a 1D tensor with dimensions [OFM]. Data type supported: same as @p input.
- * @param[out] output Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
- * @param[in] output_stage GEMMLowp output stage metadata.
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo *output_stage);
- /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ScaleKernel
- *
- * @param[in] input Input tensor. Data type supported: S32
- * @param[in] bias Biases tensor. Only shared biases are supported; it can be a nullptr if bias addition is not required.
- * Biases are a 1D tensor with dimensions [OFM]. Data type supported: same as @p input.
- * @param[in] output Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
- * @param[in] output_stage GEMMLowp output stage metadata.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo *output_stage);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_input;
- const ICLTensor *_bias;
- ICLTensor *_output;
-};
-} // namespace arm_compute
-
-#endif /* ARM_COMPUTE_CLGEMMLOWPQUANTIZEDOWNINT32SCALEKERNEL_H */ \ No newline at end of file
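
The header above documents the integer output stage step by step. A plain scalar rendering of those steps, kept deliberately simple: the removed CL kernel may fold the bias into the accumulator before the offset and multiplier, so treat this as a sketch of the documented order rather than the exact implementation.

#include <algorithm>
#include <cstdint>

// Scalar sketch of the documented integer output stage for QASYMM8.
uint8_t quantize_down_int32_scale(int32_t acc, int32_t bias, int32_t offset,
                                  int32_t mult, int32_t shift,
                                  int32_t min_bound, int32_t max_bound)
{
    int32_t result = (acc + offset) * mult; // add offset, multiply by result_mult_int
    result += bias;                         // add bias if a bias tensor was given
    result >>= shift;                       // shift by result_shift
    result = std::min(std::max(result, min_bound), max_bound);
    return static_cast<uint8_t>(result);    // QASYMM8_SIGNED would cast to int8_t
}
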
diff --git a/src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp b/src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp
deleted file mode 100644
index d508bf6f21..0000000000
--- a/src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLGEMMLowpReductionKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/KernelDescriptors.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace
-{
-Status validate_arguments_matrix_a_reduction(const ITensorInfo *input, const ITensorInfo *output)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8);
-
- if(output->total_size() > 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(0) != input->dimension(1), "Output vector must have length equal to the number of rows of the input matrix");
- }
- return Status{};
-}
-
-Status validate_arguments_matrix_b_reduction(const ITensorInfo *input, const ITensorInfo *output)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8, DataType::QSYMM8_PER_CHANNEL);
-
- if(output->total_size() > 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(0) != input->dimension(0), "Output vector must have length equal to the number of columns of the input matrix");
- }
- return Status{};
-}
-} // namespace
-
-ICLGEMMLowpReductionKernel::ICLGEMMLowpReductionKernel()
- : _input(), _output()
-{
-}
-
-void CLGEMMLowpMatrixAReductionKernel::configure(const ICLTensor *mtx_a, ICLTensor *vector_sum_row, const GEMMLowpReductionKernelInfo &info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), mtx_a, vector_sum_row, info);
-}
-
-void CLGEMMLowpMatrixAReductionKernel::configure(const CLCompileContext &compile_context, const ICLTensor *mtx_a, ICLTensor *vector_sum_row, const GEMMLowpReductionKernelInfo &info)
-{
- // Perform validate step
- ARM_COMPUTE_ERROR_ON_NULLPTR(mtx_a, vector_sum_row);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_matrix_a_reduction(mtx_a->info(), vector_sum_row->info()));
-
- // Output auto initialization if not yet initialized
- auto_init_if_empty(*vector_sum_row->info(), TensorShape(mtx_a->info()->dimension(1)), 1, DataType::S32);
-
- auto padding_info = get_padding_info({ mtx_a, vector_sum_row });
-
- _input = mtx_a;
- _output = vector_sum_row;
-
- // Set the arguments to pass at compile time
- CLBuildOptions build_opts;
- build_opts.add_option("-DCOLS_A=" + support::cpp11::to_string(mtx_a->info()->dimension(0)));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(mtx_a->info()->data_type()));
- build_opts.add_option("-DACC_DATA_TYPE=" + get_cl_dot8_acc_type_from_data_type(mtx_a->info()->data_type()));
- build_opts.add_option_if(info.mul_by_scalar, "-DSCALAR=" + support::cpp11::to_string(info.scalar));
-
- const bool is_dot8_supported = dot8_supported(CLKernelLibrary::get().get_device());
-
- std::string kernel_name = "gemmlowp_matrix_a_reduction" + std::string(is_dot8_supported ? "_dot8" : "");
-
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Configure kernel window
- // This kernel does not need padding
- Window win = calculate_max_window(*vector_sum_row->info(), Steps());
- ICLKernel::configure_internal(win);
-
- _config_id = kernel_name;
- _config_id += "_";
- _config_id += support::cpp11::to_string(_input->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(_input->info()->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(_input->info()->dimension(2));
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status CLGEMMLowpMatrixAReductionKernel::validate(const ITensorInfo *mtx_a, const ITensorInfo *vector_sum_row, const GEMMLowpReductionKernelInfo &info)
-{
- ARM_COMPUTE_UNUSED(info);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_matrix_a_reduction(mtx_a, vector_sum_row));
-
- return Status{};
-}
-
-void CLGEMMLowpMatrixAReductionKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimY);
- Window slice_in = collapsed.first_slice_window_2D();
- Window slice_out = collapsed.first_slice_window_2D();
-
- // Set up the input slice. Its dimensions are increased in the CL kernel.
- slice_in.set(Window::DimX, Window::Dimension(0, 0, 0));
- slice_in.set(Window::DimY, Window::Dimension(0, 0, 0));
- slice_in.set(Window::DimZ, Window::Dimension(0, 0, 0));
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, slice_in);
- add_2D_tensor_argument(idx, _output, slice_out);
- enqueue(queue, *this, slice_out, lws_hint());
- }
- while(collapsed.slide_window_slice_2D(slice_out));
-}
-
-void CLGEMMLowpMatrixBReductionKernel::configure(const ICLTensor *mtx_b, ICLTensor *vector_sum_col, const GEMMLowpReductionKernelInfo &info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), mtx_b, vector_sum_col, info);
-}
-
-void CLGEMMLowpMatrixBReductionKernel::configure(const CLCompileContext &compile_context, const ICLTensor *mtx_b, ICLTensor *vector_sum_col, const GEMMLowpReductionKernelInfo &info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(mtx_b, vector_sum_col);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_matrix_b_reduction(mtx_b->info(), vector_sum_col->info()));
-
- _input = mtx_b;
- _output = vector_sum_col;
-
- // Output auto initialization if not yet initialized
- auto_init_if_empty(*_output->info(), TensorShape(mtx_b->info()->dimension(0)), 1, DataType::S32);
-
- auto padding_info = get_padding_info({ mtx_b, vector_sum_col });
-
- const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16, mtx_b->info()->dimension(0));
-
- // Set the arguments to pass at compile time
- CLBuildOptions build_opts;
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(mtx_b->info()->dimension(0) % num_elems_processed_per_iteration));
- build_opts.add_option("-DCOLS_B=" + support::cpp11::to_string(mtx_b->info()->dimension(0)));
- build_opts.add_option("-DROWS_B=" + support::cpp11::to_string(mtx_b->info()->dimension(1)));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(mtx_b->info()->data_type()));
- build_opts.add_option("-DACC_DATA_TYPE=" + get_cl_dot8_acc_type_from_data_type(mtx_b->info()->data_type()));
- build_opts.add_option_if(info.mul_by_scalar, "-DSCALAR=" + support::cpp11::to_string(info.scalar));
-
- // Create kernel
- _kernel = create_kernel(compile_context, "gemmlowp_matrix_b_reduction", build_opts.options());
-
- // Configure kernel window
- Window win = calculate_max_window(*_output->info(), Steps(num_elems_processed_per_iteration));
- ICLKernel::configure_internal(win);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status CLGEMMLowpMatrixBReductionKernel::validate(const ITensorInfo *mtx_b, const ITensorInfo *vector_sum_col, const GEMMLowpReductionKernelInfo &info)
-{
- ARM_COMPUTE_UNUSED(info);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_matrix_b_reduction(mtx_b, vector_sum_col));
-
- return Status{};
-}
-
-void CLGEMMLowpMatrixBReductionKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- Window collapsed = window.collapse_if_possible(IKernel::window(), Window::DimY);
-
- Window slice_out = collapsed.first_slice_window_2D();
- Window slice_in = slice_out;
-
- slice_in.set(Window::DimY, Window::Dimension(0, 0, 0));
- slice_in.set(Window::DimZ, Window::Dimension(0, 0, 0));
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, slice_in);
- add_2D_tensor_argument(idx, _output, slice_out);
- enqueue(queue, *this, slice_out, lws_hint());
- }
- while(collapsed.slide_window_slice_2D(slice_out));
-}
-} // namespace arm_compute
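
The two kernels removed above compute, respectively, the sum of every row of matrix A and the sum of every column of matrix B. A plain C++ reference of both reductions (row-major storage assumed; all names are illustrative):

#include <cstdint>
#include <vector>

// Sum of each row of an M x K matrix A (result length M).
std::vector<int32_t> sum_rows_a(const std::vector<uint8_t> &a, int m, int k)
{
    std::vector<int32_t> sums(m, 0);
    for(int row = 0; row < m; ++row)
    {
        for(int col = 0; col < k; ++col)
        {
            sums[row] += a[row * k + col];
        }
    }
    return sums;
}

// Sum of each column of a K x N matrix B (result length N).
std::vector<int32_t> sum_cols_b(const std::vector<uint8_t> &b, int k, int n)
{
    std::vector<int32_t> sums(n, 0);
    for(int row = 0; row < k; ++row)
    {
        for(int col = 0; col < n; ++col)
        {
            sums[col] += b[row * n + col];
        }
    }
    return sums;
}
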
diff --git a/src/core/CL/kernels/CLGEMMLowpReductionKernel.h b/src/core/CL/kernels/CLGEMMLowpReductionKernel.h
deleted file mode 100644
index 237d8099b7..0000000000
--- a/src/core/CL/kernels/CLGEMMLowpReductionKernel.h
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMLOWREDUCTIONKERNEL_H
-#define ARM_COMPUTE_CLGEMMLOWREDUCTIONKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-struct GEMMLowpReductionKernelInfo;
-
-/** Common interface for all OpenCL reduction kernels */
-class ICLGEMMLowpReductionKernel : public ICLKernel
-{
-public:
- /** Constructor */
- ICLGEMMLowpReductionKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers)*/
- ICLGEMMLowpReductionKernel(const ICLGEMMLowpReductionKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers)*/
- ICLGEMMLowpReductionKernel &operator=(const ICLGEMMLowpReductionKernel &) = delete;
- /** Allow instances of this class to be moved */
- ICLGEMMLowpReductionKernel(ICLGEMMLowpReductionKernel &&) = default;
- /** Allow instances of this class to be moved */
- ICLGEMMLowpReductionKernel &operator=(ICLGEMMLowpReductionKernel &&) = default;
-
- /** Initialise the kernel's input and output.
- *
- * @param[in] input Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8.
- * @param[out] output Output row-vector of sums of all the entries in each row/col of input tensor. Data type supported: S32
- * @param[in] info Kernel metadata:
- * - k Number of matrix columns/rows depending on the type of reduction.
- * - is_reshaped True if the matrix has been reshaped.
- * - scalar Scalar value to multiply each reduced column/row by.
- * - mul_by_scalar True if each reduced column/row must be multiplied by a scalar value.
- */
- virtual void configure(const ICLTensor *input, ICLTensor *output, const GEMMLowpReductionKernelInfo &info) = 0;
- /** Initialise the kernel's input and output.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8.
- * @param[out] output Output row-vector of sums of all the entries in each row/col of input tensor. Data type supported: S32
- * @param[in] info Kernel metadata:
- * - k Number of matrix columns/rows depending on the type of reduction.
- * - is_reshaped True if the matrix has been reshaped.
- * - scalar Scalar value to multiply each reduced column/row by.
- * - mul_by_scalar True if each reduced column/row must be multiplied by a scalar value.
- */
- virtual void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const GEMMLowpReductionKernelInfo &info) = 0;
-
-protected:
- const ICLTensor *_input;
- ICLTensor *_output;
-};
-
-/** OpenCL kernel used to compute the row-vectors of sums of all the entries in each row of Matrix A.
- *
- * @note This stage is needed to handle the offset of matrix product
- * https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
- */
-class CLGEMMLowpMatrixAReductionKernel : public ICLGEMMLowpReductionKernel
-{
-public:
- /** Initialise the kernel's input and output.
- *
- * @param[in] mtx_a Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8.
- * @param[out] vector_sum_row Output row-vector of sums of all the entries in each row of mtx_a. Data type supported: S32
- * @param[in] info Kernel metadata:
- * - k Number of matrix columns/rows depending on the type of reduction.
- * - is_reshaped True if the matrix has been reshaped.
- * - scalar Scalar value to multiply each reduced column/row by.
- * - mul_by_scalar True if each reduced column/row must be multiplied by a scalar value.
- */
- void configure(const ICLTensor *mtx_a, ICLTensor *vector_sum_row, const GEMMLowpReductionKernelInfo &info) override;
- /** Initialise the kernel's input and output.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] mtx_a Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8.
- * @param[out] vector_sum_row Output row-vector of sums of all the entries in each row of mtx_a. Data type supported: S32
- * @param[in] info Kernel metadata:
- * - k Number of matrix columns/rows depending on the type of reduction.
- * - is_reshaped True if the matrix has been reshaped.
- * - scalar Scalar value to multiply each reduced column/row by.
- * - mul_by_scalar True if each reduced column/row must be multiplied by a scalar value.
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *mtx_a, ICLTensor *vector_sum_row, const GEMMLowpReductionKernelInfo &info) override;
- /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpMatrixAReductionKernel
- *
- * @param[in] mtx_a Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8.
- * @param[in] vector_sum_row Output row-vector of sums of all the entries in each row of mtx_a. Data type supported: S32
- * @param[in] info Kernel metadata:
- * - k Number of matrix columns/rows depending on the type of reduction.
- * - is_reshaped True if the matrix has been reshaped.
- * - scalar Scalar value to multiply each reduced column/row by.
- * - mul_by_scalar True if each reduced column/row must be multiplied by a scalar value.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *mtx_a, const ITensorInfo *vector_sum_row, const GEMMLowpReductionKernelInfo &info);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-};
-
-/** OpenCL kernel used to compute the row-vectors of sums of all the entries in each column of Matrix B.
- *
- * @note This stage is needed to handle the offset of matrix product
- * https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
- */
-class CLGEMMLowpMatrixBReductionKernel : public ICLGEMMLowpReductionKernel
-{
-public:
- /** Initialise the kernel's input and output.
- *
- * @param[in] mtx_b Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL.
- * @param[out] vector_sum_col Output row-vector of sums of all the entries in each column of mtx_b. Data type supported: S32
- * @param[in] info Kernel metadata:
- * - k Number of matrix columns/rows depending on the type of reduction.
- * - is_reshaped True if the matrix has been reshaped.
- * - scalar Scalar value to multiply each reduced column/row by.
- * - mul_by_scalar True if each reduced column/row must be multiplied by a scalar value.
- */
- void configure(const ICLTensor *mtx_b, ICLTensor *vector_sum_col, const GEMMLowpReductionKernelInfo &info) override;
- /** Initialise the kernel's input and output.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] mtx_b Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL.
- * @param[out] vector_sum_col Output row-vector of sums of all the entries in each column of mtx_b. Data type supported: S32
- * @param[in] info Kernel metadata:
- * - k Number of matrix columns/rows depending on the type of reduction.
- * - is_reshaped True if the matrix has been reshaped.
- * - scalar Scalar value to multiply each reduced column/row by.
- * - mul_by_scalar True if each reduced column/row must be multiplied by a scalar value.
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *mtx_b, ICLTensor *vector_sum_col, const GEMMLowpReductionKernelInfo &info) override;
- /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpMatrixBReductionKernel
- *
- * @param[in] mtx_b Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL.
- * @param[in] vector_sum_col Output row-vector of sums of all the entries in each column of mtx_b. Data type supported: S32
- * @param[in] info Kernel metadata:
- * - k Number of matrix columns/rows depending on the type of reduction.
- * - is_reshaped True if the matrix has been reshaped.
- * - scalar Scalar value to multiply each reduced column/row by.
- * - mul_by_scalar True if each reduced column/row must be multiplied by a scalar value.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *mtx_b, const ITensorInfo *vector_sum_col, const GEMMLowpReductionKernelInfo &info);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-};
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CLGEMMLOWREDUCTIONKERNEL_H */
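
As the class comments note, these reductions exist to handle the quantization offsets of the matrix product (see the linked gemmlowp low-precision document). Expanding the quantized product shows why: for zero points za and zb,

sum_k (a_q[k] - za) * (b_q[k] - zb) = sum_k a_q*b_q - zb * row_sum(A) - za * col_sum(B) + K * za * zb

so the raw 8-bit accumulation can be corrected afterwards using only the row sums of A and the column sums of B. A sketch of that correction (the sign convention of the library's offset-contribution kernel may differ):

#include <cstdint>

int32_t apply_offset_correction(int32_t raw_acc, int32_t row_sum_a, int32_t col_sum_b,
                                int32_t za, int32_t zb, int32_t k)
{
    return raw_acc - zb * row_sum_a - za * col_sum_b + k * za * zb;
}
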
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
deleted file mode 100644
index 6d3b1e5897..0000000000
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
+++ /dev/null
@@ -1,544 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "src/core/utils/helpers/float_ops.h"
-#include "support/StringSupport.h"
-
-#include <set>
-#include <string>
-
-namespace arm_compute
-{
-using namespace arm_compute::misc::shape_calculator;
-
-namespace
-{
-using ElementsProcessed = Steps;
-
-inline Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float beta,
- bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, bool fp_mixed_precision)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input0, input1, output);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input0);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((fp_mixed_precision && (input0->data_type() != DataType::F16)), "Mixed precision floating point is supported only for F16 data");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input0->num_dimensions() > 4, "The number of dimensions for the matrix A must be <= 4");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input1->num_dimensions() > 3, "The number of dimensions for the matrix B must be <= 3");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_interleaved_transposed && reshape_info.reinterpret_input_as_3d(), "The input tensor cannot be reinterpreted as 3D if is_interleaved_transposed is true");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input1->num_dimensions() > 2 && reshape_info.reinterpret_input_as_3d(), "The input1 tensor cannot have more than 2 dimensions if input0 has to be reinterpreted as 3D");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((reshape_info.reinterpret_input_as_3d() || reshape_info.depth_output_gemm3d() != 0) && (input2 != nullptr)
- && (!reshape_info.broadcast_bias()),
- "Bias addition only supported with broadcast mode in case the input or output has to be reinterpreted as 3D");
-
- if(!is_interleaved_transposed)
- {
- ARM_COMPUTE_RETURN_ERROR_ON(input0->dimension(0) != input1->dimension(1));
-
- if(input2 != nullptr && !(helpers::float_ops::is_zero(beta)))
- {
- const unsigned int m = reshape_info.reinterpret_input_as_3d() ? input0->dimension(1) * input0->dimension(2) : input0->dimension(1);
- const unsigned int n = input1->dimension(0);
- const unsigned int input2_dim0 = input2->dimension(0);
- const unsigned int input2_dim1 = input2->dimension(1);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input2, input1);
- if(reshape_info.broadcast_bias())
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((input2_dim1 != 1 || input2_dim0 != n), "Incorrect dimension of bias matrix which is to be broadcasted");
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((input2_dim0 != n || input2_dim1 != m), "Incorrect dimension of bias matrix");
- }
- }
- }
- else
- {
- GEMMRHSMatrixInfo rhs_info;
- GEMMLHSMatrixInfo lhs_info;
- const auto m = static_cast<unsigned int>(reshape_info.m());
- const auto n = static_cast<unsigned int>(reshape_info.n());
- const int k = reshape_info.k();
- const int mult_transpose1xW_width = reshape_info.mult_transpose1xW_width();
- const int mult_interleave4x4_height = reshape_info.mult_interleave4x4_height();
- rhs_info.n0 = max_cl_vector_width / input1->element_size();
- rhs_info.k0 = 1;
- rhs_info.h0 = mult_transpose1xW_width;
- rhs_info.interleave = false;
- rhs_info.transpose = false;
- lhs_info.m0 = 4;
- lhs_info.k0 = 4;
- lhs_info.v0 = mult_interleave4x4_height;
- lhs_info.interleave = true;
- lhs_info.transpose = true;
-
- TensorShape tensor_shape0{ input0->tensor_shape() };
- tensor_shape0.set(0, k);
- tensor_shape0.set(1, m);
-
- TensorShape tensor_shape1{ input1->tensor_shape() };
- tensor_shape1.set(0, n);
- tensor_shape1.set(1, k);
-
- const TensorInfo tensor_info0 = input0->clone()->set_tensor_shape(tensor_shape0);
- const TensorInfo tensor_info1 = input1->clone()->set_tensor_shape(tensor_shape1);
-
- const TensorInfo tensor_info_reshaped0 = input0->clone()->set_tensor_shape(compute_lhs_reshaped_shape(tensor_info0, lhs_info));
- const TensorInfo tensor_info_reshaped1 = input1->clone()->set_tensor_shape(compute_rhs_reshaped_shape(tensor_info1, rhs_info));
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input0, &tensor_info_reshaped0);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, &tensor_info_reshaped1);
-
- if(input2 != nullptr && !(helpers::float_ops::is_zero(beta)))
- {
- const unsigned int input2_dim0 = input2->dimension(0);
- const unsigned int input2_dim1 = input2->dimension(1);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input2, input1);
- if(reshape_info.broadcast_bias())
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((input2_dim1 != 1 || input2_dim0 != n), "Incorrect dimension of bias matrix which is to be broadcasted");
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((input2_dim0 != n || input2_dim1 != m), "Incorrect dimension of bias matrix");
- }
- }
- }
-
- if(output->total_size() != 0)
- {
- const TensorInfo tensor_info_output = output->clone()->set_tensor_shape(compute_mm_shape(*input0, *input1, is_interleaved_transposed, reshape_info));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, output);
- }
-
- return Status{};
-}
-
-inline std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input0, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output,
- float beta, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, GPUTarget gpu_target,
- ElementsProcessed &num_elements_processed)
-{
- ARM_COMPUTE_UNUSED(beta);
- bool window_changed = false;
- Window win{};
- Window win_out{};
-
- const DataType data_type = input0->data_type();
- unsigned int &num_elems_processed_per_iteration_x = num_elements_processed[0];
- unsigned int &num_elems_processed_per_iteration_y = num_elements_processed[1];
- bool reinterpret_input_as_3d = reshape_info.reinterpret_input_as_3d();
- bool reinterpret_output_as_3d = (reshape_info.depth_output_gemm3d() != 0);
-
- // In case both input and output have to be reinterpreted as 3D tensors,
- // force reinterpret_input_as_3d and reinterpret_output_as_3d to be false.
- if(reinterpret_input_as_3d == reinterpret_output_as_3d)
- {
- reinterpret_input_as_3d = false;
- reinterpret_output_as_3d = false;
- }
-
- // Output tensor auto initialization if not yet initialized
- auto_init_if_empty(*output, input0->clone()->set_tensor_shape(compute_mm_shape(*input0, *input1, is_interleaved_transposed, reshape_info)));
-
- TensorInfo tmp_info(*output);
-
- if(reinterpret_output_as_3d)
- {
- // Since the output tensor has to be reinterpreted as 3D and the execute window is based on a 2D GEMM,
- // the window needs to be constructed on the 2D collapsed version of the tensor
- TensorShape tmp_shape(output->tensor_shape());
- tmp_shape.collapse(2U, 1U);
- tmp_info.set_tensor_shape(tmp_shape);
- }
-
- if(is_interleaved_transposed)
- {
- // reinterpret_input_as_3d is not supported if is_interleaved_transposed is set
- ARM_COMPUTE_ERROR_ON(reshape_info.reinterpret_input_as_3d());
-
- // Configure kernel window
- num_elems_processed_per_iteration_x = max_cl_vector_width / data_size_from_type(data_type);
- num_elems_processed_per_iteration_y = 4;
-
- win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
- if(input2 != nullptr)
- {
- const int bias_processed_per_iteration_x = num_elems_processed_per_iteration_x;
-
- const int bias_processed_per_iteration_y = reshape_info.broadcast_bias() ? 1 : num_elems_processed_per_iteration_y;
-
- AccessWindowStatic input2_access(input2, 0, 0,
- ceil_to_multiple(input2->dimension(0), bias_processed_per_iteration_x),
- ceil_to_multiple(input2->dimension(1), bias_processed_per_iteration_y));
-
- window_changed = update_window_and_padding(win, input2_access); // window used by the execute_window_loop
- }
- }
- else // The input tensors have not been reshaped
- {
- // Special case for 1xN, 2xN, 3xN and 4xN input0 tensor. num_elems_processed_per_iteration_x is set up for the default case.
- num_elems_processed_per_iteration_x = max_cl_vector_width / data_size_from_type(data_type);
- num_elems_processed_per_iteration_y = std::min(static_cast<int>(output->dimension(1)), 4);
-
- // Create kernels according to the architecture, data type and input size.
- GPUTarget arch_target = get_arch_from_target(gpu_target);
- if(arch_target == GPUTarget::BIFROST && data_type == DataType::F32)
- {
- num_elems_processed_per_iteration_x = (input1->dimension(0) <= 1000 && input0->num_dimensions() == 1) ? 2 : 4;
- }
-
- // Configure window
- win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
- win_out = calculate_max_window(*output, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
- AccessWindowStatic input0_access(input0, 0, 0, input0->dimension(0), input0->dimension(1));
- AccessWindowStatic input1_access(input1, 0, 0, ceil_to_multiple(input1->dimension(0), num_elems_processed_per_iteration_x), input1->dimension(1));
- AccessWindowStatic output_access(output, 0, 0,
- output->dimension(0),
- output->dimension(1));
-
- if(input2 != nullptr)
- {
- const int bias_processed_per_iteration_x = num_elems_processed_per_iteration_x;
-
- AccessWindowStatic input2_access(input2, 0, 0,
- ceil_to_multiple(input2->dimension(0), bias_processed_per_iteration_x),
- input2->dimension(1));
-
- window_changed = update_window_and_padding(win, input0_access, input1_access, input2_access) || // window used by the execute_window_loop
- update_window_and_padding(win_out, output_access); // window used to update the padding requirements of output tensor
- }
- else
- {
- window_changed = update_window_and_padding(win, input0_access, input1_access) || // window used by the execute_window_loop
- update_window_and_padding(win_out, output_access); // window used to update the padding requirements of output tensor
- }
-
- Coordinates coord;
- coord.set_num_dimensions(output->num_dimensions());
- output_access.set_valid_region(win_out, ValidRegion(coord, output->tensor_shape()));
- }
-
- // Collapse along the Z direction
- // This collapse needs to be here in order to tune the Z dimension of LWS
- Window collapsed = win;
- const unsigned int dimension_to_collapse = std::min(static_cast<unsigned int>(output->num_dimensions()), 2u);
- collapsed = win.collapse(win, dimension_to_collapse);
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, collapsed);
-}
-} // namespace
-
-CLGEMMMatrixMultiplyKernel::CLGEMMMatrixMultiplyKernel()
- : _input0(nullptr), _input1(nullptr), _input2(nullptr), _output(nullptr), _slide_matrix_b(true), _reinterpret_input_as_3d(false), _reinterpret_output_as_3d(false), _add_bias(false),
- _broadcast_bias(false)
-{
-}
-
-void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta,
- bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, bool fp_mixed_precision, const ActivationLayerInfo &activation_info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input0, input1, input2, output, alpha, beta, is_interleaved_transposed, reshape_info, fp_mixed_precision, activation_info);
-}
-
-void CLGEMMMatrixMultiplyKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha,
- float beta,
- bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, bool fp_mixed_precision, const ActivationLayerInfo &activation_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output);
-
- // Perform validate step
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), (input2 != nullptr) ? input2->info() : nullptr, output->info(), beta,
- is_interleaved_transposed, reshape_info, fp_mixed_precision));
-
- auto padding_info = is_interleaved_transposed ? get_padding_info({ input0, input1, output }) : get_padding_info({ input0, output });
-
- _input0 = input0;
- _input1 = input1;
- _input2 = helpers::float_ops::is_zero(beta) ? nullptr : input2;
- _output = output;
- _reinterpret_input_as_3d = reshape_info.reinterpret_input_as_3d();
- _reinterpret_output_as_3d = (reshape_info.depth_output_gemm3d() != 0);
- _add_bias = _input2 != nullptr;
- _broadcast_bias = reshape_info.broadcast_bias();
-
- // In case both input and output have to be reinterpreted as 3D tensors,
- // force reinterpret_input_as_3d and reinterpret_output_as_3d to be false.
- if(_reinterpret_input_as_3d == _reinterpret_output_as_3d)
- {
- _reinterpret_input_as_3d = false;
- _reinterpret_output_as_3d = false;
- }
-
- // Check if we need to slide the matrix B
- const unsigned int num_dimensions_input0 = _reinterpret_input_as_3d ? _input0->info()->num_dimensions() - 1 : _input0->info()->num_dimensions();
-
- _slide_matrix_b = (_input1->info()->num_dimensions() >= num_dimensions_input0);
-
- const DataType data_type = input0->info()->data_type();
-
- // Get target architecture
- GPUTarget gpu_target = get_target();
-
- ElementsProcessed num_elements_processed{};
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(input0->info(), input1->info(), (input2 != nullptr) ? input2->info() : nullptr, output->info(), beta, is_interleaved_transposed, reshape_info,
- gpu_target, num_elements_processed);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
-
- // If _reinterpret_input_as_3d = _reinterpret_output_as_3d = true, both will be turned off (false)
- // in which case we will dispatch a batched-GEMM to reduce the complexity of the address calculation within the OpenCL kernel.
- // This means that the actual m used by the kernel is given by output->info()->dimension(1)
- const unsigned int internal_m = _reinterpret_output_as_3d ? output->info()->dimension(1) * output->info()->dimension(2) : output->info()->dimension(1);
- const unsigned int n = output->info()->dimension(0);
-
- const unsigned int h_gemm_3d = _reinterpret_output_as_3d ? output->info()->dimension(1) : input0->info()->dimension(1);
- const unsigned int d_gemm_3d = _reinterpret_output_as_3d ? output->info()->dimension(2) : input0->info()->dimension(2);
-
- const unsigned int m0 = num_elements_processed.y();
- const unsigned int n0 = num_elements_processed.x();
-
- // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding.
- const unsigned int partial_store_m0 = internal_m % m0;
- const unsigned int partial_store_n0 = n % n0;
-
- // Create build options
- CLBuildOptions build_opts;
-
- build_opts.add_option_if(!(helpers::float_ops::is_one(alpha)), "-DALPHA=" + float_to_string_with_full_precision(alpha));
- build_opts.add_option_if(_input2 != nullptr, "-DBETA=" + float_to_string_with_full_precision(beta));
- build_opts.add_option_if(helpers::float_ops::is_one(beta), "-DUNIT_BETA");
- build_opts.add_option_if(reshape_info.broadcast_bias(), "-DBROADCAST_BIAS");
- build_opts.add_option_if(_reinterpret_input_as_3d, "-DREINTERPRET_INPUT_AS_3D");
- build_opts.add_option_if(_reinterpret_output_as_3d, "-DREINTERPRET_OUTPUT_AS_3D");
- build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, "-DHEIGHT_GEMM3D=" + support::cpp11::to_string(h_gemm_3d));
- build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, "-DDEPTH_GEMM3D=" + support::cpp11::to_string(d_gemm_3d));
- build_opts.add_option_if(!_slide_matrix_b, "-DMATRIX_B_DEPTH=" + support::cpp11::to_string(input1->info()->dimension(2)));
- build_opts.add_option_if(activation_info.enabled(), "-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(activation_info.activation())));
- build_opts.add_option_if(activation_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(activation_info.a()));
- build_opts.add_option_if(activation_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(activation_info.b()));
-
- const bool is_bifrost = get_arch_from_target(gpu_target) == GPUTarget::BIFROST;
-
- std::string kernel_name;
- if(is_interleaved_transposed)
- {
- const int mult_transpose1xW_width = reshape_info.mult_transpose1xW_width();
- const int mult_interleave4x4_height = reshape_info.mult_interleave4x4_height();
-
- build_opts.add_option("-DM=" + support::cpp11::to_string(internal_m));
- build_opts.add_option("-DN=" + support::cpp11::to_string(n));
- build_opts.add_option("-DK=" + support::cpp11::to_string(input1->info()->dimension(0) / (n0 * mult_transpose1xW_width)));
- build_opts.add_option("-DH0=" + support::cpp11::to_string(mult_transpose1xW_width));
- build_opts.add_option("-DV0=" + support::cpp11::to_string(mult_interleave4x4_height));
- build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
- build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
-
- if(is_data_type_float(data_type) && is_bifrost)
- {
- kernel_name = "gemm_mm_interleaved_transposed_" + lower_string(string_from_data_type(data_type)) + "_bifrost";
- }
- else
- {
- kernel_name = "gemm_mm_interleaved_transposed_" + lower_string(string_from_data_type(data_type));
- if(fp_mixed_precision && data_type == DataType::F16)
- {
- // currently wider accumulator is only supported for fp16 kernels.
- kernel_name += "_acc32";
- }
- }
- }
- else // The input tensors have not been reshaped
- {
- build_opts.add_option("-DN=" + support::cpp11::to_string(n));
- build_opts.add_option("-DK=" + support::cpp11::to_string(input0->info()->dimension(0)));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
- build_opts.add_option("-DM0=" + support::cpp11::to_string(m0));
- build_opts.add_option("-DN0=" + support::cpp11::to_string(n0));
- build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
- build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
-
- // Create kernels according to the architecture, data type and input size.
- if(is_data_type_float(data_type) && is_bifrost)
- {
- kernel_name = "gemm_mm_floating_point";
-
- if(input0->info()->num_dimensions() != 1)
- {
- kernel_name += "_" + lower_string(string_from_data_type(data_type)) + "_bifrost";
- if(fp_mixed_precision && data_type == DataType::F16)
- {
- // currently wider accumulator is only supported for fp16 kernels.
- kernel_name += "_acc32";
- }
- }
- else if(input1->info()->dimension(0) <= 1000 && data_type == DataType::F32)
- {
- // The first kernel is optimized for the case of 1000 or less output elements (e.g. FC8 of AlexNet and VGG-16, and
- // FC1 of Inception v3). The second kernel is optimized for the case of greater than 1000 output elements (e.g.
- // FC6 and FC7 of AlexNet and VGG-16).
- kernel_name += "_" + lower_string(string_from_data_type(data_type)) + "_bifrost_1000";
- }
-
- // The work-group size equal to the Bifrost quad size has been proved to be optimal for these kernels
- // via exhaustive autotuning over a range of representative layer configurations.
- set_lws_hint(cl::NDRange(4));
- }
- else // (MIDGARD and F32) or (F16)
- {
- kernel_name = "gemm_mm_floating_point";
- }
- }
-
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Set config_id for enabling LWS tuning
- _config_id = "gemm_";
- _config_id += (is_interleaved_transposed ? "reshaped_" : "");
- _config_id += (_add_bias ? "add_bias_" : "");
- _config_id += (_broadcast_bias ? "broadcast_bias_" : "");
- _config_id += (fp_mixed_precision ? "fp_mixed_" : "");
- _config_id += (_reinterpret_input_as_3d ? "3di_" : "");
- _config_id += (_reinterpret_output_as_3d ? "3do_" : "");
- _config_id += lower_string(string_from_data_type(input0->info()->data_type()));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(3));
- _config_id += "_";
- _config_id += (is_interleaved_transposed ? support::cpp11::to_string(input1->info()->dimension(0)) : support::cpp11::to_string(input1->info()->dimension(1)));
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status CLGEMMMatrixMultiplyKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta,
- bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, GPUTarget gpu_target, bool fp_mixed_precision, const ActivationLayerInfo &activation_info)
-{
- // Note: num_elements_processed will be set in validate_and_configure_window()
- ElementsProcessed num_elements_processed{};
- ARM_COMPUTE_UNUSED(alpha);
- ARM_COMPUTE_UNUSED(activation_info);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input0, input1, input2, output, beta, is_interleaved_transposed, reshape_info, fp_mixed_precision));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input0->clone().get(),
- input1->clone().get(),
- (input2 != nullptr) ? input2->clone().get() : nullptr,
- output->clone().get(),
- beta,
- is_interleaved_transposed,
- reshape_info,
- gpu_target,
- num_elements_processed)
- .first);
-
- return Status{};
-}
-
-void CLGEMMMatrixMultiplyKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- if(_input1->info()->num_dimensions() < 3)
- {
- // The stride_z for matrix B must be zero if we do not slice
- ARM_COMPUTE_ERROR_ON(_input1->info()->strides_in_bytes()[3] != 0);
- }
-
- Window slice = window.first_slice_window_3D();
- Window slice_matrix_b = slice;
-
- slice_matrix_b.set(Window::DimX, Window::Dimension(0, 1, 1));
- slice_matrix_b.set(Window::DimY, Window::Dimension(0, 1, 1));
-
- const unsigned int num_arguments_bias = _add_bias ? num_arguments_per_2D_tensor() + 1 : 0;
-
- if(_reinterpret_input_as_3d)
- {
- // Pass bottom paddings to the kernel if the input has to be reinterpreted as 3D tensor
- const unsigned int idx0 = 3 * num_arguments_per_2D_tensor() + 3 + num_arguments_bias;
- const unsigned int total_cross_plane_pad = _input0->info()->padding().top + _input0->info()->padding().bottom;
- _kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
- }
-
- if(_reinterpret_output_as_3d)
- {
- // Pass bottom paddings to the kernel if the output has to be reinterpreted as 3D tensor
- const unsigned int idx0 = 3 * num_arguments_per_2D_tensor() + 3 + (_reinterpret_input_as_3d ? 1 : 0) + num_arguments_bias;
- const unsigned int total_cross_plane_pad = _output->info()->padding().top + _output->info()->padding().bottom;
- _kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
- }
-
- do
- {
- Window slice_b = slice;
- // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2
- // This scenario can happen when the matrix multiplication is used to perform a convolution operation
- if(!_slide_matrix_b)
- {
- slice_b = slice_matrix_b;
- }
-
- unsigned int idx = 0;
- add_2D_tensor_argument(idx, _input0, slice);
- add_2D_tensor_argument(idx, _input1, slice_b);
- if(_add_bias)
- {
- add_2D_tensor_argument(idx, _input2, slice);
- }
- add_2D_tensor_argument(idx, _output, slice);
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input0->info()->strides_in_bytes()[2]));
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input1->info()->strides_in_bytes()[2]));
- if(_add_bias)
- {
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input2->info()->strides_in_bytes()[2]));
- }
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_output->info()->strides_in_bytes()[2]));
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_3D(slice));
-}
-} // namespace arm_compute
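
For reference, the run() implementation removed above locates the two cross-plane-padding kernel arguments purely by counting the arguments that precede them: three 2D tensors (input0, input1, output) with one stride_z each, plus the optional bias tensor and its stride_z, and one extra slot for the output pad when the input pad has already been appended. The standalone C++ sketch below mirrors only that bookkeeping; the value 6 used for the per-tensor argument count is an assumption standing in for ICLKernel::num_arguments_per_2D_tensor() and is not taken from this patch.

#include <cstdio>

// Sketch only: mirrors the index arithmetic of the deleted run() above.
struct PadArgIndices
{
    unsigned int input_pad;  // slot for the input cross-plane pad (used when the input is reinterpreted as 3D)
    unsigned int output_pad; // slot for the output cross-plane pad (used when the output is reinterpreted as 3D)
};

PadArgIndices pad_arg_indices(bool add_bias, bool reinterpret_input_as_3d, unsigned int args_per_2d_tensor)
{
    // Three 2D tensors and their three stride_z arguments, plus the bias tensor and its stride_z when present.
    const unsigned int num_arguments_bias = add_bias ? args_per_2d_tensor + 1 : 0;
    const unsigned int base               = 3 * args_per_2d_tensor + 3 + num_arguments_bias;

    PadArgIndices idx{};
    idx.input_pad  = base;
    idx.output_pad = base + (reinterpret_input_as_3d ? 1u : 0u); // shifted by one if the input pad slot was used
    return idx;
}

int main()
{
    const PadArgIndices idx = pad_arg_indices(true /* add_bias */, true /* reinterpret_input_as_3d */, 6u);
    std::printf("input pad arg: %u, output pad arg: %u\n", idx.input_pad, idx.output_pad);
    return 0;
}
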
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h
deleted file mode 100644
index 71d223b8ac..0000000000
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMMATRIXMULTIPLYKERNEL_H
-#define ARM_COMPUTE_CLGEMMMATRIXMULTIPLYKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** OpenCL kernel to multiply two input matrices "A" and "B" and add a matrix "C" if provided. All elements of the output matrix will be multiplied by alpha. In case matrix C is passed, it will be added to the previous result.
- * For the matrix C, the broadcast addition is supported if the flag "broadcast_bias" is set in the GEMMReshapeInfo object
- *
- * @note If the input tensors @p input0 and @p input1 have been reshaped respectively with @ref CLGEMMReshapeLHSMatrixKernel and @ref CLGEMMReshapeRHSMatrixKernel,
- * the flag @p is_interleaved_transposed must be set to true
- *
- * @attention @p input1 tensor must have at least 2 dimensions (matrix)
- *
- */
-class CLGEMMMatrixMultiplyKernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLGEMMMatrixMultiplyKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLGEMMMatrixMultiplyKernel(const CLGEMMMatrixMultiplyKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLGEMMMatrixMultiplyKernel &operator=(const CLGEMMMatrixMultiplyKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLGEMMMatrixMultiplyKernel(CLGEMMMatrixMultiplyKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLGEMMMatrixMultiplyKernel &operator=(CLGEMMMatrixMultiplyKernel &&) = default;
- /** Initialise the kernel's input, output and alpha
- *
- * @param[in] input0 Input tensor containing the Matrix A. Data types supported: F16/F32
- * @param[in] input1 Input tensor containing the Matrix B. Data type supported: same as @p input0
- * @param[in] input2 Input tensor containing the Matrix C (bias). Can be nullptr. Data type supported: same as @p input0
- * @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0
- * @param[in] alpha Weight of the matrix product
- * @param[in] beta                      (Optional) Weight of matrix C. Default value is 0. Only beta = 1 is currently supported.
- * @param[in] is_interleaved_transposed (Optional) True if input0 and input1 have been reshaped respectively using @ref CLGEMMReshapeLHSMatrixKernel and @ref CLGEMMReshapeRHSMatrixKernel
- * @param[in] reshape_info (Optional) GEMM reshape info. If is_interleaved_transposed = true, this object must contain the information to understand how the matrix A and matrix B have been reshaped
- * @param[in] fp_mixed_precision (Optional) Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy
- * @param[in] activation_info (Optional) Activation to apply after the matrix multiplication
- *
- */
- void configure(const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta = 0.f,
- bool is_interleaved_transposed = true, const GEMMReshapeInfo &reshape_info = GEMMReshapeInfo(), bool fp_mixed_precision = false, const ActivationLayerInfo &activation_info = ActivationLayerInfo());
- /** Initialise the kernel's input, output and alpha
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input0 Input tensor containing the Matrix A. Data types supported: F16/F32
- * @param[in] input1 Input tensor containing the Matrix B. Data type supported: same as @p input0
- * @param[in] input2 Input tensor containing the Matrix C (bias). Can be nullptr. Data type supported: same as @p input0
- * @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0
- * @param[in] alpha Weight of the matrix product
- * @param[in] beta                      (Optional) Weight of matrix C. Default value is 0. Only beta = 1 is currently supported.
- * @param[in] is_interleaved_transposed (Optional) True if input0 and input1 have been reshaped respectively using @ref CLGEMMReshapeLHSMatrixKernel and @ref CLGEMMReshapeRHSMatrixKernel
- * @param[in] reshape_info (Optional) GEMM reshape info. If is_interleaved_transposed = true, this object must contain the information to understand how the matrix A and matrix B have been reshaped
- * @param[in] fp_mixed_precision (Optional) Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy
- * @param[in] activation_info (Optional) Activation to apply after the matrix multiplication
- *
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta = 0.f,
- bool is_interleaved_transposed = true, const GEMMReshapeInfo &reshape_info = GEMMReshapeInfo(), bool fp_mixed_precision = false, const ActivationLayerInfo &activation_info = ActivationLayerInfo());
- /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMMatrixMultiplyKernel
- *
- * @param[in] input0 Input tensor containing the Matrix A info. Data types supported: F16/F32
- * @param[in] input1 Input tensor containing the Matrix B info. Data type supported: same as @p input0
- * @param[in] input2 Input tensor containing the Matrix C (bias) info. Can be nullptr. Data type supported: same as @p input0
- * @param[in] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0
- * @param[in] alpha Weight of the matrix product
- * @param[in] beta                      Weight of matrix C. Default value is 0. Only beta = 1 is currently supported.
- * @param[in] is_interleaved_transposed True if input0 and input1 have been reshaped respectively using @ref CLGEMMReshapeLHSMatrixKernel and @ref CLGEMMReshapeRHSMatrixKernel
- * @param[in] reshape_info GEMM reshape info. If is_interleaved_transposed = true, this object must contain the information to understand how the matrix A and matrix B have been reshaped
- * @param[in] gpu_target GPU Target
- * @param[in] fp_mixed_precision (Optional) Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy
- * @param[in] activation_info (Optional) Activation to apply after the matrix multiplication
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta,
- bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, GPUTarget gpu_target, bool fp_mixed_precision = false, const ActivationLayerInfo &activation_info = ActivationLayerInfo());
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-public:
- const ICLTensor *_input0;
- const ICLTensor *_input1;
- const ICLTensor *_input2;
- ICLTensor *_output;
- bool _slide_matrix_b;
- bool _reinterpret_input_as_3d;
- bool _reinterpret_output_as_3d;
- bool _add_bias;
- bool _broadcast_bias;
-};
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CLGEMMMATRIXMULTIPLYKERNEL_H */
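
The static validate() declared in the header removed above can be exercised with tensor metadata only, without allocating any OpenCL buffers, against a tree that still contains this kernel and exposes its src/ include path. A minimal sketch follows, using the non-reshaped path (is_interleaved_transposed = false); the shapes are hypothetical, and the GEMMReshapeInfo(m, n, k) constructor and GPUTarget::UNKNOWN enumerator are assumed to be available from the core Types header.

#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"

int main()
{
    using namespace arm_compute;

    // Hypothetical GEMM sizes: A is m x k, B is k x n, output is m x n.
    // TensorShape takes (width, height), so the inner dimension comes first.
    const unsigned int m = 64, n = 128, k = 32;
    const TensorInfo a(TensorShape(k, m), 1, DataType::F32);
    const TensorInfo b(TensorShape(n, k), 1, DataType::F32);
    const TensorInfo dst(TensorShape(n, m), 1, DataType::F32);

    // Non-reshaped path: no bias, alpha = 1, beta = 0, unspecified GPU target.
    const Status status = CLGEMMMatrixMultiplyKernel::validate(&a, &b, nullptr, &dst,
                                                                1.0f, 0.0f,
                                                                false /* is_interleaved_transposed */,
                                                                GEMMReshapeInfo(m, n, k),
                                                                GPUTarget::UNKNOWN);
    return status.error_code() == ErrorCode::OK ? 0 : 1;
}
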
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp
deleted file mode 100644
index f07166e4bb..0000000000
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp
+++ /dev/null
@@ -1,422 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "src/core/utils/helpers/float_ops.h"
-#include "support/StringSupport.h"
-
-#include <cstddef>
-#include <cstdint>
-#include <tuple>
-
-using namespace arm_compute::misc::shape_calculator;
-
-namespace arm_compute
-{
-namespace
-{
-using ElementsProcessed = Steps;
-
-Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMKernelInfo &gemm_info)
-{
- ARM_COMPUTE_UNUSED(alpha);
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input0, input1, output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input0->num_dimensions() > 4, "The number of dimensions for the LHS matrix must be <= 4");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input1->num_dimensions() > 3, "The number of dimensions for the RHS matrix must be <= 3");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((rhs_info.k0 & (rhs_info.k0 - 1)) && rhs_info.k0 != 3), "Only 2,3,4,8,16 are supported for k0");
- ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.k0 > 16);
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.m0 < 1 || lhs_info.m0 > 8);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((rhs_info.n0 & (rhs_info.n0 - 1)) && rhs_info.n0 != 3), "Only 2,3,4,8,16 are supported for n0");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((gemm_info.reinterpret_input_as_3d || gemm_info.depth_output_gemm3d != 0) && (input2 != nullptr)
- && (!gemm_info.broadcast_bias),
- "Bias addition only supported with broadcast mode in case the input or output has to be reinterpreted as 3D");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.fp_mixed_precision, "Mixed precision not supported");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(rhs_info.export_to_cl_image, "Export to CLImage not supported for GEMM native");
-
- const unsigned int m = gemm_info.m;
- const unsigned int n = gemm_info.n;
- const unsigned int k = gemm_info.k;
-
- ARM_COMPUTE_UNUSED(m);
- ARM_COMPUTE_UNUSED(n);
- ARM_COMPUTE_UNUSED(k);
-
- ARM_COMPUTE_RETURN_ERROR_ON(input0->dimension(0) != k);
- ARM_COMPUTE_RETURN_ERROR_ON(input1->dimension(0) != n);
- ARM_COMPUTE_RETURN_ERROR_ON(input1->dimension(1) != k);
- if(gemm_info.reinterpret_input_as_3d)
- {
- ARM_COMPUTE_RETURN_ERROR_ON(input0->dimension(1) * input0->dimension(2) != m);
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON(input0->dimension(1) != m);
- }
-
- if(input2 != nullptr && !(helpers::float_ops::is_zero(beta)))
- {
- const unsigned int input2_dim0 = input2->dimension(0);
- const unsigned int input2_dim1 = input2->dimension(1);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input2, input1);
- if(gemm_info.broadcast_bias)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((input2_dim1 != 1 || input2_dim0 != n), "Incorrect dimension of bias matrix which is to be broadcasted");
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((input2_dim0 != n || input2_dim1 != m), "Incorrect dimension of bias matrix");
- }
- }
-
- if(output->total_size() != 0)
- {
- const TensorInfo tensor_info_output = output->clone()->set_tensor_shape(compute_mm_shape(*input0, *input1, gemm_info));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, output);
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input0, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMKernelInfo &gemm_info, ElementsProcessed &num_elements_processed)
-{
- unsigned int &num_elems_processed_per_iteration_x = num_elements_processed[0];
- unsigned int &num_elems_processed_per_iteration_y = num_elements_processed[1];
- bool reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d;
- bool reinterpret_output_as_3d = gemm_info.depth_output_gemm3d != 0;
-
- Window win{};
- Window win_out{};
- bool window_changed = false;
-
- // In case both input and output have to be reinterpreted as 3D tensors,
- // force reinterpret_input_as_3d and reinterpret_output_as_3d to be false.
- if(reinterpret_input_as_3d == reinterpret_output_as_3d)
- {
- reinterpret_output_as_3d = false;
- }
-
- // Output tensor auto initialization if not yet initialized
- auto_init_if_empty(*output, input0->clone()->set_tensor_shape(compute_mm_shape(*input0, *input1, gemm_info)));
-
- TensorInfo tmp_info(*output);
-
- if(reinterpret_output_as_3d)
- {
- // Since the output tensor has to be reinterpreted as 3D and the execute window is based on a 2D GEMM,
- // the window needs to be constructed on the 2D collapsed version of the tensor
- TensorShape tmp_shape(output->tensor_shape());
- tmp_shape.collapse(2U, 1U);
- tmp_info.set_tensor_shape(tmp_shape);
- }
-
- // Configure kernel window
- num_elems_processed_per_iteration_x = rhs_info.n0;
- num_elems_processed_per_iteration_y = lhs_info.m0;
-
- win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
- win_out = calculate_max_window(*output, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
-
- AccessWindowStatic input0_access(input0, 0, 0,
- input0->dimension(0),
- input0->dimension(1));
- AccessWindowStatic input1_access(input1, 0, 0,
- ceil_to_multiple(input1->dimension(0), num_elems_processed_per_iteration_x),
- input1->dimension(1));
- AccessWindowStatic output_access(output, 0, 0,
- output->dimension(0),
- output->dimension(1));
-
- if(input2 != nullptr)
- {
- const int bias_processed_per_iteration_x = num_elems_processed_per_iteration_x;
-
- AccessWindowStatic input2_access(input2, 0, 0,
- ceil_to_multiple(input2->dimension(0), bias_processed_per_iteration_x),
- input2->dimension(1));
-
- window_changed = update_window_and_padding(win, input0_access, input1_access, input2_access) || // window used by the execute_window_loop
- update_window_and_padding(win_out, output_access); // window used to update the padding requirements of output tensor
- }
- else
- {
- window_changed = update_window_and_padding(win, input0_access, input1_access) || // window used by the execute_window_loop
- update_window_and_padding(win_out, output_access); // window used to update the padding requirements of output tensor
- }
-
- output_access.set_valid_region(win_out, ValidRegion(Coordinates(), output->tensor_shape()));
-
- // Collapse along the Z direction
- // This collapse needs to be here in order to tune the Z dimension of LWS
- Window collapsed = win;
- const unsigned int dimension_to_collapse = std::min(static_cast<unsigned int>(output->num_dimensions()), 2u);
- collapsed = win.collapse(win, dimension_to_collapse);
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, collapsed);
-}
-} // namespace
-
-CLGEMMMatrixMultiplyNativeKernel::CLGEMMMatrixMultiplyNativeKernel()
- : _input0(nullptr), _input1(nullptr), _input2(nullptr), _output(nullptr), _slide_matrix_b(true), _reinterpret_input_as_3d(false), _reinterpret_output_as_3d(false), _use_dummy_work_items(false),
- _add_bias(false), _broadcast_bias(false)
-{
-}
-
-void CLGEMMMatrixMultiplyNativeKernel::configure(const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta,
- const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input0, input1, input2, output, alpha, beta, lhs_info, rhs_info, gemm_info);
-}
-
-void CLGEMMMatrixMultiplyNativeKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha,
- float beta,
- const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output);
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), (input2 != nullptr ? input2->info() : nullptr), output->info(), alpha, beta, lhs_info, rhs_info, gemm_info));
-
- auto padding_info = get_padding_info({ input0, output });
- _input0 = input0;
- _input1 = input1;
- _input2 = helpers::float_ops::is_zero(beta) ? nullptr : input2;
- _output = output;
- _reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d;
- _reinterpret_output_as_3d = gemm_info.depth_output_gemm3d != 0;
- _use_dummy_work_items = preferred_dummy_work_items_support(CLKernelLibrary::get().get_device());
- _add_bias = _input2 != nullptr;
- _broadcast_bias = gemm_info.broadcast_bias;
-
- // In case both input and output have to be reinterpreted as 3D tensors,
- // force reinterpret_input_as_3d and reinterpret_output_as_3d to be false.
- if(_reinterpret_input_as_3d == _reinterpret_output_as_3d)
- {
- _reinterpret_input_as_3d = false;
- _reinterpret_output_as_3d = false;
- }
-
- // Check if we need to slide the matrix B
- const unsigned int num_dimensions_input0 = _input0->info()->num_dimensions();
- _slide_matrix_b = (_input1->info()->num_dimensions() >= num_dimensions_input0);
-
- ElementsProcessed num_elements_processed{};
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(input0->info(), input1->info(), input2 != nullptr ? input2->info() : nullptr, output->info(), lhs_info, rhs_info, gemm_info, num_elements_processed);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
-
- // If _reinterpret_input_as_3d = _reinterpret_output_as_3d = true,
- // we will dispatch a batched-GEMM to reduce the complexity of the address calculation within the OpenCL kernel.
- // This means that the actual m used by the kernel is given by output->info()->dimension(1) and not by gemm_info.m
- const unsigned int internal_m = _reinterpret_output_as_3d ? gemm_info.m : output->info()->dimension(1);
-
- const unsigned int h_gemm_3d = _reinterpret_output_as_3d ? output->info()->dimension(1) : input0->info()->dimension(1);
- const unsigned int d_gemm_3d = _reinterpret_output_as_3d ? output->info()->dimension(2) : input0->info()->dimension(2);
-
- // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding.
- const unsigned int partial_store_m0 = internal_m % lhs_info.m0;
- const unsigned int partial_store_n0 = gemm_info.n % rhs_info.n0;
-
- // Shrink M0 to be always <= M (internal_m) to prevent out-of-bounds reads.
- // NOTE: This might have implications on heuristics and performance
- const unsigned int internal_m0 = std::min(internal_m, lhs_info.m0);
-
- // Create build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input0->info()->data_type()));
- build_opts.add_option_if(!(helpers::float_ops::is_one(alpha)), "-DALPHA=" + float_to_string_with_full_precision(alpha));
- build_opts.add_option_if(_input2 != nullptr, "-DBETA=" + float_to_string_with_full_precision(beta));
- build_opts.add_option_if(helpers::float_ops::is_one(beta), "-DUNIT_BETA");
- build_opts.add_option_if(gemm_info.broadcast_bias, "-DBROADCAST_BIAS");
- build_opts.add_option_if(_reinterpret_input_as_3d, "-DREINTERPRET_INPUT_AS_3D");
- build_opts.add_option_if(_reinterpret_output_as_3d, "-DREINTERPRET_OUTPUT_AS_3D");
- build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, "-DHEIGHT_GEMM3D=" + support::cpp11::to_string(h_gemm_3d));
- build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, "-DDEPTH_GEMM3D=" + support::cpp11::to_string(d_gemm_3d));
- build_opts.add_option_if(!_slide_matrix_b, "-DMATRIX_B_DEPTH=" + support::cpp11::to_string(input1->info()->dimension(2)));
- build_opts.add_option_if(_use_dummy_work_items, "-DDUMMY_WORK_ITEMS");
- build_opts.add_option("-DM=" + support::cpp11::to_string(internal_m));
- build_opts.add_option("-DN=" + support::cpp11::to_string(gemm_info.n));
- build_opts.add_option("-DK=" + support::cpp11::to_string(gemm_info.k));
- build_opts.add_option("-DM0=" + support::cpp11::to_string(internal_m0));
- build_opts.add_option("-DN0=" + support::cpp11::to_string(rhs_info.n0));
- build_opts.add_option("-DK0=" + support::cpp11::to_string(rhs_info.k0));
- build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
- build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
- build_opts.add_option_if(gemm_info.activation_info.enabled(), "-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(gemm_info.activation_info.activation())));
- build_opts.add_option_if(gemm_info.activation_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(gemm_info.activation_info.a()));
- build_opts.add_option_if(gemm_info.activation_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(gemm_info.activation_info.b()));
-
- std::string kernel_name("gemm_mm_native");
-
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Set config_id for enabling LWS tuning
- _config_id = kernel_name;
- _config_id += "_";
- _config_id += (_add_bias ? "add_bias_" : "");
- _config_id += (_broadcast_bias ? "broadcast_bias_" : "");
- _config_id += (_reinterpret_input_as_3d ? "3di_" : "");
- _config_id += (_reinterpret_output_as_3d ? "3do_" : "");
- _config_id += (gemm_info.activation_info.enabled() ? "fused_activation_" : "");
- _config_id += lower_string(string_from_data_type(input0->info()->data_type()));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(gemm_info.k);
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.m0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.n0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.k0);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status CLGEMMMatrixMultiplyNativeKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta,
- const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info)
-{
- ElementsProcessed num_elements_processed{};
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input0, input1, input2, output, alpha, beta, lhs_info, rhs_info, gemm_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input0->clone().get(),
- input1->clone().get(),
- input2 != nullptr ? input2->clone().get() : nullptr,
- output->clone().get(),
- lhs_info,
- rhs_info,
- gemm_info,
- num_elements_processed)
- .first);
-
- return Status{};
-}
-
-void CLGEMMMatrixMultiplyNativeKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- if(_input1->info()->num_dimensions() < 3)
- {
- // The stride_z for matrix B must be zero if we do not slice
- ARM_COMPUTE_ERROR_ON(_input1->info()->strides_in_bytes()[3] != 0);
- }
-
- Window slice = window.first_slice_window_3D();
- Window slice_matrix_b = slice;
-
- slice_matrix_b.set(Window::DimX, Window::Dimension(0, 1, 1));
- slice_matrix_b.set(Window::DimY, Window::Dimension(0, 1, 1));
-
- if(_reinterpret_input_as_3d)
- {
- // Pass bottom paddings to the kernel if the input has to be reinterpreted as 3D tensor
- unsigned int idx0;
- if(_add_bias)
- {
- idx0 = 4 * num_arguments_per_2D_tensor() + 4;
- }
- else
- {
- idx0 = 3 * num_arguments_per_2D_tensor() + 3;
- }
- const unsigned int total_cross_plane_pad = _input0->info()->padding().top + _input0->info()->padding().bottom;
- _kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
- }
-
- if(_reinterpret_output_as_3d)
- {
- // Pass bottom paddings to the kernel if the output has to be reinterpreted as 3D tensor
- unsigned int idx0;
- if(_add_bias)
- {
- idx0 = 4 * num_arguments_per_2D_tensor() + 4 + (_reinterpret_input_as_3d ? 1 : 0);
- }
- else
- {
- idx0 = 3 * num_arguments_per_2D_tensor() + 3 + (_reinterpret_input_as_3d ? 1 : 0);
- }
- const unsigned int total_cross_plane_pad = _output->info()->padding().top + _output->info()->padding().bottom;
- _kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
- }
-
- do
- {
- Window slice_b = slice;
- // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2
- // This scenario can happen when the matrix multiplication is used to perform a convolution operation
- if(!_slide_matrix_b)
- {
- slice_b = slice_matrix_b;
- }
-
- unsigned int idx = 0;
- add_2D_tensor_argument(idx, _input0, slice);
- add_2D_tensor_argument(idx, _input1, slice_b);
- if(_add_bias)
- {
- add_2D_tensor_argument(idx, _input2, slice);
- }
- add_2D_tensor_argument(idx, _output, slice);
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input0->info()->strides_in_bytes()[2]));
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input1->info()->strides_in_bytes()[2]));
- if(_add_bias)
- {
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input2->info()->strides_in_bytes()[2]));
- }
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_output->info()->strides_in_bytes()[2]));
- enqueue(queue, *this, slice, lws_hint(), _use_dummy_work_items);
- }
- while(window.slide_window_slice_3D(slice));
-}
-} // namespace arm_compute
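
The configure() method of the native kernel removed above derives its block sizes without requiring output padding: M0 is clamped so a block never exceeds M, and the leftover edge-block sizes are exported as the PARTIAL_STORE_M0 / PARTIAL_STORE_N0 build options. The standalone sketch below reproduces that arithmetic with hypothetical sizes.

#include <algorithm>
#include <cstdio>

int main()
{
    const unsigned int m = 100, n = 60;  // hypothetical output dimensions (rows, columns)
    const unsigned int m0 = 8, n0 = 16;  // hypothetical lhs_info.m0 / rhs_info.n0 block sizes

    // Clamp M0 so a single block never reads past the end of the LHS (mirrors internal_m0 above).
    const unsigned int internal_m0 = std::min(m, m0);

    // Sizes of the partial blocks at the bottom/right edge of the tile grid; zero means the
    // dimension divides evenly and only full blocks are stored.
    const unsigned int partial_store_m0 = m % m0;
    const unsigned int partial_store_n0 = n % n0;

    std::printf("-DM0=%u -DN0=%u -DPARTIAL_STORE_M0=%u -DPARTIAL_STORE_N0=%u\n",
                internal_m0, n0, partial_store_m0, partial_store_n0);
    return 0;
}
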
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.h b/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.h
deleted file mode 100644
index 6b6004b464..0000000000
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMMATRIXMULTIPLYNATIVEKERNEL_H
-#define ARM_COMPUTE_CLGEMMMATRIXMULTIPLYNATIVEKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-#include "arm_compute/core/KernelDescriptors.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** OpenCL kernel to multiply matrices when neither of the input matrices has been reshaped */
-class CLGEMMMatrixMultiplyNativeKernel : public ICLKernel
-{
-public:
- /** Default Constructor */
- CLGEMMMatrixMultiplyNativeKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLGEMMMatrixMultiplyNativeKernel(const CLGEMMMatrixMultiplyNativeKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLGEMMMatrixMultiplyNativeKernel &operator=(const CLGEMMMatrixMultiplyNativeKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLGEMMMatrixMultiplyNativeKernel(CLGEMMMatrixMultiplyNativeKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLGEMMMatrixMultiplyNativeKernel &operator=(CLGEMMMatrixMultiplyNativeKernel &&) = default;
- /** Initialise the kernel's input and output.
- *
- * @param[in]  input0          Input tensor for the LHS matrix. Data type supported: F32. The number of dimensions for the LHS matrix must be less than or equal to 4.
- * @param[in]  input1          Input tensor for the RHS matrix. Data type supported: same as @p input0. The number of dimensions for the RHS matrix must be less than or equal to 3.
- * @param[in] input2 Input tensor containing the bias matrix. Data type supported: same as @p input0.
- * @param[out] output Output tensor info. Data type supported: same as @p input0
- * @param[in] alpha Weight of the matrix product
- * @param[in] beta Weight of the matrix bias
- * @param[in] lhs_info LHS matrix information used to retrieve the number of rows and accumulations to be processed by each thread. Only the following values are supported:
- * lhs_info.m0: 1,2,3,4,5,6,7,8
- * lhs_info.k0: 2,3,4,8,16
- * @param[in] rhs_info RHS matrix information used to retrieve the number of columns and accumulations to be processed by each thread. Only the following values are supported:
- *                             rhs_info.n0: 2,3,4,8,16
- *                             rhs_info.k0: same as lhs_info.k0
- * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices
- */
- void configure(const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMKernelInfo &gemm_info);
- /** Initialise the kernel's input and output.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in]  input0          Input tensor for the LHS matrix. Data type supported: F32. The number of dimensions for the LHS matrix must be less than or equal to 4.
- * @param[in]  input1          Input tensor for the RHS matrix. Data type supported: same as @p input0. The number of dimensions for the RHS matrix must be less than or equal to 3.
- * @param[in] input2 Input tensor containing the bias matrix. Data type supported: same as @p input0.
- * @param[out] output Output tensor info. Data type supported: same as @p input0
- * @param[in] alpha Weight of the matrix product
- * @param[in] beta Weight of the matrix bias
- * @param[in] lhs_info LHS matrix information used to retrieve the number of rows and accumulations to be processed by each thread. Only the following values are supported:
- * lhs_info.m0: 1,2,3,4,5,6,7,8
- * lhs_info.k0: 2,3,4,8,16
- * @param[in] rhs_info RHS matrix information used to retrieve the number of columns and accumulations to be processed by each thread. Only the following values are supported:
- *                             rhs_info.n0: 2,3,4,8,16
- *                             rhs_info.k0: same as lhs_info.k0
- * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta,
- const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMKernelInfo &gemm_info);
- /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMMatrixMultiplyNativeKernel
- *
- * @param[in]  input0    Input tensor info for the LHS matrix. Data type supported: F32. The number of dimensions for the LHS matrix must be less than or equal to 4.
- * @param[in]  input1    Input tensor info for the RHS matrix. Data type supported: same as @p input0. The number of dimensions for the RHS matrix must be less than or equal to 3.
- * @param[in] input2 Input tensor info containing the bias matrix. Data type supported: same as @p input0.
- * @param[in] output Output tensor info. Data type supported: same as @p input0
- * @param[in] alpha Weight of the matrix product
- * @param[in] beta Weight of the matrix bias
- * @param[in] lhs_info LHS matrix information used to retrieve the number of rows and accumulations to be processed by each thread. Only the following values are supported:
- * lhs_info.m0: 1,2,3,4,5,6,7,8
- * lhs_info.k0: 2,3,4,8,16
- * @param[in] rhs_info RHS matrix information used to retrieve the number of columns and accumulations to be processed by each thread. Only the following values are supported:
- *                             rhs_info.n0: 2,3,4,8,16
- *                             rhs_info.k0: same as lhs_info.k0
- * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMKernelInfo &gemm_info);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_input0;
- const ICLTensor *_input1;
- const ICLTensor *_input2;
- ICLTensor *_output;
- bool _slide_matrix_b;
- bool _reinterpret_input_as_3d;
- bool _reinterpret_output_as_3d;
- bool _use_dummy_work_items;
- bool _add_bias;
- bool _broadcast_bias;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLGEMMMATRIXMULTIPLYNATIVEKERNEL_H*/
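
The k0/n0 constraint documented in the header removed above ("only 2,3,4,8,16 are supported") is enforced in validate_arguments() with a power-of-two test: v & (v - 1) is zero exactly when v is a power of two, and 3 is special-cased. A standalone version of the documented check, not taken verbatim from the library, could look like this.

#include <cstdio>

// Accepts the documented block sizes {2, 3, 4, 8, 16} and rejects everything else.
bool is_supported_block_size(unsigned int v)
{
    if(v < 2 || v > 16)
    {
        return false;
    }
    return ((v & (v - 1)) == 0) || (v == 3); // power of two, or the special case 3
}

int main()
{
    for(unsigned int v = 1; v <= 17; ++v)
    {
        std::printf("%2u -> %s\n", v, is_supported_block_size(v) ? "supported" : "rejected");
    }
    return 0;
}
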
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp
deleted file mode 100644
index 9f1ffa48eb..0000000000
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp
+++ /dev/null
@@ -1,443 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLUtils.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/CL/gemm/CLGEMMHelpers.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "src/core/utils/helpers/float_ops.h"
-#include "support/StringSupport.h"
-
-#include <cstddef>
-#include <cstdint>
-#include <tuple>
-
-using namespace arm_compute;
-using namespace arm_compute::misc::shape_calculator;
-
-namespace arm_compute
-{
-class Coordinates;
-} // namespace arm_compute
-
-namespace
-{
-using ElementsProcessed = Steps;
-
-Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMKernelInfo &gemm_info)
-{
- ARM_COMPUTE_UNUSED(alpha);
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input0, input1, output);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input0);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input0->num_dimensions() > 4, "The number of dimensions for the LHS matrix must be <= 4");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input1->num_dimensions() > 3, "The number of dimensions for the RHS matrix must be <= 3");
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.k0 != rhs_info.k0);
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.transpose == rhs_info.transpose);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((lhs_info.k0 & (lhs_info.k0 - 1)) && lhs_info.k0 != 3), "Only 2,3,4,8,16 are supported for k0");
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.k0 > 16);
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.m0 < 2 || lhs_info.m0 > 8);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((lhs_info.transpose) && ((lhs_info.m0 & (lhs_info.m0 - 1)) && lhs_info.m0 != 3), "Only 2,3,4,8,16 are supported for m0");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((rhs_info.transpose) && ((rhs_info.n0 & (rhs_info.n0 - 1)) && rhs_info.n0 != 3), "Only 2,3,4,8,16 are supported for n0");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((gemm_info.reinterpret_input_as_3d || gemm_info.depth_output_gemm3d != 0) && (input2 != nullptr)
- && (!gemm_info.broadcast_bias),
- "Bias addition only supported with broadcast mode in case the input or output has to be reinterpreted as 3D");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.fp_mixed_precision && (input0->data_type() == DataType::F32), "Mixed precision only supported for F16 data type");
- ARM_COMPUTE_RETURN_ON_ERROR(cl_gemm::validate_image2d_support_on_rhs(*input1, rhs_info));
-
- const unsigned int m = gemm_info.m;
- const unsigned int n = gemm_info.n;
- const unsigned int k = gemm_info.k;
-
- TensorShape tensor_shape0{ input0->tensor_shape() };
- tensor_shape0.set(0, k);
- tensor_shape0.set(1, m);
-
- TensorShape tensor_shape1{ input1->tensor_shape() };
- tensor_shape1.set(0, n);
- tensor_shape1.set(1, k);
-
- if(input2 != nullptr && !(helpers::float_ops::is_zero(beta)))
- {
- const unsigned int input2_dim0 = input2->dimension(0);
- const unsigned int input2_dim1 = input2->dimension(1);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input2, input1);
- if(gemm_info.broadcast_bias)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((input2_dim1 != 1 || input2_dim0 != n), "Incorrect dimension of bias matrix which is to be broadcasted");
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((input2_dim0 != n || input2_dim1 != m), "Incorrect dimension of bias matrix");
- }
- }
-
- const TensorInfo tensor_info0 = input0->clone()->set_tensor_shape(tensor_shape0);
- const TensorInfo tensor_info1 = input1->clone()->set_tensor_shape(tensor_shape1);
-
- const TensorInfo tensor_info_reshaped0 = input0->clone()->set_tensor_shape(compute_lhs_reshaped_shape(tensor_info0, lhs_info));
- const TensorInfo tensor_info_reshaped1 = input1->clone()->set_tensor_shape(compute_rhs_reshaped_shape(tensor_info1, rhs_info));
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input0, &tensor_info_reshaped0);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, &tensor_info_reshaped1);
-
- if(output->total_size() != 0)
- {
- const TensorInfo tensor_info_output = output->clone()->set_tensor_shape(compute_mm_shape(*input0, *input1, gemm_info));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, output);
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input0, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMKernelInfo &gemm_info, ElementsProcessed &num_elements_processed)
-{
- unsigned int &num_elems_processed_per_iteration_x = num_elements_processed[0];
- unsigned int &num_elems_processed_per_iteration_y = num_elements_processed[1];
- bool reinterpret_output_as_3d = gemm_info.depth_output_gemm3d != 0;
-
- Window win{};
- Window win_out{};
- bool window_changed = false;
-
- // Output tensor auto initialization if not yet initialized
- auto_init_if_empty(*output, input0->clone()->set_tensor_shape(compute_mm_shape(*input0, *input1, gemm_info)));
-
- TensorInfo tmp_info(*output);
-
- if(reinterpret_output_as_3d)
- {
- // Since the output tensor has to be reinterpreted as 3D and the execute window is based on a 2D GEMM,
- // the window needs to be constructed on the 2D collapsed version of the tensor
- TensorShape tmp_shape(output->tensor_shape());
- tmp_shape.collapse(2U, 1U);
- tmp_info.set_tensor_shape(tmp_shape);
- }
-
- // Configure kernel window
- num_elems_processed_per_iteration_x = rhs_info.n0;
- num_elems_processed_per_iteration_y = lhs_info.m0;
-
- win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
- win_out = calculate_max_window(*output, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
-
- AccessWindowStatic input0_access(input0, 0, 0,
- input0->dimension(0),
- input0->dimension(1));
- AccessWindowStatic input1_access(input1, 0, 0,
- input1->dimension(0),
- input1->dimension(1));
- AccessWindowStatic output_access(output, 0, 0,
- output->dimension(0),
- output->dimension(1));
-
- if(input2 != nullptr)
- {
- const int bias_processed_per_iteration_x = num_elems_processed_per_iteration_x;
-
- const int bias_processed_per_iteration_y = gemm_info.broadcast_bias ? 1 : num_elems_processed_per_iteration_y;
-
- AccessWindowStatic input2_access(input2, 0, 0,
- ceil_to_multiple(input2->dimension(0), bias_processed_per_iteration_x),
- ceil_to_multiple(input2->dimension(1), bias_processed_per_iteration_y));
-
- window_changed = update_window_and_padding(win, input0_access, input1_access, input2_access) || // window used by the execute_window_loop
- update_window_and_padding(win_out, output_access); // window used to update the padding requirements of output tensor
- }
- else
- {
- window_changed = update_window_and_padding(win, input0_access, input1_access) || // window used by the execute_window_loop
- update_window_and_padding(win_out, output_access); // window used to update the padding requirements of output tensor
- }
-
- output_access.set_valid_region(win_out, ValidRegion(Coordinates(0, 0), output->tensor_shape()));
-
- // Collapse along the Z direction
- // This collapse needs to be here in order to tune the Z dimension of LWS
- Window collapsed = win;
- const unsigned int dimension_to_collapse = std::min(static_cast<unsigned int>(output->num_dimensions()), 2u);
- collapsed = win.collapse(win, dimension_to_collapse);
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, collapsed);
-}
-} // namespace
-
-CLGEMMMatrixMultiplyReshapedKernel::CLGEMMMatrixMultiplyReshapedKernel()
- : _input0(nullptr), _input1(nullptr), _input2(nullptr), _output(nullptr), _slide_matrix_b(true), _reinterpret_output_as_3d(false), _use_dummy_work_items(false), _add_bias(false),
- _broadcast_bias(false), _export_to_cl_image(false), _k(1)
-{
-}
-
-void CLGEMMMatrixMultiplyReshapedKernel::configure(const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta,
- const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input0, input1, input2, output, alpha, beta, lhs_info, rhs_info, gemm_info);
-}
-
-void CLGEMMMatrixMultiplyReshapedKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha,
- float beta,
- const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output);
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), (input2 != nullptr ? input2->info() : nullptr), output->info(), alpha, beta, lhs_info, rhs_info, gemm_info));
-
- auto padding_info = get_padding_info({ input0, output });
- _input0 = input0;
- _input1 = input1;
- _input2 = helpers::float_ops::is_zero(beta) ? nullptr : input2;
- _output = output;
- _reinterpret_output_as_3d = gemm_info.depth_output_gemm3d != 0;
- _use_dummy_work_items = preferred_dummy_work_items_support(CLKernelLibrary::get().get_device());
- _add_bias = _input2 != nullptr;
- _broadcast_bias = gemm_info.broadcast_bias;
- _export_to_cl_image = rhs_info.export_to_cl_image;
- _k = gemm_info.k;
-
- // Check if we need to slide the matrix B
- const unsigned int num_dimensions_input0 = _input0->info()->num_dimensions();
- _slide_matrix_b = (_input1->info()->num_dimensions() >= num_dimensions_input0);
-
- ElementsProcessed num_elements_processed{};
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(input0->info(), input1->info(), input2 != nullptr ? input2->info() : nullptr, output->info(), lhs_info, rhs_info, gemm_info, num_elements_processed);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
-
- const bool enable_mixed_precision = gemm_info.fp_mixed_precision;
- const DataType data_type = input0->info()->data_type();
-
- // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding.
- const unsigned int internal_m = _reinterpret_output_as_3d ? gemm_info.m : output->info()->dimension(1);
-
- const unsigned int partial_store_m0 = internal_m % lhs_info.m0;
- const unsigned int partial_store_n0 = gemm_info.n % rhs_info.n0;
-
- // Create build options
- CLBuildOptions build_opts;
- build_opts.add_option_if(!(helpers::float_ops::is_one(alpha)), "-DALPHA=" + float_to_string_with_full_precision(alpha));
- build_opts.add_option_if(_input2 != nullptr, "-DBETA=" + float_to_string_with_full_precision(beta));
- build_opts.add_option_if(helpers::float_ops::is_one(beta), "-DUNIT_BETA");
- build_opts.add_option_if(_reinterpret_output_as_3d, "-DREINTERPRET_OUTPUT_AS_3D");
- build_opts.add_option_if(_reinterpret_output_as_3d, "-DHEIGHT_GEMM3D=" + support::cpp11::to_string(output->info()->dimension(1)));
- build_opts.add_option_if(_reinterpret_output_as_3d, "-DDEPTH_GEMM3D=" + support::cpp11::to_string(output->info()->dimension(2)));
- build_opts.add_option_if(gemm_info.broadcast_bias, "-DBROADCAST_BIAS");
- build_opts.add_option_if(!_slide_matrix_b, "-DMATRIX_B_DEPTH=" + support::cpp11::to_string(input1->info()->dimension(2)));
- build_opts.add_option_if(lhs_info.interleave, "-DLHS_INTERLEAVE");
- build_opts.add_option_if(rhs_info.interleave, "-DRHS_INTERLEAVE");
- build_opts.add_option_if(lhs_info.transpose, "-DLHS_TRANSPOSE");
- build_opts.add_option_if(_use_dummy_work_items, "-DDUMMY_WORK_ITEMS");
- build_opts.add_option_if(gemm_info.activation_info.enabled(), "-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(gemm_info.activation_info.activation())));
- build_opts.add_option_if(gemm_info.activation_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(gemm_info.activation_info.a()));
- build_opts.add_option_if(gemm_info.activation_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(gemm_info.activation_info.b()));
- build_opts.add_option_if(enable_mixed_precision, "-DMIXED_PRECISION");
- build_opts.add_option_if(rhs_info.export_to_cl_image, "-DOPENCL_IMAGE_SUPPORT");
- build_opts.add_option("-DRHS_HEIGHT=" + support::cpp11::to_string(input1->info()->dimension(1)));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
- build_opts.add_option("-DDATA_TYPE_ACCUMULATOR=" + (enable_mixed_precision ? get_cl_type_from_data_type(DataType::F32) : get_cl_type_from_data_type(data_type)));
- build_opts.add_option("-DM=" + support::cpp11::to_string(gemm_info.m));
- build_opts.add_option("-DN=" + support::cpp11::to_string(gemm_info.n));
- build_opts.add_option("-DK=" + support::cpp11::to_string(gemm_info.k));
- build_opts.add_option("-DM0=" + support::cpp11::to_string(lhs_info.m0));
- build_opts.add_option("-DN0=" + support::cpp11::to_string(rhs_info.n0));
- build_opts.add_option("-DK0=" + support::cpp11::to_string(lhs_info.k0));
- build_opts.add_option("-DV0=" + support::cpp11::to_string(lhs_info.v0));
- build_opts.add_option("-DH0=" + support::cpp11::to_string(rhs_info.h0));
- build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
- build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
-
- std::string kernel_name("gemm_mm_reshaped_");
- kernel_name += lhs_info.transpose ? "lhs_t_" : "lhs_nt_";
- kernel_name += rhs_info.transpose ? "rhs_t" : "rhs_nt";
- kernel_name += rhs_info.export_to_cl_image ? "_texture" : "";
-
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Set config_id for enabling LWS tuning
- _config_id = kernel_name;
- _config_id += "_";
- _config_id += (_add_bias ? "add_bias_" : "");
- _config_id += (_broadcast_bias ? "broadcast_bias_" : "");
- _config_id += (_reinterpret_output_as_3d ? "3do_" : "");
- _config_id += (gemm_info.activation_info.enabled() ? "fused_activation_" : "");
- _config_id += lower_string(string_from_data_type(input0->info()->data_type()));
- _config_id += "_";
- _config_id += (enable_mixed_precision ? "mixed_precision_" : "");
- _config_id += support::cpp11::to_string(output->info()->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(gemm_info.k);
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.m0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.n0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.k0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.v0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.h0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.interleave);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.interleave);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status CLGEMMMatrixMultiplyReshapedKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta,
- const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info)
-{
- ElementsProcessed num_elements_processed{};
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input0, input1, input2, output, alpha, beta, lhs_info, rhs_info, gemm_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input0->clone().get(),
- input1->clone().get(),
- input2 != nullptr ? input2->clone().get() : nullptr,
- output->clone().get(),
- lhs_info,
- rhs_info,
- gemm_info,
- num_elements_processed)
- .first);
-
- return Status{};
-}
-
-void CLGEMMMatrixMultiplyReshapedKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- if(_input1->info()->num_dimensions() < 3)
- {
- // The stride_z for matrix B must be zero if we do not slice
- ARM_COMPUTE_ERROR_ON(_input1->info()->strides_in_bytes()[3] != 0);
- }
-
- Window slice = window.first_slice_window_3D();
- Window slice_matrix_b = slice;
-
- slice_matrix_b.set(Window::DimX, Window::Dimension(0, 1, 1));
- slice_matrix_b.set(Window::DimY, Window::Dimension(0, 1, 1));
-
- const unsigned int total_cross_plane_pad = _output->info()->padding().top + _output->info()->padding().bottom;
-
- cl::Image2D input1_image2d;
-
- if(_export_to_cl_image)
- {
- const TensorShape shape2d(_input1->info()->dimension(0) / 4, _input1->info()->dimension(1) * _input1->info()->dimension(2));
- const size_t image_row_pitch = _input1->info()->strides_in_bytes()[1];
-
- input1_image2d = create_image2d_from_buffer(CLKernelLibrary::get().context(), _input1->cl_buffer(), shape2d, _input1->info()->data_type(), image_row_pitch);
- }
-
- do
- {
- Window slice_b = slice;
- // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2
- // This scenario can happen when the matrix multiplication is used to perform a convolution operation
- if(!_slide_matrix_b)
- {
- slice_b = slice_matrix_b;
- }
-
- unsigned int idx = 0;
-
- // LHS buffer
- add_2D_tensor_argument(idx, _input0, slice);
-
- // RHS buffer or RHS OpenCL image (_export_to_cl_image == true)
- if(_export_to_cl_image)
- {
- _kernel.setArg(idx++, input1_image2d);
- }
- else
- {
- add_2D_tensor_argument(idx, _input1, slice_b);
- }
-
- // Bias buffer (_add_bias == true)
- add_2D_tensor_argument_if(_add_bias, idx, _input2, slice);
-
- // Output buffer
- add_2D_tensor_argument(idx, _output, slice);
-
- // K dimension (not used if _export_to_cl_image == true)
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_k));
-
- // LHS stride_z
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input0->info()->strides_in_bytes()[2]));
-
- // RHS stride_z (not used if _export_to_cl_image == true)
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input1->info()->strides_in_bytes()[2]));
-
- // Bias stride_z (if _add_bias == true)
- if(_add_bias)
- {
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input2->info()->strides_in_bytes()[2]));
- }
-
- // Output stride_z
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_output->info()->strides_in_bytes()[2]));
-
- // Cross-plane padding (if _reinterpret_output_as_3d = true)
- if(_reinterpret_output_as_3d)
- {
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(total_cross_plane_pad));
- }
-
- // Dispatch kernel
- enqueue(queue, *this, slice, lws_hint(), _use_dummy_work_items);
- }
- while(window.slide_window_slice_3D(slice));
-}
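The export-to-CL-image path in the run() method above wraps the reshaped RHS buffer in a 2D image whose width is the tensor width divided by 4 (each image texel packs four 32-bit elements) and whose height stacks the batch dimension on top of the matrix height, with the row pitch taken from the tensor's stride Y. A minimal sketch of that shape computation, using hypothetical dimensions and a stand-alone helper instead of the library's TensorInfo accessors:

// Sketch only: mirrors the image2d shape logic used when rhs_info.export_to_cl_image is enabled.
// Dimension values are illustrative; the kernel reads them from _input1->info() at run time.
#include <cstddef>

struct Rhs2DImageShape
{
    std::size_t width;     // tensor width / 4: four 32-bit elements per texel
    std::size_t height;    // tensor height * depth: batches stacked vertically
    std::size_t row_pitch; // bytes between consecutive rows (tensor stride Y)
};

Rhs2DImageShape make_rhs_image_shape(std::size_t dim0, std::size_t dim1, std::size_t dim2, std::size_t stride_y_bytes)
{
    return Rhs2DImageShape{ dim0 / 4, dim1 * dim2, stride_y_bytes };
}

create_image2d_from_buffer() then builds the cl::Image2D over the existing buffer with this row pitch, which is why the stride Y has to satisfy the device's pitch alignment requirement.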
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h
deleted file mode 100644
index 2ffc322def..0000000000
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMMATRIXMULTIPLYRESHAPEDKERNEL_H
-#define ARM_COMPUTE_CLGEMMMATRIXMULTIPLYRESHAPEDKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-#include "arm_compute/core/KernelDescriptors.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** OpenCL kernel to multiply matrices when both the input matrices LHS (input0) and RHS (input1) have been reshaped
- *
- * @note The input matrices @p input0 and @p input1 must be reshaped through @ref CLGEMMReshapeLHSMatrixKernel and @ref CLGEMMReshapeRHSMatrixKernel
- */
-class CLGEMMMatrixMultiplyReshapedKernel : public ICLKernel
-{
-public:
- /** Default Constructor */
- CLGEMMMatrixMultiplyReshapedKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLGEMMMatrixMultiplyReshapedKernel(const CLGEMMMatrixMultiplyReshapedKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLGEMMMatrixMultiplyReshapedKernel &operator=(const CLGEMMMatrixMultiplyReshapedKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLGEMMMatrixMultiplyReshapedKernel(CLGEMMMatrixMultiplyReshapedKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLGEMMMatrixMultiplyReshapedKernel &operator=(CLGEMMMatrixMultiplyReshapedKernel &&) = default;
- /** Initialise the kernel's input and output.
- *
- * @note The F16 computation also supports mixed precision through the gemm_info.fp_mixed_precision flag.
- * Mixed precision combines different floating precisions during the computation, in particular, F32 for the accumulations and F16 for the
- * multiplications. i.e. float c = (half)a * (half)b
- *
- * @note If rhs_info.export_to_cl_image = true, this OpenCL kernel will fetch the RHS data using the OpenCL read_image built-in function.
- * Reading from the OpenCL image object can increase the performance. However, since the OpenCL image object is created importing the OpenCL buffer,
- * the following conditions are required:
- * -# rhs_info.n0 can only be 4, 8 and 16
- * -# rhs_info.k0 can only be 4, 8 and 16
- * -# Data type can only be F32
- * -# The platform should support the OpenCL cl_khr_image2d_from_buffer extension
- * -# The stride Y for the input1 should satisfy the OpenCL pitch alignment requirement
- * -# input1 width should be less than or equal to (CL_DEVICE_IMAGE2D_MAX_WIDTH * 4)
- * -# input1 (height * depth) should be less than or equal to CL_DEVICE_IMAGE2D_MAX_HEIGHT
- *
- * @param[in] input0 Input tensor containing the LHS reshaped matrix. Data type supported: F16/F32 (only F32 if rhs_info.export_to_cl_image = true). The number of dimensions for the LHS matrix must be less than or equal to 4
- * @param[in] input1 Input tensor containing the RHS reshaped matrix. Data type supported: same as @p input0. The number of dimensions for the RHS matrix must be less than or equal to 3
- * @param[in] input2 Input tensor containing the bias matrix. Data type supported: same as @p input0.
- * @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0
- * @param[in] alpha Weight of the matrix product
- * @param[in] beta Weight of the matrix bias
- * @param[in] lhs_info LHS matrix information used for reshaping the input0 tensor. Only the following values are supported:
- * lhs_info.m0: 2,3,4,5,6,7,8
- * lhs_info.k0: 2,3,4,8,16
- * lhs_info.transpose: false
- * @param[in] rhs_info RHS matrix information used for reshaping the input1 tensor. Only the following values are supported:
- * rhs_info.n0: 2,3,4,8,16 (only 4, 8 and 16 if rhs_info.export_to_cl_image = true)
- * rhs_info.k0: 2,3,4,8,16 (only 4, 8 and 16 if rhs_info.export_to_cl_image = true)
- * rhs_info.transpose: true
- * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices
- *
- * @note lhs_info.k0 must be equal to rhs_info.k0
- */
- void configure(const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMKernelInfo &gemm_info);
- /** Initialise the kernel's input and output.
- *
- * @note The F16 computation also supports mixed precision through the gemm_info.fp_mixed_precision flag.
- * Mixed precision combines different floating precisions during the computation, in particular, F32 for the accumulations and F16 for the
- * multiplications. i.e. float c = (half)a * (half)b
- *
- * @note If rhs_info.export_to_cl_image = true, this OpenCL kernel will fetch the RHS data using the OpenCL read_image built-in function.
- * Reading from the OpenCL image object can increase the performance. However, since the OpenCL image object is created importing the OpenCL buffer,
- * the following conditions are required:
- * -# rhs_info.n0 can only be 4, 8 and 16
- * -# rhs_info.k0 can only be 4, 8 and 16
- * -# Data type can only be F32
- * -# The platform should support the OpenCL cl_khr_image2d_from_buffer extension
- * -# The stride Y for the input1 should satisfy the OpenCL pitch alignment requirement
- * -# input1 width should be less than or equal to (CL_DEVICE_IMAGE2D_MAX_WIDTH * 4)
- * -# input1 (height * depth) should be less than or equal to CL_DEVICE_IMAGE2D_MAX_HEIGHT
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input0 Input tensor containing the LHS reshaped matrix. Data type supported: F16/F32 (only F32 if rhs_info.export_to_cl_image = true). The number of dimensions for the LHS matrix must be less than or equal to 4
- * @param[in] input1 Input tensor containing the RHS reshaped matrix. Data type supported: same as @p input0. The number of dimensions for the RHS matrix must be less than or equal to 3
- * @param[in] input2 Input tensor containing the bias matrix. Data type supported: same as @p input0.
- * @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0
- * @param[in] alpha Weight of the matrix product
- * @param[in] beta Weight of the matrix bias
- * @param[in] lhs_info LHS matrix information used for reshaping the input0 tensor. Only the following values are supported:
- * lhs_info.m0: 2,3,4,5,6,7,8
- * lhs_info.k0: 2,3,4,8,16
- * lhs_info.transpose: false
- * @param[in] rhs_info RHS matrix information used for reshaping the input1 tensor. Only the following values are supported:
- * rhs_info.n0: 2,3,4,8,16 (only 4, 8 and 16 if rhs_info.export_to_cl_image = true)
- * rhs_info.k0: 2,3,4,8,16 (only 4, 8 and 16 if rhs_info.export_to_cl_image = true)
- * rhs_info.transpose: true
- * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices
- *
- * @note lhs_info.k0 must be equal to rhs_info.k0
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta,
- const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMKernelInfo &gemm_info);
- /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMMatrixMultiplyReshapedKernel
- *
- * @note The F16 computation also supports mixed precision through the gemm_info.fp_mixed_precision flag.
- * Mixed precision combines different floating precisions during the computation, in particular, F32 for the accumulations and F16 for the
- * multiplications. i.e. float c = (half)a * (half)b
- *
- * @note If rhs_info.export_to_cl_image = true, this OpenCL kernel will fetch the RHS data using the OpenCL read_image built-in function.
- * Reading from the OpenCL image object can increase the performance. However, since the OpenCL image object is created importing the OpenCL buffer,
- * the following conditions are required:
- * -# rhs_info.n0 can only be 4, 8 and 16
- * -# rhs_info.k0 can only be 4, 8 and 16
- * -# Data type can only be F32
- * -# The platform should support the OpenCL cl_khr_image2d_from_buffer extension
- * -# The stride Y for the input1 should satisfy the OpenCL pitch alignment requirement
- * -# input1 width should be less than or equal to (CL_DEVICE_IMAGE2D_MAX_WIDTH * 4)
- * -# input1 (height * depth) should be less than or equal to CL_DEVICE_IMAGE2D_MAX_HEIGHT
- *
- * @param[in] input0 Input tensor containing the LHS reshaped matrix. Data type supported: F16/F32 (only F32 if rhs_info.export_to_cl_image = true). The number of dimensions for the LHS matrix must be less than or equal to 4
- * @param[in] input1 Input tensor containing the RHS reshaped matrix. Data type supported: same as @p input0. The number of dimensions for the RHS matrix must be less than or equal to 3
- * @param[in] input2 Input tensor info containing the bias matrix. Data type supported: same as @p input0.
- * @param[in] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0
- * @param[in] alpha Weight of the matrix product
- * @param[in] beta Weight of the matrix bias
- * @param[in] lhs_info LHS matrix information used for reshaping the input0 tensor. Only the following values are supported:
- * lhs_info.m0: 2,3,4,5,6,7,8
- * lhs_info.k0: 2,3,4,8,16
- * lhs_info.transpose: false
- * @param[in] rhs_info RHS matrix information used for reshaping the input1 tensor. Only the following values are supported:
- * rhs_info.n0: 2,3,4,8,16 (only 4, 8 and 16 if rhs_info.export_to_cl_image = true)
- * rhs_info.k0: 2,3,4,8,16 (only 4, 8 and 16 if rhs_info.export_to_cl_image = true)
- * rhs_info.transpose: true
- * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices
- *
- * @note lhs_info.k0 must be equal to rhs_info.k0
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMKernelInfo &gemm_info);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_input0;
- const ICLTensor *_input1;
- const ICLTensor *_input2;
- ICLTensor *_output;
- bool _slide_matrix_b;
- bool _reinterpret_output_as_3d;
- bool _use_dummy_work_items;
- bool _add_bias;
- bool _broadcast_bias;
- bool _export_to_cl_image;
- unsigned int _k;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLGEMMMATRIXMULTIPLYRESHAPEDKERNEL_H*/
\ No newline at end of file
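Taken together, the configure()/validate()/run() interface documented above was driven roughly as follows. This is a minimal usage sketch rather than library reference code: lhs, rhs, bias, dst and queue are assumed to be valid, already-allocated objects, the tensors are assumed to be already reshaped, and the empty *_info structs would in practice be filled by the GEMM heuristics.

// Hypothetical driver code for the removed kernel; all tensor objects are assumed valid.
GEMMLHSMatrixInfo lhs_info{};  // m0: 2..8, k0: 2,3,4,8,16, transpose = false
GEMMRHSMatrixInfo rhs_info{};  // n0/k0: 2,3,4,8,16, transpose = true, k0 equal to lhs_info.k0
GEMMKernelInfo    gemm_info{}; // original M, N, K of the GEMM
const float       alpha = 1.f;
const float       beta  = 0.f;

const Status status = CLGEMMMatrixMultiplyReshapedKernel::validate(
    lhs.info(), rhs.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, gemm_info);

if(status.error_code() == ErrorCode::OK)
{
    CLGEMMMatrixMultiplyReshapedKernel mm_kernel;
    mm_kernel.configure(&lhs, &rhs, &bias, &dst, alpha, beta, lhs_info, rhs_info, gemm_info);
    mm_kernel.run(mm_kernel.window(), queue); // queue: a valid cl::CommandQueue
}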
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp
deleted file mode 100644
index 3dee4f24cd..0000000000
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp
+++ /dev/null
@@ -1,449 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h"
-
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLUtils.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/CL/gemm/CLGEMMHelpers.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "src/core/utils/helpers/float_ops.h"
-#include "support/StringSupport.h"
-
-#include <tuple>
-
-using namespace arm_compute::misc::shape_calculator;
-
-namespace arm_compute
-{
-namespace
-{
-using ElementsProcessed = Steps;
-
-Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMKernelInfo &gemm_info)
-{
- ARM_COMPUTE_UNUSED(alpha);
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input0, input1, output);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input0);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input0->num_dimensions() > 4, "The number of dimensions for the LHS matrix must be <= 4");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input1->num_dimensions() > 3, "The number of dimensions for the RHS matrix must be <= 3");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_info.m0 < 1 || lhs_info.m0 > 8, "Only 1,2,3,4,5,6,7,8 are supported for m0");
- ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.k0 > 16 || rhs_info.k0 < 2);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((rhs_info.k0 & (rhs_info.k0 - 1)) && rhs_info.k0 != 3), "Only 2,3,4,8,16 are supported for k0");
- ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.n0 > 16 || rhs_info.n0 < 2);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((rhs_info.n0 & (rhs_info.n0 - 1)) && rhs_info.n0 != 3), "Only 2,3,4,8,16 are supported for n0");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((gemm_info.reinterpret_input_as_3d || gemm_info.depth_output_gemm3d != 0) && (input2 != nullptr)
- && (!gemm_info.broadcast_bias),
- "Bias addition only supported with broadcast mode in case the input or output has to be reinterpreted as 3D");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.fp_mixed_precision, "Mixed precision not supported");
- ARM_COMPUTE_RETURN_ON_ERROR(cl_gemm::validate_image2d_support_on_rhs(*input1, rhs_info));
-
- const unsigned int m = gemm_info.m;
- const unsigned int n = gemm_info.n;
- const unsigned int k = gemm_info.k;
-
- TensorShape tensor_shape1{ input1->tensor_shape() };
- tensor_shape1.set(0, n);
- tensor_shape1.set(1, k);
-
- if(input2 != nullptr && !(helpers::float_ops::is_zero(beta)))
- {
- const unsigned int input2_dim0 = input2->dimension(0);
- const unsigned int input2_dim1 = input2->dimension(1);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input2, input0);
- if(gemm_info.broadcast_bias)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((input2_dim1 != 1 || input2_dim0 != n), "Incorrect dimension of bias matrix which is to be broadcasted");
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((input2_dim0 != n || input2_dim1 != m), "Incorrect dimension of bias matrix");
- }
- }
-
- const TensorInfo tensor_info1 = input1->clone()->set_tensor_shape(tensor_shape1);
-
- const TensorInfo tensor_info_reshaped1 = input1->clone()->set_tensor_shape(compute_rhs_reshaped_shape(tensor_info1, rhs_info));
-
- ARM_COMPUTE_RETURN_ERROR_ON(input0->dimension(0) != k);
- if(gemm_info.reinterpret_input_as_3d)
- {
- ARM_COMPUTE_RETURN_ERROR_ON(input0->dimension(1) * input0->dimension(2) != m);
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON(input0->dimension(1) != m);
- }
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, &tensor_info_reshaped1);
-
- if(output->total_size() != 0)
- {
- const TensorInfo tensor_info_output = output->clone()->set_tensor_shape(compute_mm_shape(*input0, *input1, gemm_info));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, output);
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input0, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMKernelInfo &gemm_info, ElementsProcessed &num_elements_processed)
-{
- unsigned int &num_elems_processed_per_iteration_x = num_elements_processed[0];
- unsigned int &num_elems_processed_per_iteration_y = num_elements_processed[1];
- bool reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d;
- bool reinterpret_output_as_3d = gemm_info.depth_output_gemm3d != 0;
-
- Window win{};
- Window win_out{};
- bool window_changed = false;
-
- // In case both input and output have to be reinterpreted as 3D tensors,
- // force reinterpret_input_as_3d and reinterpret_output_as_3d to be false.
- // This approach should only be used when the input/output tensors have padding in the y direction
- if((reinterpret_input_as_3d == reinterpret_output_as_3d) && gemm_info.has_pad_y)
- {
- reinterpret_output_as_3d = false;
- }
-
- // Output tensor auto initialization if not yet initialized
- auto_init_if_empty(*output, input0->clone()->set_tensor_shape(compute_mm_shape(*input0, *input1, gemm_info)));
-
- TensorInfo tmp_info(*output);
-
- if(reinterpret_output_as_3d)
- {
- // Since the output tensor has to be reinterpreted as 3D and the execute window is based on a 2D GEMM,
- // the window needs to be constructed on the 2D collapsed version of the tensor
- TensorShape tmp_shape(output->tensor_shape());
- tmp_shape.collapse(2U, 1U);
- tmp_info.set_tensor_shape(tmp_shape);
- }
-
- // Configure kernel window
- num_elems_processed_per_iteration_x = rhs_info.n0;
- num_elems_processed_per_iteration_y = lhs_info.m0;
-
- win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
- win_out = calculate_max_window(*output, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
-
- if(input2 != nullptr)
- {
- const int bias_processed_per_iteration_x = num_elems_processed_per_iteration_x;
-
- AccessWindowStatic input2_access(input2, 0, 0,
- ceil_to_multiple(input2->dimension(0), bias_processed_per_iteration_x),
- input2->dimension(1));
-
- window_changed = update_window_and_padding(win, input2_access);
- }
-
- // Collapse along the Z direction
- // This collapse needs to be here in order to tune the Z dimension of LWS
- Window collapsed = win;
- const unsigned int dimension_to_collapse = std::min(static_cast<unsigned int>(output->num_dimensions()), 2u);
- collapsed = win.collapse(win, dimension_to_collapse);
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, collapsed);
-}
-} // namespace
-
-CLGEMMMatrixMultiplyReshapedOnlyRHSKernel::CLGEMMMatrixMultiplyReshapedOnlyRHSKernel()
- : _input0(nullptr), _input1(nullptr), _input2(nullptr), _output(nullptr), _slide_matrix_b(true), _reinterpret_input_as_3d(false), _reinterpret_output_as_3d(false), _use_dummy_work_items(false),
- _add_bias(false), _broadcast_bias(false), _export_to_cl_image(false), _has_pad_y(false)
-{
-}
-
-void CLGEMMMatrixMultiplyReshapedOnlyRHSKernel::configure(const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta,
- const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input0, input1, input2, output, alpha, beta, lhs_info, rhs_info, gemm_info);
-}
-
-void CLGEMMMatrixMultiplyReshapedOnlyRHSKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output,
- float alpha,
- float beta,
- const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output);
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), (input2 != nullptr ? input2->info() : nullptr), output->info(), alpha, beta, lhs_info, rhs_info, gemm_info));
-
- _input0 = input0;
- _input1 = input1;
- _input2 = helpers::float_ops::is_zero(beta) ? nullptr : input2;
- _output = output;
- _reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d;
- _reinterpret_output_as_3d = gemm_info.depth_output_gemm3d != 0;
- _use_dummy_work_items = preferred_dummy_work_items_support(CLKernelLibrary::get().get_device());
- _add_bias = _input2 != nullptr;
- _broadcast_bias = gemm_info.broadcast_bias;
- _export_to_cl_image = rhs_info.export_to_cl_image;
- _has_pad_y = gemm_info.has_pad_y;
-
- auto padding_info = get_padding_info({ input0, input1, output });
-
- // In case both input and output have to be reinterpreted as 3D tensors,
- // force reinterpret_input_as_3d and reinterpret_output_as_3d to be false.
- if((_reinterpret_input_as_3d == _reinterpret_output_as_3d) && _has_pad_y)
- {
- _reinterpret_input_as_3d = false;
- _reinterpret_output_as_3d = false;
- }
-
- // Check if we need to slide the matrix B
- const unsigned int num_dimensions_input0 = _input0->info()->num_dimensions();
- _slide_matrix_b = (_input1->info()->num_dimensions() >= num_dimensions_input0);
-
- ElementsProcessed num_elements_processed{};
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(input0->info(), input1->info(), input2 != nullptr ? input2->info() : nullptr, output->info(), lhs_info, rhs_info, gemm_info, num_elements_processed);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
-
- // If _reinterpret_input_as_3d = _reinterpret_output_as_3d = true,
- // we will dispatch a batched-GEMM to reduce the complexity of the address calculation within the OpenCL kernel.
- // This means that the actual m used by the kernel is given by output->info()->dimension(1) and not by gemm_info.m
- const unsigned int internal_m = _reinterpret_output_as_3d ? gemm_info.m : output->info()->dimension(1);
-
- // These variables are used only if gemm_info.has_pad_y == true
- const unsigned int h_gemm_3d = _reinterpret_output_as_3d ? output->info()->dimension(1) : input0->info()->dimension(1);
- const unsigned int d_gemm_3d = _reinterpret_output_as_3d ? output->info()->dimension(2) : input0->info()->dimension(2);
-
- // Shrink M0 to be always <= M (internal_m) to prevent out-of-bounds reads.
- // NOTE: This might have implications on heuristics and performance
- const unsigned int internal_m0 = std::min(internal_m, lhs_info.m0);
-
- // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding.
- const unsigned int partial_store_m0 = internal_m % internal_m0;
- const unsigned int partial_store_n0 = gemm_info.n % rhs_info.n0;
-
- // Create build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input0->info()->data_type()));
- build_opts.add_option_if(!(helpers::float_ops::is_one(alpha)), "-DALPHA=" + float_to_string_with_full_precision(alpha));
- build_opts.add_option_if(_input2 != nullptr, "-DBETA=" + float_to_string_with_full_precision(beta));
- build_opts.add_option_if(helpers::float_ops::is_one(beta), "-DUNIT_BETA");
- build_opts.add_option_if(gemm_info.broadcast_bias, "-DBROADCAST_BIAS");
- build_opts.add_option_if(!_slide_matrix_b, "-DMATRIX_B_DEPTH=" + support::cpp11::to_string(input1->info()->dimension(2)));
- build_opts.add_option_if(rhs_info.interleave, "-DRHS_INTERLEAVE");
- build_opts.add_option_if(_use_dummy_work_items, "-DDUMMY_WORK_ITEMS");
- build_opts.add_option_if(rhs_info.export_to_cl_image, "-DOPENCL_IMAGE_SUPPORT");
- build_opts.add_option("-DRHS_HEIGHT=" + support::cpp11::to_string(input1->info()->dimension(1)));
- build_opts.add_option("-DM=" + support::cpp11::to_string(internal_m));
- build_opts.add_option("-DN=" + support::cpp11::to_string(gemm_info.n));
- build_opts.add_option("-DK=" + support::cpp11::to_string(gemm_info.k));
- build_opts.add_option("-DM0=" + support::cpp11::to_string(internal_m0));
- build_opts.add_option("-DN0=" + support::cpp11::to_string(rhs_info.n0));
- build_opts.add_option("-DK0=" + support::cpp11::to_string(rhs_info.k0));
- build_opts.add_option("-DH0=" + support::cpp11::to_string(rhs_info.h0));
- build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
- build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
- build_opts.add_option_if(gemm_info.activation_info.enabled(), "-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(gemm_info.activation_info.activation())));
- build_opts.add_option_if(gemm_info.activation_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(gemm_info.activation_info.a()));
- build_opts.add_option_if(gemm_info.activation_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(gemm_info.activation_info.b()));
- if(_has_pad_y)
- {
- build_opts.add_option_if(_reinterpret_input_as_3d, "-DREINTERPRET_INPUT_AS_3D");
- build_opts.add_option_if(_reinterpret_output_as_3d, "-DREINTERPRET_OUTPUT_AS_3D");
- build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, "-DHEIGHT_GEMM3D=" + support::cpp11::to_string(h_gemm_3d));
- build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, "-DDEPTH_GEMM3D=" + support::cpp11::to_string(d_gemm_3d));
- }
-
- std::string kernel_name("gemm_mm_reshaped_only_rhs_");
- kernel_name += rhs_info.transpose ? "t" : "nt";
- kernel_name += rhs_info.export_to_cl_image ? "_texture" : "";
-
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Set config_id for enabling LWS tuning
- _config_id = kernel_name;
- _config_id += "_";
- _config_id += (_has_pad_y ? "" : "no_pad_y_");
- _config_id += (_add_bias ? "add_bias_" : "");
- _config_id += (_broadcast_bias ? "broadcast_bias_" : "");
- _config_id += (_reinterpret_input_as_3d ? "3di_" : "");
- _config_id += (_reinterpret_output_as_3d ? "3do_" : "");
- _config_id += (gemm_info.activation_info.enabled() ? "fused_activation_" : "");
- _config_id += lower_string(string_from_data_type(input0->info()->data_type()));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(gemm_info.k);
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.m0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.n0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.k0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.h0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.interleave);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status CLGEMMMatrixMultiplyReshapedOnlyRHSKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta,
- const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info)
-{
- ElementsProcessed num_elements_processed{};
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input0, input1, input2, output, alpha, beta, lhs_info, rhs_info, gemm_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input0->clone().get(),
- input1->clone().get(),
- input2 != nullptr ? input2->clone().get() : nullptr,
- output->clone().get(),
- lhs_info,
- rhs_info,
- gemm_info,
- num_elements_processed)
- .first);
-
- return Status{};
-}
-
-void CLGEMMMatrixMultiplyReshapedOnlyRHSKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- if(_input1->info()->num_dimensions() < 3)
- {
- // The stride_z for matrix B must be zero if we do not slice
- ARM_COMPUTE_ERROR_ON(_input1->info()->strides_in_bytes()[3] != 0);
- }
-
- const size_t lhs_idx_batch_size = _reinterpret_input_as_3d && !_has_pad_y ? 3u : 2u;
- const size_t rhs_idx_batch_size = 2u;
- const size_t bia_idx_batch_size = 2u;
- const size_t out_idx_batch_size = _reinterpret_output_as_3d && !_has_pad_y ? 3u : 2u;
-
- Window slice = window.first_slice_window_3D();
- Window slice_matrix_b = slice;
-
- slice_matrix_b.set(Window::DimX, Window::Dimension(0, 1, 1));
- slice_matrix_b.set(Window::DimY, Window::Dimension(0, 1, 1));
-
- // Get cross plane pads
- const unsigned int total_cross_plane_pad_lhs = _input0->info()->padding().top + _input0->info()->padding().bottom;
- const unsigned int total_cross_plane_pad_out = _output->info()->padding().top + _output->info()->padding().bottom;
-
- // The execution should fail if we try to run with has_pad_y = false but we have padding in either the LHS or DST tensor
- ARM_COMPUTE_ERROR_ON(!_has_pad_y && ((total_cross_plane_pad_lhs != 0) || (total_cross_plane_pad_out != 0)));
-
- cl::Image2D input1_image2d;
-
- if(_export_to_cl_image)
- {
- const TensorShape shape2d(_input1->info()->dimension(0) / 4, _input1->info()->dimension(1) * _input1->info()->dimension(2));
- const size_t image_row_pitch = _input1->info()->strides_in_bytes()[1];
-
- input1_image2d = create_image2d_from_buffer(CLKernelLibrary::get().context(), _input1->cl_buffer(), shape2d, _input1->info()->data_type(), image_row_pitch);
- }
-
- do
- {
- Window slice_b = slice;
- // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2
- // This scenario can happen when the matrix multiplication is used to perform a convolution operation
- if(!_slide_matrix_b)
- {
- slice_b = slice_matrix_b;
- }
-
- unsigned int idx = 0;
-
- // LHS buffer
- add_2D_tensor_argument(idx, _input0, slice);
-
- // RHS buffer or RHS OpenCL image (_export_to_cl_image == true)
- if(_export_to_cl_image)
- {
- _kernel.setArg(idx++, input1_image2d);
- }
- else
- {
- add_2D_tensor_argument(idx, _input1, slice_b);
- }
-
- // Bias buffer (_add_bias == true)
- add_2D_tensor_argument_if(_add_bias, idx, _input2, slice);
-
- // Output buffer
- add_2D_tensor_argument(idx, _output, slice);
-
- // LHS stride_z
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input0->info()->strides_in_bytes()[lhs_idx_batch_size]));
-
- // RHS stride_z (not used if _export_to_cl_image == true)
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input1->info()->strides_in_bytes()[rhs_idx_batch_size]));
-
- // Bias stride_z (if _add_bias == true)
- if(_add_bias)
- {
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input2->info()->strides_in_bytes()[bia_idx_batch_size]));
- }
-
- // Output stride_z
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_output->info()->strides_in_bytes()[out_idx_batch_size]));
-
- // Cross-plane padding (if _reinterpret_input_as_3d = true)
- if(_reinterpret_input_as_3d && _has_pad_y)
- {
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(total_cross_plane_pad_lhs));
- }
-
- // Cross-plane padding (if _reinterpret_output_as_3d = true)
- if(_reinterpret_output_as_3d && _has_pad_y)
- {
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(total_cross_plane_pad_out));
- }
-
- enqueue(queue, *this, slice, lws_hint(), _use_dummy_work_items);
- }
- while(window.slide_window_slice_3D(slice));
-}
-} // namespace arm_compute
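The partial_store_m0 and partial_store_n0 values computed in configure() above describe the leftover block at the bottom and right edges of the output when M and N are not multiples of the block sizes, which is what lets the kernel store those edges without requiring padding. A worked sketch of that arithmetic with made-up sizes (in the real code the M used here also depends on the reinterpret-as-3D flags):

// Worked example of the edge-block arithmetic; the sizes are illustrative only.
#include <algorithm>
#include <cstdio>

int main()
{
    const unsigned int m  = 77, n  = 35; // hypothetical GEMM output size
    const unsigned int m0 = 4,  n0 = 8;  // block sizes chosen by the heuristics

    const unsigned int internal_m0      = std::min(m, m0); // shrink M0 so a block never exceeds M
    const unsigned int partial_store_m0 = m % internal_m0; // 77 % 4 = 1 trailing row
    const unsigned int partial_store_n0 = n % n0;          // 35 % 8 = 3 trailing columns

    std::printf("-DPARTIAL_STORE_M0=%u -DPARTIAL_STORE_N0=%u\n", partial_store_m0, partial_store_n0);
    return 0;
}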
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h
deleted file mode 100644
index 5b96679a46..0000000000
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMMATRIXMULTIPLYRESHAPEDONLYRHSKERNEL_H
-#define ARM_COMPUTE_CLGEMMMATRIXMULTIPLYRESHAPEDONLYRHSKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-#include "arm_compute/core/KernelDescriptors.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** OpenCL kernel to multiply matrices when only the input matrix RHS (input1) has been reshaped
- *
- * @note The input matrix input1 must be reshaped through @ref CLGEMMReshapeRHSMatrixKernel
- */
-class CLGEMMMatrixMultiplyReshapedOnlyRHSKernel : public ICLKernel
-{
-public:
- /** Default Constructor */
- CLGEMMMatrixMultiplyReshapedOnlyRHSKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLGEMMMatrixMultiplyReshapedOnlyRHSKernel(const CLGEMMMatrixMultiplyReshapedOnlyRHSKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLGEMMMatrixMultiplyReshapedOnlyRHSKernel &operator=(const CLGEMMMatrixMultiplyReshapedOnlyRHSKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLGEMMMatrixMultiplyReshapedOnlyRHSKernel(CLGEMMMatrixMultiplyReshapedOnlyRHSKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLGEMMMatrixMultiplyReshapedOnlyRHSKernel &operator=(CLGEMMMatrixMultiplyReshapedOnlyRHSKernel &&) = default;
- /** Initialise the kernel's input and output.
- *
- * @note If rhs_info.export_to_cl_image = true, this OpenCL kernel will fetch the RHS data using the OpenCL read_image built-in function.
- * Reading from the OpenCL image object can increase the performance. However, since the OpenCL image object is created importing the OpenCL buffer,
- * the following conditions are required:
- * -# rhs_info.n0 can only be 4, 8 and 16
- * -# rhs_info.k0 can only be 4, 8 and 16
- * -# Data type can only be F32
- * -# The platform should support the OpenCL cl_khr_image2d_from_buffer extension
- * -# The stride Y for the input1 should satisfy the OpenCL pitch alignment requirement
- * -# input1 width should be less than or equal to (CL_DEVICE_IMAGE2D_MAX_WIDTH * 4)
- * -# input1 (height * depth) should be less than or equal to CL_DEVICE_IMAGE2D_MAX_HEIGHT
- *
- * @param[in] input0 Input tensor containing the LHS matrix. Data type supported: F16/F32 (only F32 if rhs_info.export_to_cl_image = true).
- * The number of dimensions for the LHS matrix must be less than or equal to 4.
- * @param[in] input1 Input tensor containing the RHS reshaped matrix. Data type supported: same as @p input0. The number of dimensions for the RHS matrix must be less than or equal to 3.
- * @param[in] input2 Input tensor containing the bias matrix. Data type supported: same as @p input0.
- * @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0
- * @param[in] alpha Weight of the matrix product
- * @param[in] beta Weight of the matrix bias
- * @param[in] lhs_info LHS matrix information used to retrieve the number of rows to be processed by each thread. Only the following values are supported:
- * lhs_info.m0: 1,2,3,4,5,6,7,8
- * @param[in] rhs_info RHS matrix information used for reshaping the input1 tensor. Only the following values are supported:
- * rhs_info.k0: 2,3,4,8,16
- * rhs_info.n0: 2,3,4,8,16
- * rhs_info.transpose: true,false
- * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices
- */
- void configure(const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMKernelInfo &gemm_info);
- /** Initialise the kernel's input and output.
- *
- * @note If rhs_info.export_to_cl_image = true, this OpenCL kernel will fetch the RHS data using the OpenCL read_image built-in function.
- * Reading from the OpenCL image object can increase the performance. However, since the OpenCL image object is created importing the OpenCL buffer,
- * the following conditions are required:
- * -# rhs_info.n0 can only be 4, 8 and 16
- * -# rhs_info.k0 can only be 4, 8 and 16
- * -# Data type can only be F32
- * -# The platform should support the OpenCL cl_khr_image2d_from_buffer extension
- * -# The stride Y for the input1 should satisfy the OpenCL pitch alignment requirement
- * -# input1 width should be less than or equal to (CL_DEVICE_IMAGE2D_MAX_WIDTH * 4)
- * -# input1 (height * depth) should be less than or equal to CL_DEVICE_IMAGE2D_MAX_HEIGHT
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input0 Input tensor containing the LHS matrix. Data type supported: F16/F32 (only F32 if rhs_info.export_to_cl_image = true).
- * The number of dimensions for the LHS matrix must be less than or equal to 4.
- * @param[in] input1 Input tensor containing the RHS reshaped matrix. Data type supported: same as @p input0. The number of dimensions for the RHS matrix must be less than or equal to 3.
- * @param[in] input2 Input tensor containing the bias matrix. Data type supported: same as @p input0.
- * @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0
- * @param[in] alpha Weight of the matrix product
- * @param[in] beta Weight of the matrix bias
- * @param[in] lhs_info LHS matrix information used to retrieve the number of rows to be processed by each thread. Only the following values are supported:
- * lhs_info.m0: 1,2,3,4,5,6,7,8
- * @param[in] rhs_info RHS matrix information used for reshaping the input1 tensor. Only the following values are supported:
- * rhs_info.k0: 2,3,4,8,16
- * rhs_info.n0: 2,3,4,8,16
- * rhs_info.transpose: true,false
- * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta,
- const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMKernelInfo &gemm_info);
- /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMMatrixMultiplyReshapedOnlyRHSKernel
- *
- * @note If rhs_info.export_to_cl_image = true, this OpenCL kernel will fetch the RHS data using the OpenCL read_image built-in function.
- * Reading from the OpenCL image object can increase the performance. However, since the OpenCL image object is created importing the OpenCL buffer,
- * the following conditions are required:
- * -# rhs_info.n0 can only be 4, 8 and 16
- * -# rhs_info.k0 can only be 4, 8 and 16
- * -# Data type can only be F32
- * -# The platform should support the OpenCL cl_khr_image2d_from_buffer extension
- * -# The stride Y for the input1 should satisfy the OpenCL pitch alignment requirement
- * -# input1 width should be less than or equal to (CL_DEVICE_IMAGE2D_MAX_WIDTH * 4)
- * -# input1 (height * depth) should be less than or equal to CL_DEVICE_IMAGE2D_MAX_HEIGHT
- *
- * @param[in] input0 Input tensor info for the LHS matrix. Data type supported: F16/F32 (only F32 if rhs_info.export_to_cl_image = true).
- * The number of dimensions for the LHS matrix must be less than or equal to 4.
- * @param[in] input1 Input tensor info for the RHS reshaped matrix. Data type supported: same as @p input0. The number of dimensions for the RHS matrix must be less than or equal to 3.
- * @param[in] input2 Input tensor info containing the bias matrix. Data type supported: same as @p input0.
- * @param[in] output Output tensor info. Data type supported: same as @p input0
- * @param[in] alpha Weight of the matrix product
- * @param[in] beta Weight of the matrix bias
- * @param[in] lhs_info LHS matrix information used to retrieve the number of rows to be processed by each thread. Only the following values are supported:
- * lhs_info.m0: 1,2,3,4,5,6,7,8
- * @param[in] rhs_info RHS matrix information used for reshaping the input1 tensor. Only the following values are supported:
- * rhs_info.k0: 2,3,4,8,16
- * rhs_info.n0: 2,3,4,8,16
- * rhs_info.transpose: true,false
- * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMKernelInfo &gemm_info);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_input0;
- const ICLTensor *_input1;
- const ICLTensor *_input2;
- ICLTensor *_output;
- bool _slide_matrix_b;
- bool _reinterpret_input_as_3d;
- bool _reinterpret_output_as_3d;
- bool _use_dummy_work_items;
- bool _add_bias;
- bool _broadcast_bias;
- bool _export_to_cl_image;
- bool _has_pad_y;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLGEMMMATRIXMULTIPLYRESHAPEDONLYRHSKERNEL_H*/
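The export_to_cl_image preconditions repeated in the documentation above (n0 and k0 restricted to 4, 8 or 16, F32 data, pitch-aligned stride Y, and the image width/height limits) boil down to a single predicate. The sketch below is a simplified stand-in for the library's cl_gemm::validate_image2d_support_on_rhs() helper; every limit is passed in explicitly because querying the OpenCL device is out of scope here, and the parameter names are hypothetical.

// Simplified precondition check; not the library's implementation.
#include <cstddef>

bool rhs_can_use_cl_image(unsigned int n0, unsigned int k0, bool is_f32,
                          std::size_t rhs_width, std::size_t rhs_height_times_depth,
                          std::size_t stride_y, std::size_t pitch_alignment,
                          std::size_t max_image_width, std::size_t max_image_height)
{
    const bool block_ok = (n0 == 4 || n0 == 8 || n0 == 16) && (k0 == 4 || k0 == 8 || k0 == 16);
    // stride Y must be a multiple of the device pitch alignment (expressed in the same unit)
    const bool pitch_ok = (pitch_alignment != 0) && (stride_y % pitch_alignment == 0);
    // the image is 4 elements wide per texel, hence the factor of 4 on the width limit
    const bool size_ok  = (rhs_width <= max_image_width * 4) && (rhs_height_times_depth <= max_image_height);
    return block_ok && is_f32 && pitch_ok && size_ok;
}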
diff --git a/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp b/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp
deleted file mode 100644
index 52510075b7..0000000000
--- a/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-using namespace arm_compute::misc::shape_calculator;
-
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.m0 == 0);
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.k0 == 0);
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.v0 == 0);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((lhs_info.k0 & (lhs_info.k0 - 1)) && lhs_info.k0 != 3), "Only 2,3,4,8,16 are supported for k0");
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.k0 > 16);
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.m0 < 2 || lhs_info.m0 > 8);
-
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
-
- if(output->total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), compute_lhs_reshaped_shape(*input, lhs_info, reinterpret_input_as_3d));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
-{
- const unsigned int num_elems_processed_per_iteration_x = lhs_info.k0;
- const unsigned int num_elems_processed_per_iteration_y = lhs_info.m0;
- bool window_changed = false;
-
- TensorInfo tmp_info(*input);
-
- if(reinterpret_input_as_3d)
- {
- // Since the input tensor has to be reinterpreted as 3D and the execute window is based on a 2D interleave,
- // the window needs to be constructed on the 2D collapsed version of the tensor
- TensorShape tmp_shape(input->tensor_shape());
- tmp_shape.collapse(2U, 1U);
- tmp_info.set_tensor_shape(tmp_shape);
- }
-
- // Output auto initialization if not yet initialized
- auto_init_if_empty(*output, input->clone()->set_tensor_shape(compute_lhs_reshaped_shape(*input, lhs_info, reinterpret_input_as_3d)));
-
- // Configure window
- Window win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
- Window win_in = calculate_max_window(*input, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
-
- AccessWindowStatic input_access(input, 0, 0,
- input->dimension(0),
- input->dimension(1));
- AccessWindowStatic output_access(output, 0, 0, output->dimension(0), output->dimension(1));
-
- window_changed = update_window_and_padding(win_in, input_access) || // window used by the execute_window_loop
- update_window_and_padding(win, output_access); // window used to update the padding requirements of output tensor
- output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->tensor_shape()));
-
- // Collapse along the Z direction
- // This collapse needs to be here in order to tune the Z dimension of LWS
- Window collapsed = win.collapse(win, Window::DimZ);
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, collapsed);
-}
-} // namespace
-
-CLGEMMReshapeLHSMatrixKernel::CLGEMMReshapeLHSMatrixKernel()
- : _input(nullptr), _output(nullptr), _reinterpret_input_as_3d(false)
-{
-}
-
-void CLGEMMReshapeLHSMatrixKernel::configure(const ICLTensor *input, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, output, lhs_info, reinterpret_input_as_3d);
-}
-
-void CLGEMMReshapeLHSMatrixKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- // Perform validate step
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), lhs_info, reinterpret_input_as_3d));
-
- auto padding_info = get_padding_info({ input });
-
- _input = input;
- _output = output;
- _reinterpret_input_as_3d = reinterpret_input_as_3d;
-
- const unsigned int src_w = input->info()->dimension(0);
- const unsigned int src_h = _reinterpret_input_as_3d ? input->info()->dimension(1) * input->info()->dimension(2) : input->info()->dimension(1);
- const unsigned int partial_load_m0 = src_h % lhs_info.m0;
- const unsigned int partial_load_k0 = src_w % lhs_info.k0;
-
- // Create build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DM0=" + support::cpp11::to_string(lhs_info.m0));
- build_opts.add_option("-DK0=" + support::cpp11::to_string(lhs_info.k0));
- build_opts.add_option("-DV0=" + support::cpp11::to_string(lhs_info.v0));
- build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(src_w));
- build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(src_h));
- build_opts.add_option_if(lhs_info.interleave, "-DINTERLEAVE");
- build_opts.add_option_if(_reinterpret_input_as_3d, "-DREINTERPRET_INPUT_AS_3D");
- build_opts.add_option_if(_reinterpret_input_as_3d, "-DHEIGHT_GEMM3D=" + support::cpp11::to_string(input->info()->dimension(1)));
- build_opts.add_option_if(_reinterpret_input_as_3d, "-DDEPTH_GEMM3D=" + support::cpp11::to_string(input->info()->dimension(2)));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(input->info()->element_size()));
- build_opts.add_option("-DPARTIAL_LOAD_M0=" + support::cpp11::to_string(partial_load_m0));
- build_opts.add_option("-DPARTIAL_LOAD_K0=" + support::cpp11::to_string(partial_load_k0));
-
- std::string kernel_name("gemm_reshape_lhs_matrix_");
- kernel_name += lhs_info.transpose ? "t" : "nt";
-
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), output->info(), lhs_info, reinterpret_input_as_3d);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
-
- // Set config_id for enabling LWS tuning
- _config_id = "gemm_reshape_lhs_matrix_";
- _config_id += (_reinterpret_input_as_3d ? "3d_" : "");
- _config_id += lower_string(string_from_data_type(input->info()->data_type()));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.m0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.k0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.v0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.interleave);
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.transpose);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status CLGEMMReshapeLHSMatrixKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, lhs_info, reinterpret_input_as_3d));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), lhs_info, reinterpret_input_as_3d).first);
-
- return Status{};
-}
-
-void CLGEMMReshapeLHSMatrixKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- Window slice = window.first_slice_window_3D();
-
- if(_reinterpret_input_as_3d)
- {
- // Pass bottom paddings to the kernel if the input has to be reinterpreted as 3D tensor
- const unsigned int idx0 = 2 * num_arguments_per_3D_tensor();
- const unsigned int total_cross_plane_pad = _input->info()->padding().top + _input->info()->padding().bottom;
- _kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
- }
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, slice);
- add_3D_tensor_argument(idx, _output, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_3D(slice));
-}
-} // namespace arm_compute
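configure() above derives PARTIAL_LOAD_M0 and PARTIAL_LOAD_K0 from the source height and width modulo the block sizes, mirroring the partial-store logic of the multiply kernels, and hands everything to the OpenCL compiler as -D options. A short sketch of how those options come together for hypothetical sizes (the option names follow the build options listed above; the sizes are made up):

// Illustrative only: relates the -D build options to the source size and block shape.
#include <cstdio>
#include <string>

int main()
{
    const unsigned int src_w = 70, src_h = 23; // hypothetical LHS width (K) and height (M)
    const unsigned int m0 = 4, k0 = 8, v0 = 2; // block sizes from GEMMLHSMatrixInfo

    const unsigned int partial_load_m0 = src_h % m0; // 23 % 4 = 3 trailing rows
    const unsigned int partial_load_k0 = src_w % k0; // 70 % 8 = 6 trailing columns

    const std::string opts = "-DM0=" + std::to_string(m0) + " -DK0=" + std::to_string(k0) +
                             " -DV0=" + std::to_string(v0) +
                             " -DSRC_WIDTH=" + std::to_string(src_w) +
                             " -DSRC_HEIGHT=" + std::to_string(src_h) +
                             " -DPARTIAL_LOAD_M0=" + std::to_string(partial_load_m0) +
                             " -DPARTIAL_LOAD_K0=" + std::to_string(partial_load_k0);
    std::printf("%s\n", opts.c_str());
    return 0;
}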
diff --git a/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h b/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h
deleted file mode 100644
index 92202a26fc..0000000000
--- a/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMRESHAPELHSMATRIXKERNEL_H
-#define ARM_COMPUTE_CLGEMMRESHAPELHSMATRIXKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** OpenCL kernel to reshape the LHS matrix when performing the matrix multiplication.
- * In particular, this kernel splits the input matrix into blocks of size M0xK0 (defined through GEMMLHSMatrixInfo) and
- * stores each block in the output matrix, unrolling the values.
- */
-class CLGEMMReshapeLHSMatrixKernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLGEMMReshapeLHSMatrixKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLGEMMReshapeLHSMatrixKernel(const CLGEMMReshapeLHSMatrixKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLGEMMReshapeLHSMatrixKernel &operator=(const CLGEMMReshapeLHSMatrixKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLGEMMReshapeLHSMatrixKernel(CLGEMMReshapeLHSMatrixKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLGEMMReshapeLHSMatrixKernel &operator=(CLGEMMReshapeLHSMatrixKernel &&) = default;
- /** Initialise the kernel's input and output.
- *
- * @param[in] input Input tensor. Data types supported: All
- * @param[out] output Output tensor. Data type supported: same as @p input
- * @param[in] lhs_info LHS matrix information to be used for reshaping. This object contains all the necessary
- * information to reshape the input tensor. Only the following values are supported:
- * lhs_info.m0: 2,3,4,5,6,7,8
- * lhs_info.k0: 2,3,4,8,16
- * lhs_info.v0: greater than 0
- * lhs_info.transpose: true, false
- * lhs_info.interleave: true, false
- * @param[in] reinterpret_input_as_3d (Optional) True if the input has to be reinterpreted as 3D tensor
- */
- void configure(const ICLTensor *input, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d = false);
- /** Initialise the kernel's input and output.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Input tensor. Data types supported: All
- * @param[out] output Output tensor. Data type supported: same as @p input
- * @param[in] lhs_info LHS matrix information to be used for reshaping. This object contains all the necessary
- * information to reshape the input tensor. Only the following values are supported:
- * lhs_info.m0: 2,3,4,5,6,7,8
- * lhs_info.k0: 2,3,4,8,16
- * lhs_info.v0: greater than 0
- * lhs_info.transpose: true, false
- * lhs_info.interleave: true, false
- * @param[in] reinterpret_input_as_3d (Optional) True if the input has to be reinterpreted as 3D tensor
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d = false);
- /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMReshapeLHSMatrixKernel
- *
- * @param[in] input Input tensor info. Data types supported: All
- * @param[in] output Output tensor info which stores the interleaved matrix. Data type supported: same as @p input.
- * @param[in] lhs_info LHS matrix information to be used for reshaping. This object contains all the necessary
- * information to reshape the input tensor. Only the following values are supported:
- * lhs_info.m0: 2,3,4,5,6,7,8
- * lhs_info.k0: 2,3,4,8,16
- * lhs_info.v0: greater than 0
- * lhs_info.transpose: true, false
- * lhs_info.interleave: true, false
- * @param[in] reinterpret_input_as_3d True if the input has to be reinterpreted as 3D tensor
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d);
-
- // Inherited methods overridden
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_input;
- ICLTensor *_output;
- bool _reinterpret_input_as_3d;
-};
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CLGEMMRESHAPELHSMATRIXKERNEL_H */ \ No newline at end of file
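To make the "split into M0xK0 blocks and unroll the values" description in the removed header above concrete, here is a minimal host-side sketch of that block reshaping. The function name reshape_lhs_blocks is hypothetical and the layout shown is only the basic block-row-major case; the actual OpenCL kernel layout additionally depends on lhs_info.v0, interleave and transpose, and pads partial blocks.

#include <cstddef>
#include <vector>

// Illustrative only: copy an M x K row-major matrix into M0 x K0 blocks stored
// back to back. This mirrors the "split into blocks and unroll" idea from the
// class documentation, not the exact layout produced by the OpenCL kernel.
std::vector<float> reshape_lhs_blocks(const std::vector<float> &src,
                                      size_t M, size_t K, size_t M0, size_t K0)
{
    std::vector<float> dst;
    dst.reserve(M * K);
    for(size_t bm = 0; bm < M; bm += M0)         // block rows
    {
        for(size_t bk = 0; bk < K; bk += K0)     // block columns
        {
            for(size_t m = bm; m < bm + M0 && m < M; ++m)   // unroll one block
            {
                for(size_t k = bk; k < bk + K0 && k < K; ++k)
                {
                    dst.push_back(src[m * K + k]);
                }
            }
        }
    }
    return dst;
}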
diff --git a/src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp b/src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp
deleted file mode 100644
index 33de61ed01..0000000000
--- a/src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/CL/gemm/CLGEMMHelpers.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-using namespace arm_compute::misc::shape_calculator;
-
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const GEMMRHSMatrixInfo &rhs_info)
-{
- ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.n0 == 0);
- ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.k0 == 0);
- ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.h0 == 0);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((rhs_info.n0 & (rhs_info.n0 - 1)) && rhs_info.n0 != 3), "Only 2,3,4,8,16 are supported for n0");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((rhs_info.k0 & (rhs_info.k0 - 1)) && (rhs_info.k0 != 1) && (rhs_info.k0 != 3)), "Only 1,2,3,4,8,16 are supported for k0");
- ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.n0 > 16);
- ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.k0 > 16);
- ARM_COMPUTE_RETURN_ERROR_ON((rhs_info.k0 == 1) && (rhs_info.transpose));
-
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
-
- if(rhs_info.export_to_cl_image)
- {
- const TensorInfo tensor_reshaped_info(compute_rhs_reshaped_shape(*input, rhs_info), 1, input->data_type());
- ARM_COMPUTE_RETURN_ON_ERROR(cl_gemm::validate_image2d_support_on_rhs(tensor_reshaped_info, rhs_info));
- }
-
- if(output->total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), compute_rhs_reshaped_shape(*input, rhs_info));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const GEMMRHSMatrixInfo &rhs_info)
-{
- const unsigned int num_elems_processed_per_iteration_x = rhs_info.n0;
- const unsigned int num_elems_processed_per_iteration_y = rhs_info.k0;
- bool window_changed = false;
-
- // Output auto initialization if not yet initialized
- auto_init_if_empty(*output, input->clone()->set_tensor_shape(compute_rhs_reshaped_shape(*input, rhs_info)));
-
- // Configure window
- Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
-
- AccessWindowRectangle input_access(input, 0, 0, num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y);
- AccessWindowStatic output_access(output, 0, 0, output->dimension(0), output->dimension(1));
-
- window_changed = update_window_and_padding(win, input_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->tensor_shape()));
-
- if(rhs_info.export_to_cl_image)
- {
- arm_compute::cl_gemm::update_padding_for_cl_image(output);
- }
-
- // Collapse along the Z direction
- // This collapse needs to be here in order to tune the Z dimension of LWS
- Window collapsed = win.collapse(win, Window::DimZ);
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, collapsed);
-}
-} // namespace
-
-CLGEMMReshapeRHSMatrixKernel::CLGEMMReshapeRHSMatrixKernel()
- : _input(nullptr), _output(nullptr)
-{
-}
-
-void CLGEMMReshapeRHSMatrixKernel::configure(const ICLTensor *input, ICLTensor *output, const GEMMRHSMatrixInfo &rhs_info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, output, rhs_info);
-}
-
-void CLGEMMReshapeRHSMatrixKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const GEMMRHSMatrixInfo &rhs_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- // Perform validate step
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), rhs_info));
-
- _input = input;
- _output = output;
-
- // Create build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DN0=" + support::cpp11::to_string(rhs_info.n0));
- build_opts.add_option("-DK0=" + support::cpp11::to_string(rhs_info.k0));
- build_opts.add_option("-DH0=" + support::cpp11::to_string(rhs_info.h0));
- build_opts.add_option_if(rhs_info.transpose, "-DTRANSPOSE");
- build_opts.add_option_if(rhs_info.interleave, "-DINTERLEAVE");
- build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(input->info()->dimension(1)));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(input->info()->element_size()));
-
- std::string kernel_name("gemm_reshape_rhs_matrix_");
- kernel_name += rhs_info.transpose ? "t" : "nt";
-
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), output->info(), rhs_info);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
-}
-
-Status CLGEMMReshapeRHSMatrixKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const GEMMRHSMatrixInfo &rhs_info)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, rhs_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), rhs_info).first);
-
- return Status{};
-}
-
-void CLGEMMReshapeRHSMatrixKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- Window slice = window.first_slice_window_3D();
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, slice);
- add_3D_tensor_argument(idx, _output, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_3D(slice));
-}
-} // namespace arm_compute
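The n0/k0 checks in validate_arguments() above combine the power-of-two bit trick (x & (x - 1) is zero exactly when x is a power of two) with an explicit allowance for 3 and an upper bound of 16. A compact restatement of the documented constraint for n0 (accept 2, 3, 4, 8 or 16), using a hypothetical helper name; the kernel source spreads the same conditions across several ARM_COMPUTE_RETURN_ERROR_ON checks.

#include <cassert>

// Sketch of the documented n0 constraint: powers of two up to 16, plus 3.
static bool is_valid_n0(unsigned int n0)
{
    const bool power_of_two = (n0 != 0) && ((n0 & (n0 - 1)) == 0);
    return (power_of_two || n0 == 3) && n0 >= 2 && n0 <= 16;
}

int main()
{
    assert(is_valid_n0(2) && is_valid_n0(3) && is_valid_n0(4) && is_valid_n0(8) && is_valid_n0(16));
    assert(!is_valid_n0(0) && !is_valid_n0(5) && !is_valid_n0(6) && !is_valid_n0(32));
    return 0;
}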
diff --git a/src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h b/src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h
deleted file mode 100644
index 911484ea76..0000000000
--- a/src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLGEMMRESHAPERHSMATRIXKERNEL_H
-#define ARM_COMPUTE_CLGEMMRESHAPERHSMATRIXKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** OpenCL kernel to reshape the RHS matrix when performing the matrix multiplication.
- * In particular, this kernel splits the input matrix into blocks of size K0xN0 and stores each block in
- * the output matrix, unrolling the values. */
-class CLGEMMReshapeRHSMatrixKernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLGEMMReshapeRHSMatrixKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLGEMMReshapeRHSMatrixKernel(const CLGEMMReshapeRHSMatrixKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLGEMMReshapeRHSMatrixKernel &operator=(const CLGEMMReshapeRHSMatrixKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLGEMMReshapeRHSMatrixKernel(CLGEMMReshapeRHSMatrixKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLGEMMReshapeRHSMatrixKernel &operator=(CLGEMMReshapeRHSMatrixKernel &&) = default;
- /** Default destructor */
- ~CLGEMMReshapeRHSMatrixKernel() = default;
- /** Initialise the kernel's input and output.
- *
- * @note If rhs_info.export_to_cl_image = true, this OpenCL kernel will guarantee the OpenCL pitch alignment for the output tensor,
- * required to create an OpenCL image object from a buffer in @ref CLGEMMMatrixMultiplyReshapedKernel and in @ref CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.
- * Since the OpenCL image object is created by importing the OpenCL buffer, the following conditions are required:
- * -# rhs_info.n0 can only be 4, 8 and 16
- * -# rhs_info.k0 can only be 4, 8 and 16
- * -# Data type can only be F32, F16
- * -# The platform should support the OpenCL cl_khr_image2d_from_buffer extension
- * -# output width should be less than or equal to (CL_DEVICE_IMAGE2D_MAX_WIDTH * 4)
- * -# output (height * depth) should be less than or equal to CL_DEVICE_IMAGE2D_MAX_HEIGHT
- * -# The output tensor should only be consumed by @ref CLGEMMMatrixMultiplyReshapedKernel or @ref CLGEMMMatrixMultiplyReshapedOnlyRHSKernel
- *
- * @param[in] input Input tensor. Data types supported: All
- * @param[out] output Output tensor. Data type supported: same as @p input
- * @param[in] rhs_info RHS matrix information to be used for reshaping. This object contains all the necessary
- * information to reshape the input tensor. Only the following values are supported:
- * rhs_info.n0: 2,3,4,8,16 (only 4, 8 and 16 if rhs_info.export_to_cl_image == true)
- * rhs_info.k0: 1,2,3,4,8,16 (k0 = 1 only if rhs_info.transpose = false), (only 4, 8 and 16 if rhs_info.export_to_cl_image == true)
- * rhs_info.h0: greater than 0
- * rhs_info.transpose: true, false
- * rhs_info.interleave: true, false
- */
- void configure(const ICLTensor *input, ICLTensor *output, const GEMMRHSMatrixInfo &rhs_info);
- /** Initialise the kernel's input and output.
- *
- * @note If rhs_info.export_to_cl_image = true, this OpenCL kernel will guarantee the OpenCL pitch alignment for the output tensor,
- * required to create an OpenCL image object from a buffer in @ref CLGEMMMatrixMultiplyReshapedKernel and in @ref CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.
- * Since the OpenCL image object is created by importing the OpenCL buffer, the following conditions are required:
- * -# rhs_info.n0 can only be 4, 8 and 16
- * -# rhs_info.k0 can only be 4, 8 and 16
- * -# Data type can only be F32, F16
- * -# The platform should support the OpenCL cl_khr_image2d_from_buffer extension
- * -# output width should be less than or equal to (CL_DEVICE_IMAGE2D_MAX_WIDTH * 4)
- * -# output (height * depth) should be less than or equal to CL_DEVICE_IMAGE2D_MAX_HEIGHT
- * -# The output tensor should only be consumed by @ref CLGEMMMatrixMultiplyReshapedKernel or @ref CLGEMMMatrixMultiplyReshapedOnlyRHSKernel
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Input tensor. Data types supported: All
- * @param[out] output Output tensor. Data type supported: same as @p input
- * @param[in] rhs_info RHS matrix information to be used for reshaping. This object contains all the necessary
- * information to reshape the input tensor. Only the following values are supported:
- * rhs_info.n0: 2,3,4,8,16 (only 4, 8 and 16 if rhs_info.export_to_cl_image == true)
- * rhs_info.k0: 1,2,3,4,8,16 (k0 = 1 only if rhs_info.transpose = false), (only 4, 8 and 16 if rhs_info.export_to_cl_image == true)
- * rhs_info.h0: greater than 0
- * rhs_info.transpose: true, false
- * rhs_info.interleave: true, false
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const GEMMRHSMatrixInfo &rhs_info);
- /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMReshapeRHSMatrixKernel
- *
- * @note If rhs_info.export_to_cl_image = true, this OpenCL kernel will guarantee the OpenCL pitch alignment for the output tensor,
- * required to create an OpenCL image object from a buffer in @ref CLGEMMMatrixMultiplyReshapedKernel and in @ref CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.
- * Since the OpenCL image object is created by importing the OpenCL buffer, the following conditions are required:
- * -# rhs_info.n0 can only be 4, 8 and 16
- * -# rhs_info.k0 can only be 4, 8 and 16
- * -# Data type can only be F32, F16
- * -# The platform should support the OpenCL cl_khr_image2d_from_buffer extension
- * -# output width should be less than or equal to (CL_DEVICE_IMAGE2D_MAX_WIDTH * 4)
- * -# output (height * depth) should be less than or equal to CL_DEVICE_IMAGE2D_MAX_HEIGHT
- * -# The output tensor should only be consumed by @ref CLGEMMMatrixMultiplyReshapedKernel or @ref CLGEMMMatrixMultiplyReshapedOnlyRHSKernel
- *
- * @param[in] input Input tensor info. Data types supported: All
- * @param[in] output Output tensor info which stores the interleaved matrix. Data type supported: same as @p input.
- * @param[in] rhs_info RHS matrix information to be used for reshaping. This object contains all the necessary
- * information to reshape the input tensor. Only the following values are supported:
- * rhs_info.n0: 2,3,4,8,16 (only 4, 8 and 16 if rhs_info.export_to_cl_image == true)
- * rhs_info.k0: 1,2,3,4,8,16 (k0 = 1 only if rhs_info.transpose = false),(only 4, 8 and 16 if rhs_info.export_to_cl_image == true)
- * rhs_info.h0: greater than 0
- * rhs_info.transpose: true, false
- * rhs_info.interleave: true, false
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const GEMMRHSMatrixInfo &rhs_info);
-
- // Inherited methods overridden
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_input;
- ICLTensor *_output;
-};
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CLGEMMRESHAPERHSMATRIXKERNEL_H */ \ No newline at end of file
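The export_to_cl_image conditions repeated in the notes above can be summarised as a plain host-side feasibility check. The sketch below only restates the documented constraints; the struct and function names are hypothetical, max_image2d_width/height stand in for the values the device reports for CL_DEVICE_IMAGE2D_MAX_WIDTH / CL_DEVICE_IMAGE2D_MAX_HEIGHT, and the real validation lives in cl_gemm::validate_image2d_support_on_rhs().

#include <cstddef>

// Restates the documented conditions for rhs_info.export_to_cl_image = true.
struct RhsImageExportQuery
{
    unsigned int n0;                       // must be 4, 8 or 16
    unsigned int k0;                       // must be 4, 8 or 16
    bool         is_fp16_or_fp32;          // data type must be F16 or F32
    bool         has_image2d_from_buffer;  // cl_khr_image2d_from_buffer supported
    size_t       output_width;
    size_t       output_height_x_depth;
};

static bool can_export_rhs_to_cl_image(const RhsImageExportQuery &q,
                                       size_t max_image2d_width,
                                       size_t max_image2d_height)
{
    const bool n0_ok = (q.n0 == 4 || q.n0 == 8 || q.n0 == 16);
    const bool k0_ok = (q.k0 == 4 || q.k0 == 8 || q.k0 == 16);
    return n0_ok && k0_ok && q.is_fp16_or_fp32 && q.has_image2d_from_buffer &&
           q.output_width <= max_image2d_width * 4 &&
           q.output_height_x_depth <= max_image2d_height;
}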
diff --git a/src/core/CL/kernels/CLGatherKernel.cpp b/src/core/CL/kernels/CLGatherKernel.cpp
index e33bc7afd7..904bb07282 100644
--- a/src/core/CL/kernels/CLGatherKernel.cpp
+++ b/src/core/CL/kernels/CLGatherKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,8 +22,10 @@
* SOFTWARE.
*/
#include "src/core/CL/kernels/CLGatherKernel.h"
+
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/StringSupport.h"
@@ -34,20 +36,22 @@ namespace arm_compute
{
namespace
{
-inline Status validate_arguments(const ITensorInfo *input, const ITensorInfo *indices, const ITensorInfo *output, int axis)
+inline Status
+validate_arguments(const ITensorInfo *input, const ITensorInfo *indices, const ITensorInfo *output, int axis)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, indices, output);
const uint32_t actual_axis = wrap_around(axis, static_cast<int>(input->num_dimensions()));
- ARM_COMPUTE_RETURN_ERROR_ON(indices->num_dimensions() > 1);
- ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4);
+ ARM_COMPUTE_RETURN_ERROR_ON((input->num_dimensions() + indices->num_dimensions() - 1) > 4);
+
ARM_COMPUTE_RETURN_ERROR_ON(actual_axis >= input->num_dimensions());
ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
- TensorShape output_shape = arm_compute::misc::shape_calculator::compute_gather_shape(input->tensor_shape(), indices->tensor_shape(), actual_axis);
+ TensorShape output_shape = arm_compute::misc::shape_calculator::compute_gather_shape(
+ input->tensor_shape(), indices->tensor_shape(), actual_axis);
ARM_COMPUTE_RETURN_ERROR_ON(output_shape.total_size() != output->tensor_shape().total_size());
}
@@ -56,26 +60,27 @@ inline Status validate_arguments(const ITensorInfo *input, const ITensorInfo *in
return Status{};
}
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *indices, ITensorInfo *output, int axis)
+std::pair<Status, Window>
+validate_and_configure_window(ITensorInfo *input, ITensorInfo *indices, ITensorInfo *output, int axis)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, indices);
const uint32_t actual_axis = wrap_around(axis, static_cast<int>(input->num_dimensions()));
// Output auto initialization if not yet initialized
- TensorShape output_shape = arm_compute::misc::shape_calculator::compute_gather_shape(input->tensor_shape(), indices->tensor_shape(), actual_axis);
+ TensorShape output_shape = arm_compute::misc::shape_calculator::compute_gather_shape(
+ input->tensor_shape(), indices->tensor_shape(), actual_axis);
auto_init_if_empty((*output), output_shape, 1, input->data_type());
// Create window
Window win = calculate_max_window(*output, Steps());
- output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
return std::make_pair(Status{}, win);
}
} // namespace
-CLGatherKernel::CLGatherKernel()
- : _input(nullptr), _indices(nullptr), _output(nullptr), _axis(0)
+CLGatherKernel::CLGatherKernel() : _input(nullptr), _indices(nullptr), _output(nullptr), _axis(0)
{
+ _type = CLKernelType::ELEMENTWISE;
}
void CLGatherKernel::configure(const ICLTensor *input, const ICLTensor *indices, ICLTensor *output, int axis)
@@ -83,10 +88,14 @@ void CLGatherKernel::configure(const ICLTensor *input, const ICLTensor *indices,
configure(CLKernelLibrary::get().get_compile_context(), input, indices, output, axis);
}
-void CLGatherKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *indices, ICLTensor *output, int axis)
+void CLGatherKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ const ICLTensor *indices,
+ ICLTensor *output,
+ int axis)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, indices);
- auto padding_info = get_padding_info({ input, output, indices });
+ auto padding_info = get_padding_info({input, output, indices});
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), indices->info(), output->info(), axis));
// Configure kernel window
@@ -100,10 +109,12 @@ void CLGatherKernel::configure(const CLCompileContext &compile_context, const IC
// Set build options
CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(data_size_from_type(input->info()->data_type())));
+ build_opts.add_option("-DDATA_TYPE=" +
+ get_cl_unsigned_type_from_element_size(data_size_from_type(input->info()->data_type())));
build_opts.add_option("-DOUTPUT_DIM_Z=" + support::cpp11::to_string(output->info()->dimension(2)));
- build_opts.add_option("-DINPUT_DIM_Z=" + support::cpp11::to_string(input->info()->dimension(2)));
+ build_opts.add_option("-DINDICES_DIMS=" + support::cpp11::to_string(indices->info()->num_dimensions()));
build_opts.add_option("-DAXIS=" + support::cpp11::to_string(_axis));
+ build_opts.add_option("-DINDEX_LIMIT=" + support::cpp11::to_string(input->info()->tensor_shape()[_axis]));
// Create kernel
_kernel = create_kernel(compile_context, "gather", build_opts.options());
@@ -111,10 +122,12 @@ void CLGatherKernel::configure(const CLCompileContext &compile_context, const IC
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-Status CLGatherKernel::validate(const ITensorInfo *input, const ITensorInfo *indices, const ITensorInfo *output, int axis)
+Status
+CLGatherKernel::validate(const ITensorInfo *input, const ITensorInfo *indices, const ITensorInfo *output, int axis)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, indices, output, axis));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), indices->clone().get(), output->clone().get(), axis).first);
+ ARM_COMPUTE_RETURN_ON_ERROR(
+ validate_and_configure_window(input->clone().get(), indices->clone().get(), output->clone().get(), axis).first);
return Status{};
}
@@ -126,7 +139,7 @@ void CLGatherKernel::run(const Window &window, cl::CommandQueue &queue)
Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
unsigned int idx = 0;
add_4D_tensor_argument(idx, _input, window_collapsed);
- add_1D_tensor_argument(idx, _indices, window_collapsed);
+ add_4D_tensor_argument(idx, _indices, window_collapsed);
add_4D_tensor_argument(idx, _output, window_collapsed);
enqueue(queue, *this, window_collapsed, lws_hint());
}
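The updated check above, (input->num_dimensions() + indices->num_dimensions() - 1) > 4, follows from the gather output shape rule: the gathered axis is replaced by the full shape of the indices, so the output rank is the input rank plus the indices rank minus one, and it must still fit the kernel's 4-D limit. A small illustrative sketch of that shape rule, independent of the library's compute_gather_shape() helper and with a hypothetical function name (shapes here are ordered outermost to innermost, unlike the library's dimension ordering):

#include <cstddef>
#include <vector>

// output = input[:axis] + indices_shape + input[axis+1:]
// => rank(output) = rank(input) + rank(indices) - 1
std::vector<size_t> gather_output_shape(const std::vector<size_t> &input_shape,
                                        const std::vector<size_t> &indices_shape,
                                        size_t axis)
{
    std::vector<size_t> out;
    out.insert(out.end(), input_shape.begin(), input_shape.begin() + axis);
    out.insert(out.end(), indices_shape.begin(), indices_shape.end());
    out.insert(out.end(), input_shape.begin() + axis + 1, input_shape.end());
    return out;
}

For example, a 3-D input gathered with 2-D indices yields a 4-D output, which is exactly the boundary the kernel now accepts.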
diff --git a/src/core/CL/kernels/CLGatherKernel.h b/src/core/CL/kernels/CLGatherKernel.h
index 8f472a4696..db4b49d2f5 100644
--- a/src/core/CL/kernels/CLGatherKernel.h
+++ b/src/core/CL/kernels/CLGatherKernel.h
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_CLGATHERKERNEL_H
#include "arm_compute/core/Types.h"
+
#include "src/core/CL/ICLKernel.h"
namespace arm_compute
@@ -63,7 +64,11 @@ public:
* @param[out] output Destination tensor. Data type supported: Same as @p input
* @param[in] axis (Optional) The axis in @p input to gather @p indices from. Negative values wrap around. Defaults to 0
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *indices, ICLTensor *output, int axis = 0);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ const ICLTensor *indices,
+ ICLTensor *output,
+ int axis = 0);
/** Static function to check if given info will lead to a valid configuration of @ref CLGatherKernel
*
@@ -74,7 +79,8 @@ public:
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *indices, const ITensorInfo *output, int axis = 0);
+ static Status
+ validate(const ITensorInfo *input, const ITensorInfo *indices, const ITensorInfo *output, int axis = 0);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp b/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp
index 988bb39d88..b9ff72b928 100644
--- a/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp
+++ b/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,13 +25,13 @@
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLArray.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
-#include "src/core/AccessWindowStatic.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
@@ -48,7 +48,7 @@ Status validate_arguments(const ITensorInfo *anchors, const ITensorInfo *all_anc
ARM_COMPUTE_RETURN_ERROR_ON(anchors->dimension(0) != info.values_per_roi());
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(anchors, DataType::QSYMM16, DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON(anchors->num_dimensions() > 2);
- if(all_anchors->total_size() > 0)
+ if (all_anchors->total_size() > 0)
{
size_t feature_height = info.feat_height();
size_t feature_width = info.feat_width();
@@ -58,7 +58,7 @@ Status validate_arguments(const ITensorInfo *anchors, const ITensorInfo *all_anc
ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->dimension(0) != info.values_per_roi());
ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->dimension(1) != feature_height * feature_width * num_anchors);
- if(is_data_type_quantized(anchors->data_type()))
+ if (is_data_type_quantized(anchors->data_type()))
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(anchors, all_anchors);
}
@@ -67,20 +67,25 @@ Status validate_arguments(const ITensorInfo *anchors, const ITensorInfo *all_anc
}
} // namespace
-CLComputeAllAnchorsKernel::CLComputeAllAnchorsKernel()
- : _anchors(nullptr), _all_anchors(nullptr)
+CLComputeAllAnchorsKernel::CLComputeAllAnchorsKernel() : _anchors(nullptr), _all_anchors(nullptr)
{
+ _type = CLKernelType::ELEMENTWISE;
}
-void CLComputeAllAnchorsKernel::configure(const ICLTensor *anchors, ICLTensor *all_anchors, const ComputeAnchorsInfo &info)
+void CLComputeAllAnchorsKernel::configure(const ICLTensor *anchors,
+ ICLTensor *all_anchors,
+ const ComputeAnchorsInfo &info)
{
configure(CLKernelLibrary::get().get_compile_context(), anchors, all_anchors, info);
}
-void CLComputeAllAnchorsKernel::configure(const CLCompileContext &compile_context, const ICLTensor *anchors, ICLTensor *all_anchors, const ComputeAnchorsInfo &info)
+void CLComputeAllAnchorsKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *anchors,
+ ICLTensor *all_anchors,
+ const ComputeAnchorsInfo &info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(anchors, all_anchors);
- auto padding_info = get_padding_info({ anchors, all_anchors });
+ auto padding_info = get_padding_info({anchors, all_anchors});
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(anchors->info(), all_anchors->info(), info));
// Metadata
@@ -91,7 +96,8 @@ void CLComputeAllAnchorsKernel::configure(const CLCompileContext &compile_contex
// Initialize the output if empty
const TensorShape output_shape(info.values_per_roi(), width * height * num_anchors);
- auto_init_if_empty(*all_anchors->info(), TensorInfo(output_shape, 1, data_type, anchors->info()->quantization_info()));
+ auto_init_if_empty(*all_anchors->info(),
+ TensorInfo(output_shape, 1, data_type, anchors->info()->quantization_info()));
// Set instance variables
_anchors = anchors;
@@ -108,7 +114,7 @@ void CLComputeAllAnchorsKernel::configure(const CLCompileContext &compile_contex
build_opts.add_option("-DNUM_ANCHORS=" + support::cpp11::to_string(num_anchors));
build_opts.add_option("-DNUM_ROI_FIELDS=" + support::cpp11::to_string(info.values_per_roi()));
- if(is_quantized)
+ if (is_quantized)
{
const UniformQuantizationInfo qinfo = anchors->info()->quantization_info().uniform();
build_opts.add_option("-DSCALE=" + float_to_string_with_full_precision(qinfo.scale));
@@ -116,8 +122,9 @@ void CLComputeAllAnchorsKernel::configure(const CLCompileContext &compile_contex
}
// Create kernel
- const std::string kernel_name = (is_quantized) ? "generate_proposals_compute_all_anchors_quantized" : "generate_proposals_compute_all_anchors";
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
+ const std::string kernel_name =
+ (is_quantized) ? "generate_proposals_compute_all_anchors_quantized" : "generate_proposals_compute_all_anchors";
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// The tensor all_anchors can be interpreted as an array of structs (each structs has values_per_roi fields).
// This means we don't need to pad on the X dimension, as we know in advance how many fields
@@ -127,7 +134,9 @@ void CLComputeAllAnchorsKernel::configure(const CLCompileContext &compile_contex
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-Status CLComputeAllAnchorsKernel::validate(const ITensorInfo *anchors, const ITensorInfo *all_anchors, const ComputeAnchorsInfo &info)
+Status CLComputeAllAnchorsKernel::validate(const ITensorInfo *anchors,
+ const ITensorInfo *all_anchors,
+ const ComputeAnchorsInfo &info)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(anchors, all_anchors, info));
return Status{};
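For reference, the all_anchors tensor configured above is a flat list of anchors: one row of values_per_roi fields for every (feature x, feature y, base anchor) combination, hence the shape (values_per_roi, feat_width * feat_height * num_anchors) checked in validate_arguments(). A tiny sketch of that size computation, with a hypothetical function name:

#include <cstddef>
#include <utility>

// Illustrative computation of the CLComputeAllAnchorsKernel output shape:
// every spatial location of the feature map gets num_anchors shifted copies
// of the base anchors, each described by values_per_roi fields.
std::pair<size_t, size_t> all_anchors_shape(size_t values_per_roi,
                                            size_t feat_width,
                                            size_t feat_height,
                                            size_t num_anchors)
{
    return {values_per_roi, feat_width * feat_height * num_anchors};
}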
diff --git a/src/core/CL/kernels/CLGenerateProposalsLayerKernel.h b/src/core/CL/kernels/CLGenerateProposalsLayerKernel.h
index d26795ac7d..e08f281d6c 100644
--- a/src/core/CL/kernels/CLGenerateProposalsLayerKernel.h
+++ b/src/core/CL/kernels/CLGenerateProposalsLayerKernel.h
@@ -62,7 +62,10 @@ public:
* @param[in] info Contains Compute Anchors operation information described in @ref ComputeAnchorsInfo
*
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *anchors, ICLTensor *all_anchors, const ComputeAnchorsInfo &info);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *anchors,
+ ICLTensor *all_anchors,
+ const ComputeAnchorsInfo &info);
/** Static function to check if given info will lead to a valid configuration of @ref CLComputeAllAnchorsKernel
*
@@ -81,5 +84,5 @@ private:
const ICLTensor *_anchors;
ICLTensor *_all_anchors;
};
-} // arm_compute
+} // namespace arm_compute
#endif // ARM_COMPUTE_CLGENERATEPROSPOSALSLAYERKERNEL_H
diff --git a/src/core/CL/kernels/CLIm2ColKernel.cpp b/src/core/CL/kernels/CLIm2ColKernel.cpp
deleted file mode 100644
index 07309de83c..0000000000
--- a/src/core/CL/kernels/CLIm2ColKernel.cpp
+++ /dev/null
@@ -1,441 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLIm2ColKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-#include <cmath>
-#include <tuple>
-#include <utility>
-
-namespace arm_compute
-{
-using namespace misc::shape_calculator;
-
-namespace
-{
-struct Im2ColConfiguration
-{
- std::string kernel_name{};
- std::set<std::string> build_options{};
- unsigned int num_elems_processed_per_iteration{};
- bool is_padding_required_nchw{};
-};
-
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation,
- unsigned int num_groups)
-{
- const unsigned int channel_idx = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL);
-
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized(input->data_type()) && has_bias);
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
- ARM_COMPUTE_RETURN_ERROR_ON((dilation.x() < 1) || (dilation.y() < 1));
- ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() == DataLayout::UNKNOWN);
- ARM_COMPUTE_RETURN_ERROR_ON(num_groups == 0);
- ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() == DataLayout::NHWC && num_groups > 1);
- ARM_COMPUTE_RETURN_ERROR_ON((input->dimension(channel_idx) % num_groups) != 0);
-
- // Since there's no implicit padding added, check the total input spatial dimensions (with conv paddings) are big enough for the kernel dimensions
- const unsigned int width_idx = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
- const unsigned int height_idx = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
- const unsigned total_width = input->dimension(width_idx) + conv_info.pad_left() + conv_info.pad_right();
- const unsigned total_height = input->dimension(height_idx) + conv_info.pad_top() + conv_info.pad_bottom();
- ARM_COMPUTE_RETURN_ERROR_ON((total_width < kernel_dims.width) || (total_height < kernel_dims.height));
-
- if(output->total_size() > 0)
- {
- const TensorInfo tensor_info_output = output->clone()->set_tensor_shape(compute_im2col_conv_shape(input, kernel_dims, conv_info, has_bias, dilation, num_groups == 1, num_groups));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation,
- unsigned int num_elems_processed_per_iteration, bool is_padding_required_nchw, unsigned int num_groups)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- // Output tensor auto initialization if not yet initialized
- TensorShape expected_output_shape = compute_im2col_conv_shape(input, kernel_dims, conv_info, has_bias, dilation, num_groups == 1, num_groups);
-
- auto_init_if_empty(*output, input->clone()->set_tensor_shape(expected_output_shape));
-
- const DataLayout data_layout = input->data_layout();
- const unsigned int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const unsigned int height_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
- const unsigned int input_width = input->dimension(width_idx);
- const unsigned int input_height = input->dimension(height_idx);
-
- // Configure the execute window based on the selected optimal OpenCL kernel
- bool window_changed = false;
- Window win;
-
- if(data_layout == DataLayout::NHWC)
- {
- win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
-
- const int xin_start = 0;
- const int xin_end = input->dimension(0);
- const int yin_start = 0;
- const int yin_end = input->dimension(1);
-
- const int xout_start = 0;
- const int xout_end = output->dimension(0);
- const int yout_start = 0;
- const int yout_end = output->dimension(1);
-
- AccessWindowStatic input_access(input, xin_start, yin_start, xin_end, yin_end);
- AccessWindowStatic output_access(output, xout_start, yout_start, xout_end, yout_end);
- window_changed = window_changed || update_window_and_padding(win, input_access, output_access);
- }
- else
- {
- if(is_padding_required_nchw)
- {
- const BorderSize border(conv_info.pad_top(), conv_info.pad_right(), conv_info.pad_bottom(), conv_info.pad_left());
- win = calculate_max_window(*input,
- Steps(num_elems_processed_per_iteration * conv_info.stride().first, conv_info.stride().second));
- AccessWindowStatic input_access(input,
- -border.left,
- -border.top,
- ceil_to_multiple(input_width + border.right, kernel_dims.width * num_elems_processed_per_iteration),
- input_height + border.bottom);
- window_changed = window_changed || update_window_and_padding(win, input_access);
- }
- else
- {
- // For the generic case, CLIm2ColKernel doesn't need padding (we do not read out-of-bounds elements) so
- // update_window_and_padding() can be skipped
- win = calculate_max_window(*input, Steps());
- }
- }
- output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
- // set the Z dimension's step same size as the whole dimension so that one can't split across the Z dimension
- win.set_dimension_step(Window::DimZ, win[Window::DimZ].end() - win[Window::DimZ].start());
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
-}
-
-Im2ColConfiguration configure_opencl_kernel(const ITensorInfo *input, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation, unsigned int num_groups)
-{
- const DataLayout data_layout = input->data_layout();
- const DataType data_type = input->data_type();
- const unsigned int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const unsigned int height_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
- const unsigned int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
- const unsigned int input_width = input->dimension(width_idx);
- const unsigned int input_height = input->dimension(height_idx);
- const unsigned int input_channel = input->dimension(channel_idx);
-
- const std::pair<unsigned int, unsigned int> convolved_dims = scaled_dimensions(input_width, input_height, kernel_dims.width, kernel_dims.height, conv_info, dilation);
-
- // Im2Col configuration
- std::string kernel_name = "im2col_generic_";
- CLBuildOptions build_opts;
- unsigned int num_elems_processed_per_iteration = 1;
- bool is_padding_required_nchw = false;
- const UniformQuantizationInfo qinfo = input->quantization_info().uniform();
-
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
- build_opts.add_option("-DELEMENT_SIZE=" + support::cpp11::to_string(input->element_size()));
- build_opts.add_option("-DKERNEL_WIDTH=" + support::cpp11::to_string(kernel_dims.width));
- build_opts.add_option("-DKERNEL_HEIGHT=" + support::cpp11::to_string(kernel_dims.height));
- build_opts.add_option("-DCONVOLVED_WIDTH=" + support::cpp11::to_string(convolved_dims.first));
- build_opts.add_option("-DCONVOLVED_HEIGHT=" + support::cpp11::to_string(convolved_dims.second));
- build_opts.add_option("-DSTRIDE_X=" + support::cpp11::to_string(conv_info.stride().first));
- build_opts.add_option("-DSTRIDE_Y=" + support::cpp11::to_string(conv_info.stride().second));
- build_opts.add_option("-DPAD_LEFT=" + support::cpp11::to_string(conv_info.pad_left()));
- build_opts.add_option("-DPAD_TOP=" + support::cpp11::to_string(conv_info.pad_top()));
- build_opts.add_option("-DPAD_RIGHT=" + support::cpp11::to_string(conv_info.pad_right()));
- build_opts.add_option("-DPAD_BOTTOM=" + support::cpp11::to_string(conv_info.pad_bottom()));
- build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(input_width));
- build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(input_height));
- build_opts.add_option("-DSRC_DEPTH=" + support::cpp11::to_string(input_channel));
- build_opts.add_option("-DDILATION_X=" + support::cpp11::to_string(dilation.x()));
- build_opts.add_option("-DDILATION_Y=" + support::cpp11::to_string(dilation.y()));
- build_opts.add_option_if(num_groups > 1, "-DNUM_GROUPS=" + support::cpp11::to_string(num_groups));
- build_opts.add_option_if_else(is_data_type_quantized(data_type), "-DPAD_VALUE=" + support::cpp11::to_string(qinfo.offset), "-DPAD_VALUE=0");
- build_opts.add_option_if(has_bias, "-DHAS_BIAS");
-
- if(data_layout == DataLayout::NHWC)
- {
- num_elems_processed_per_iteration = std::min(2U, input_channel);
- is_padding_required_nchw = false;
-
- // Only the 3x3 and 9x9 cases are optimized for NHWC
- if(kernel_dims == Size2D(3U, 3U))
- {
- kernel_name = "im2col3x3_";
- }
- else if(kernel_dims == Size2D(9U, 9U))
- {
- kernel_name = "im2col9x9_";
- }
-
- // Get boundary vector (the first/last vector with potentially a partial vector size) size
- // If input_channel is a multiple of num_elems_processed_per_iteration, the boundary vec size is the (full) vector size
- // otherwise, the boundary vec size is the (partial) remainder vector size
- const unsigned int vec_size = num_elems_processed_per_iteration;
- const unsigned int partial_vec_size = input_channel % vec_size;
- const unsigned int boundary_vec_size = vec_size - ((vec_size - partial_vec_size) % vec_size);
- build_opts.add_option("-DVECTOR_SIZE=" + support::cpp11::to_string(vec_size));
- build_opts.add_option("-DBOUNDARY_VECTOR_SIZE=" + support::cpp11::to_string(boundary_vec_size));
- }
- else
- {
- if(dilation == Size2D(1U, 1U))
- {
- const bool squared_im2col = kernel_dims.width == kernel_dims.height;
- if(squared_im2col)
- {
- // Check if we can run an optimized im2col for NCHW
- switch(kernel_dims.width)
- {
- case 1:
- // Optimized im2col1x1 if stride_x = 1 and conv_info.has_padding() = false
- if(conv_info.stride().first == 1 && !conv_info.has_padding())
- {
- kernel_name = "im2col1x1_stridex1_";
- num_elems_processed_per_iteration = 4;
- is_padding_required_nchw = true;
- }
- break;
- case 3:
- kernel_name = "im2col3x3_";
- num_elems_processed_per_iteration = 1;
- is_padding_required_nchw = true;
- break;
- case 5:
- kernel_name = "im2col5x5_";
- num_elems_processed_per_iteration = 1;
- is_padding_required_nchw = true;
- break;
- case 11:
- // Optimized im2col11x11 if pad_x = pad_y = 0
- if(!conv_info.has_padding())
- {
- kernel_name = "im2col11x11_padx0_pady0_";
- num_elems_processed_per_iteration = 1;
- is_padding_required_nchw = true;
- }
- break;
- default:
- kernel_name = "im2col_generic_";
- num_elems_processed_per_iteration = 1;
- is_padding_required_nchw = false;
- break;
- }
- }
- else if(kernel_dims.width > 1 && !conv_info.has_padding())
- {
- kernel_name = "im2col_generic_padx0_pady0_";
- num_elems_processed_per_iteration = 1;
- is_padding_required_nchw = false;
-
- // Optimized im2col is performed using one or more vector operations with the specified vector size
- // and a remainder. For example, for 5x5 convolutions, im2col is performed using vectors of size 4
- // and scalars; for 7x7 convolutions, using vectors of size 4 and vectors of size 3.
- // Using the vector size of 4 is always safe since OpenCL supports vectors of size 2 and 3.
- // Using the vector size of 8, however, may be faster.
- // For 2x2 convolutions, use vectors of size 2. (For 3x3 convolutions, im2col_kernel3x3_padx0_pady0
- // is used instead.)
- const size_t vector_size = std::min(static_cast<size_t>(4), kernel_dims.width);
- const size_t width_mod_vector_size = kernel_dims.width % vector_size;
- build_opts.add_option("-DVECTOR_SIZE=" + support::cpp11::to_string(vector_size));
- build_opts.add_option("-DWIDTH_MOD_VECTOR_SIZE=" + support::cpp11::to_string(width_mod_vector_size));
- }
- }
- }
-
- // Append the data layout to the kernel_name
- kernel_name += lower_string(string_from_data_layout(data_layout));
-
- Im2ColConfiguration im2col_config;
- im2col_config.kernel_name = kernel_name;
- im2col_config.build_options = build_opts.options();
- im2col_config.num_elems_processed_per_iteration = num_elems_processed_per_iteration;
- im2col_config.is_padding_required_nchw = is_padding_required_nchw;
-
- return im2col_config;
-}
-} // namespace
-
-CLIm2ColKernel::CLIm2ColKernel()
- : _input(nullptr), _output(nullptr), _data_layout(DataLayout::UNKNOWN), _convolved_dims(), _num_elems_processed_per_iteration(1), _kernel_dims(), _conv_info(), _num_groups()
-{
-}
-
-void CLIm2ColKernel::configure(const ICLTensor *input, ICLTensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation,
- unsigned int num_groups)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, output, kernel_dims, conv_info, has_bias, dilation, num_groups);
-}
-
-void CLIm2ColKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias,
- const Size2D &dilation,
- unsigned int num_groups)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), kernel_dims, conv_info, has_bias, dilation, num_groups));
-
- auto padding_info = get_padding_info({ input, output });
- _data_layout = input->info()->data_layout();
-
- const unsigned int width_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
- const unsigned int height_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
- const unsigned int input_width = input->info()->dimension(width_idx);
- const unsigned int input_height = input->info()->dimension(height_idx);
-
- // Select and configure the optimal OpenCL kernel to run.
- // This function returns the OpenCL kernel's name, the arguments to pass at compile time, the number of elements processed per iteration
- // and the padding requirement flag
- Im2ColConfiguration im2col_config = configure_opencl_kernel(input->info(), kernel_dims, conv_info, has_bias, dilation, num_groups);
-
- // Create kernel
- _kernel = create_kernel(compile_context, im2col_config.kernel_name, im2col_config.build_options);
-
- _input = input;
- _output = output;
- _convolved_dims = scaled_dimensions(input_width, input_height, kernel_dims.width, kernel_dims.height, conv_info, dilation);
- _num_elems_processed_per_iteration = im2col_config.num_elems_processed_per_iteration;
- _kernel_dims = kernel_dims; // Only needed by the Tuner
- _conv_info = conv_info; // Only needed by the Tuner
- _num_groups = num_groups;
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), output->info(), kernel_dims, conv_info, has_bias, dilation, im2col_config.num_elems_processed_per_iteration,
- im2col_config.is_padding_required_nchw, num_groups);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
-
- // Set config_id for enabling LWS tuning
- _config_id = im2col_config.kernel_name;
- _config_id += "_";
- _config_id += lower_string(string_from_data_type(input->info()->data_type()));
- _config_id += "_";
- _config_id += support::cpp11::to_string(num_groups);
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(1));
- _config_id += "_";
- _config_id += lower_string(string_from_data_layout(_data_layout));
-
- ARM_COMPUTE_ERROR_ON(input->info()->data_layout() == DataLayout::NHWC && has_padding_changed(padding_info));
-}
-
-Status CLIm2ColKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation,
- unsigned int num_groups)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, kernel_dims, conv_info, has_bias, dilation, num_groups));
- Im2ColConfiguration im2col_config = configure_opencl_kernel(input, kernel_dims, conv_info, has_bias, dilation, num_groups);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), kernel_dims, conv_info, has_bias, dilation, im2col_config.num_elems_processed_per_iteration,
- im2col_config.is_padding_required_nchw, num_groups)
- .first);
- return Status{};
-}
-
-void CLIm2ColKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(ICLKernel::window(), window);
-
- // Get initial windows
- // Collapse in order to have (SRC_DEPTH * BATCH_SIZE) on the 3rd dimension
- Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
- window_collapsed.set_dimension_step(Window::DimZ, 1);
-
- Window window_output;
- window_output.use_tensor_dimensions(_output->info()->tensor_shape());
-
- const Window first_slice_3d = window_collapsed.first_slice_window_3D();
-
- Window slice = first_slice_3d;
- Window slice_in = first_slice_3d;
- Window slice_out = window_output.first_slice_window_2D();
-
- if(_data_layout == DataLayout::NHWC)
- {
- const Window tmp_win = window.collapse_if_possible(ICLKernel::window(), 3);
- const int num_batches = tmp_win[3].end();
-
- slice.set(1, Window::Dimension(0, static_cast<int>(_output->info()->tensor_shape()[1]), 1));
- slice.set(2, Window::Dimension(0, static_cast<int>(num_batches), 1));
- }
- else
- {
- slice.set(0, Window::Dimension(0, static_cast<int>(ceil_to_multiple(_convolved_dims.first, _num_elems_processed_per_iteration)), _num_elems_processed_per_iteration));
- slice.set(1, Window::Dimension(0, static_cast<int>(_convolved_dims.second), 1));
- // Note: In case of NCHW the 3rd dimension is already set collapsing the input window
- }
-
- // Setup input slice
- // The dimensions of the input are increased within the OpenCL kernel
- slice_in.set(Window::DimX, Window::Dimension(0, 0, 0));
- slice_in.set(Window::DimY, Window::Dimension(0, 0, 0));
- slice_in.set(Window::DimZ, Window::Dimension(0, 0, 0));
-
- // Setup output slice
- // The dimensions of the output are increased within the OpenCL kernel
- slice_out.set(Window::DimX, Window::Dimension(0, 0, 0));
- slice_out.set(Window::DimY, Window::Dimension(0, 0, 0));
-
- unsigned int idx = num_arguments_per_3D_tensor() + (_num_groups == 1 ? num_arguments_per_2D_tensor() : num_arguments_per_3D_tensor());
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input->info()->strides_in_bytes()[3]));
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_output->info()->strides_in_bytes()[((_num_groups == 1) ? 2 : 3)]));
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, slice_in);
- if(_num_groups == 1)
- {
- add_2D_tensor_argument(idx, _output, slice_out);
- }
- else
- {
- add_3D_tensor_argument(idx, _output, slice_out);
- }
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window_collapsed.slide_window_slice_3D(slice) && window_output.slide_window_slice_2D(slice_out) && window_collapsed.slide_window_slice_3D(slice_in));
-}
-} // namespace arm_compute
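The NHWC boundary-vector computation in configure_opencl_kernel() above is worth spelling out: when the channel count is an exact multiple of the vector size, the boundary (first/last) vector is a full vector; otherwise it is the partial remainder vector. A minimal sketch of the same arithmetic, with a hypothetical helper name:

#include <cassert>

// boundary = vec_size - ((vec_size - (input_channel % vec_size)) % vec_size)
// i.e. the partial remainder if there is one, otherwise the full vector size.
static unsigned int boundary_vector_size(unsigned int input_channel, unsigned int vec_size)
{
    const unsigned int partial = input_channel % vec_size;
    return vec_size - ((vec_size - partial) % vec_size);
}

int main()
{
    assert(boundary_vector_size(8, 2) == 2); // exact multiple -> full vector
    assert(boundary_vector_size(7, 2) == 1); // remainder of 1 -> partial vector
    assert(boundary_vector_size(3, 2) == 1);
    return 0;
}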
diff --git a/src/core/CL/kernels/CLIm2ColKernel.h b/src/core/CL/kernels/CLIm2ColKernel.h
deleted file mode 100644
index 2920c7d138..0000000000
--- a/src/core/CL/kernels/CLIm2ColKernel.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLIM2COLKERNEL_H
-#define ARM_COMPUTE_CLIM2COLKERNEL_H
-
-#include "arm_compute/core/Size2D.h"
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the im2col reshape kernel.
- *
- * Rearranges image blocks into columns. It is used to strip out each convolution block to a single column.
- * It is used to transform a convolution to a plain matrix multiplication.
- *
- * For example, given the image below and assuming 3x3 image blocks with a stride of 1, we have:
- * @f[
- * \left( \begin{array}{cccc}
- * a00 & a01 & a02 & a03 \\
- * a10 & a11 & a12 & a13 \\
- * a20 & a21 & a22 & a23 \\
- * a30 & a31 & a32 & a33 \\
- * \end{array} \right)
- * =
- * \left( \begin{array}{ccccccccc}
- * a00 & a01 & a02 & a10 & a11 & a12 & a20 & a21 & a22 \\
- * a01 & a02 & a03 & a11 & a12 & a13 & a21 & a22 & a23 \\
- * a10 & a11 & a12 & a20 & a21 & a22 & a30 & a31 & a32 \\
- * a11 & a12 & a13 & a21 & a22 & a23 & a31 & a32 & a33 \\
- * \end{array} \right)
- * @f]
- */
-class CLIm2ColKernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLIm2ColKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLIm2ColKernel(const CLIm2ColKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLIm2ColKernel &operator=(const CLIm2ColKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLIm2ColKernel(CLIm2ColKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLIm2ColKernel &operator=(CLIm2ColKernel &&) = default;
- /** Set the input and output of the kernel.
- *
- * @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
- *                              while every optional dimension from 4 and above represents a batch of inputs. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
- * @param[out] output The output tensor. First 2 lower dimensions represent a transform of each 3D input,
- * while every dimension above represents a batch. Data types supported: Same as @p input
- * @param[in] kernel_dims The kernel dimensions (width and height).
- * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
- * @param[in]  has_bias    If biases are provided, the matrix is expanded with 1s to account for the bias.
- * This is valid only for non-quantized inputs.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution.
- * Number of groups other than 1 is only supported for NCHW data layout.
- *                             The number of channels should be a multiple of the number of groups.
- */
- void configure(const ICLTensor *input, ICLTensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation = Size2D(1U, 1U),
- unsigned int num_groups = 1);
- /** Set the input and output of the kernel.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
- *                              while every optional dimension from 4 and above represents a batch of inputs. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
- * @param[out] output The output tensor. First 2 lower dimensions represent a transform of each 3D input,
- * while every dimension above represents a batch. Data types supported: Same as @p input
- * @param[in] kernel_dims The kernel dimensions (width and height).
- * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
- * @param[in]  has_bias        If biases are provided, the matrix is expanded with 1s to account for the bias.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias,
- const Size2D &dilation = Size2D(1U, 1U),
- unsigned int num_groups = 1);
- /** Static function to check if given info will lead to a valid configuration of @ref CLIm2ColKernel
- *
- * @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
- *                            while every optional dimension from 4 and above represents a batch of inputs. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
- * @param[in] output The output tensor. First 2 lower dimensions represent a transform of each 3D input,
- * while every dimension above represents a batch. Data types supported: Same as @p input
- * @param[in] kernel_dims The kernel dimensions (width and height).
- * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
- * @param[in]  has_bias    If biases are provided, the matrix is expanded with 1s to account for the bias.
- * This is valid only for non-quantized inputs.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution.
- * Number of groups other than 1 is only supported for NCHW data layout.
- *                           The number of channels should be a multiple of the number of groups.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation = Size2D(1U, 1U),
- unsigned int num_groups = 1);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-public:
- const ICLTensor *_input;
- ICLTensor *_output;
- DataLayout _data_layout;
- std::pair<unsigned int, unsigned int> _convolved_dims;
- unsigned int _num_elems_processed_per_iteration;
- Size2D _kernel_dims;
- PadStrideInfo _conv_info;
- unsigned int _num_groups;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLIM2COLKERNEL_H */
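For reference, the transform documented in the deleted header above can be sketched on the host as follows. This is a minimal, purely illustrative single-channel version (no padding, dilation or groups); the removed OpenCL kernel additionally handles NCHW/NHWC layouts, batching, quantized types and the optional bias expansion. The function name `im2col` here is just for the sketch.

    #include <cstddef>
    #include <vector>

    // Illustrative im2col sketch: each k x k block (stride `stride`) becomes one row
    // of the output matrix, so the convolution turns into a matrix multiplication.
    std::vector<float> im2col(const std::vector<float> &src, size_t width, size_t height,
                              size_t k, size_t stride)
    {
        std::vector<float> dst;
        for (size_t y = 0; y + k <= height; y += stride)
        {
            for (size_t x = 0; x + k <= width; x += stride)
            {
                for (size_t ky = 0; ky < k; ++ky)
                {
                    for (size_t kx = 0; kx < k; ++kx)
                    {
                        dst.push_back(src[(y + ky) * width + (x + kx)]);
                    }
                }
            }
        }
        // ((width - k) / stride + 1) * ((height - k) / stride + 1) rows of k * k elements
        return dst;
    }

With a 4x4 input, k = 3 and stride 1 this reproduces the 4x9 matrix shown in the class documentation above.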
diff --git a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp
index f9e1cbec27..b13eb16556 100644
--- a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,65 +29,154 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
-
#include "support/StringSupport.h"
namespace arm_compute
{
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const InstanceNormalizationLayerKernelInfo &info)
+Status validate_arguments(const ITensorInfo *input,
+ const ITensorInfo *output,
+ const InstanceNormalizationLayerKernelInfo &info)
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.epsilon == 0.f, "Epsilon must be different than 0");
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F16, DataType::F32);
- if(output != nullptr && output->total_size() != 0)
+ if (output != nullptr && output->total_size() != 0)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_channels() != output->num_channels(), "Input and output have different number of channels");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_channels() != output->num_channels(),
+ "Input and output have different number of channels");
}
return Status{};
}
-std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
+Status validate_arguments_meanvar(const ITensorInfo *input, const ITensorInfo *output)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F16, DataType::F32);
+
+ if (output != nullptr && output->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_channels() != output->num_channels(),
+ "Input and output have different number of channels");
+ }
+
+ return Status{};
+}
+} // namespace
+
+CLComputeMeanVariance::CLComputeMeanVariance() : _input(nullptr), _output(nullptr)
+{
+ _type = CLKernelType::ELEMENTWISE;
+}
+
+void CLComputeMeanVariance::configure(const CLCompileContext &compile_context,
+ ICLTensor *input,
+ ICLTensor *output,
+ bool use_mixed_precision)
{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input);
+ auto padding_info = get_padding_info({input, output});
+
+ _input = input;
+ _output = output == nullptr ? input : output;
+
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_meanvar(_input->info(), _output->info()));
+ const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
+
+ CLBuildOptions build_opts;
+ build_opts.add_option("-DINTERNAL_DATA_TYPE=" +
+ (use_mixed_precision ? "float" : get_cl_type_from_data_type(input->info()->data_type())));
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+ build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
+ build_opts.add_option("-DDIM_X=" + support::cpp11::to_string(input->info()->dimension(0)));
+ build_opts.add_option("-DDIM_Y=" + support::cpp11::to_string(input->info()->dimension(1)));
+ build_opts.add_option("-DDIM_Z=" + support::cpp11::to_string(input->info()->dimension(2)));
+ build_opts.add_option_if(_input->info()->data_layout() == DataLayout::NHWC, "-DNHWC");
+ // Create kernel
+ _kernel = create_kernel(compile_context, "compute_mean_var", build_opts.options());
+
// We handle the planes manually
- Window win = calculate_max_window(*input, Steps(1));
+ Window win = calculate_max_window(*(input->info()), Steps(1));
+ const auto data_layout = input->info()->data_layout();
+ const unsigned int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
+ const unsigned int batches_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
+ const unsigned int input_channel = input->info()->dimension(channel_idx);
+ const unsigned int input_batches = input->info()->dimension(batches_idx);
+ const TensorShape out_shape(input_channel, 2u, input_batches);
// Output auto initialization if not yet initialized
- auto_init_if_empty(*output, input->tensor_shape(), 1, input->data_type());
+ if (use_mixed_precision)
+ {
+ auto_init_if_empty(*_output->info(), out_shape, 1, DataType::F32);
+ }
+ else
+ {
+ auto_init_if_empty(*_output->info(), out_shape, 1, input->info()->data_type());
+ }
+ ICLKernel::configure_internal(win);
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
+}
- // CLInstanceNormalizationLayerKernel doesn't need padding so update_window_and_padding() can be skipped
- Coordinates coord;
- coord.set_num_dimensions(output->num_dimensions());
- output->set_valid_region(ValidRegion(coord, output->tensor_shape()));
- return std::make_pair(Status{}, win);
+Status CLComputeMeanVariance::validate(const ITensorInfo *input, const ITensorInfo *output)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_meanvar(input, output));
+ return Status{};
}
-} // namespace
-CLInstanceNormalizationLayerKernel::CLInstanceNormalizationLayerKernel()
- : _input(nullptr), _output(nullptr), _run_in_place(false)
+void CLComputeMeanVariance::run(const Window &window, cl::CommandQueue &queue)
{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+
+ Window collapsed_window = window.collapse(window, Window::DimZ);
+
+ // We will process the planes together
+ if (_input->info()->data_layout() == DataLayout::NCHW)
+ {
+ collapsed_window.set(Window::DimX, Window::Dimension(0, 1, 1));
+ collapsed_window.set(Window::DimY, Window::Dimension(0, 1, 1));
+ }
+ else
+ {
+ collapsed_window.set(Window::DimZ, Window::Dimension(0, 1, 1));
+ collapsed_window.set(Window::DimY, Window::Dimension(0, _input->info()->dimension(3), 1));
+ }
+ unsigned int idx = 0;
+ add_4D_tensor_argument(idx, _input, collapsed_window);
+ add_3D_tensor_argument(idx, _output, collapsed_window);
+
+ enqueue(queue, *this, collapsed_window, lws_hint());
}
-void CLInstanceNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info)
+CLInstanceNormalizationLayerKernel::CLInstanceNormalizationLayerKernel()
+ : _input(nullptr), _output(nullptr), _mean(nullptr), _run_in_place(false)
{
- configure(CLKernelLibrary::get().get_compile_context(), input, output, info);
+ _type = CLKernelType::ELEMENTWISE;
}
-void CLInstanceNormalizationLayerKernel::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info)
+void CLInstanceNormalizationLayerKernel::configure(const CLCompileContext &compile_context,
+ ICLTensor *input,
+ ICLTensor *mean_var,
+ ICLTensor *output,
+ const InstanceNormalizationLayerKernelInfo &info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
- auto padding_info = get_padding_info({ input, output });
+ auto padding_info = get_padding_info({input, output});
_input = input;
_output = output == nullptr ? input : output;
+ _mean = mean_var;
_run_in_place = (output == nullptr) || (output == input);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(_input->info(), _output->info(), info));
@@ -95,7 +184,9 @@ void CLInstanceNormalizationLayerKernel::configure(const CLCompileContext &compi
CLBuildOptions build_opts;
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
- build_opts.add_option("-DINTERNAL_DATA_TYPE=" + (info.use_mixed_precision ? "float" : get_cl_type_from_data_type(input->info()->data_type())));
+ build_opts.add_option("-DINTERNAL_DATA_TYPE=" + (info.use_mixed_precision
+ ? "float"
+ : get_cl_type_from_data_type(input->info()->data_type())));
build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
build_opts.add_option("-DDIM_X=" + support::cpp11::to_string(input->info()->dimension(0)));
build_opts.add_option("-DDIM_Y=" + support::cpp11::to_string(input->info()->dimension(1)));
@@ -110,16 +201,21 @@ void CLInstanceNormalizationLayerKernel::configure(const CLCompileContext &compi
_kernel = create_kernel(compile_context, "instance_normalization", build_opts.options());
// Configure kernel window
- auto win_config = validate_and_configure_window(_input->info(), _output->info());
- ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
- ICLKernel::configure_internal(std::get<1>(win_config));
+ Window win = calculate_max_window(*input->info(), Steps(1));
+ if (output != nullptr)
+ {
+ auto_init_if_empty(*output->info(), input->info()->tensor_shape(), 1, input->info()->data_type());
+ }
+
+ ICLKernel::configure_internal(win);
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-Status CLInstanceNormalizationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const InstanceNormalizationLayerKernelInfo &info)
+Status CLInstanceNormalizationLayerKernel::validate(const ITensorInfo *input,
+ const ITensorInfo *output,
+ const InstanceNormalizationLayerKernelInfo &info)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, info));
- ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), (output == nullptr ? input->clone().get() : output->clone().get()))));
return Status{};
}
@@ -131,7 +227,7 @@ void CLInstanceNormalizationLayerKernel::run(const Window &window, cl::CommandQu
Window collapsed_window = window.collapse(window, Window::DimZ);
// We will process the planes together
- if(_input->info()->data_layout() == DataLayout::NCHW)
+ if (_input->info()->data_layout() == DataLayout::NCHW)
{
collapsed_window.set(Window::DimX, Window::Dimension(0, 1, 1));
collapsed_window.set(Window::DimY, Window::Dimension(0, 1, 1));
@@ -144,7 +240,9 @@ void CLInstanceNormalizationLayerKernel::run(const Window &window, cl::CommandQu
unsigned int idx = 0;
add_4D_tensor_argument(idx, _input, collapsed_window);
- if(!_run_in_place)
+ add_3D_tensor_argument(idx, _mean, collapsed_window);
+
+ if (!_run_in_place)
{
add_4D_tensor_argument(idx, _output, collapsed_window);
}
diff --git a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
index d4444f0b20..9f436da7f6 100644
--- a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
+++ b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,10 +24,10 @@
#ifndef ARM_COMPUTE_CLINSTANCENORMALIZATIONLAYERKERNEL_H
#define ARM_COMPUTE_CLINSTANCENORMALIZATIONLAYERKERNEL_H
-#include "src/core/CL/ICLKernel.h"
-
#include "arm_compute/core/KernelDescriptors.h"
+#include "src/core/CL/ICLKernel.h"
+
namespace arm_compute
{
// Forward declarations
@@ -52,21 +52,18 @@ public:
/** Set the input and output tensors.
*
- * @param[in, out] input Source tensor. Data types supported: F16/F32. Data layout supported: NCHW, NHWC
- * In case of @p output tensor = nullptr this tensor will store the result of the normalization.
- * @param[out] output Destination tensor. Data types and data layouts supported: same as @p input.
- * @param[in] info Kernel meta-data descriptor
- */
- void configure(ICLTensor *input, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info);
- /** Set the input and output tensors.
- *
* @param[in] compile_context The compile context to be used.
* @param[in, out] input Source tensor. Data types supported: F16/F32. Data layout supported: NCHW, NHWC
* In case of @p output tensor = nullptr this tensor will store the result of the normalization.
+ * @param[in] mean_var Tensor containing the precomputed mean and variance values. Data types supported: F32.
* @param[out] output Destination tensor. Data types and data layouts supported: same as @p input.
* @param[in] info Kernel meta-data descriptor
*/
- void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info);
+ void configure(const CLCompileContext &compile_context,
+ ICLTensor *input,
+ ICLTensor *mean_var,
+ ICLTensor *output,
+ const InstanceNormalizationLayerKernelInfo &info);
/** Static function to check if given info will lead to a valid configuration of @ref CLInstanceNormalizationLayer.
*
@@ -76,7 +73,8 @@ public:
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const InstanceNormalizationLayerKernelInfo &info);
+ static Status
+ validate(const ITensorInfo *input, const ITensorInfo *output, const InstanceNormalizationLayerKernelInfo &info);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
@@ -84,7 +82,53 @@ public:
private:
ICLTensor *_input;
ICLTensor *_output;
+ ICLTensor *_mean;
bool _run_in_place;
};
+
+/** Interface for computing the per-channel mean and variance */
+class CLComputeMeanVariance : public ICLKernel
+{
+public:
+ /** Constructor */
+ CLComputeMeanVariance();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLComputeMeanVariance(const CLComputeMeanVariance &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLComputeMeanVariance &operator=(const CLComputeMeanVariance &) = delete;
+ /** Default Move Constructor. */
+ CLComputeMeanVariance(CLComputeMeanVariance &&) = default;
+ /** Default move assignment operator */
+ CLComputeMeanVariance &operator=(CLComputeMeanVariance &&) = default;
+ /** Default destructor */
+ ~CLComputeMeanVariance() = default;
+
+ /** Set the input and output tensors.
+ *
+ * @param[in] compile_context The compile context to be used.
+ * @param[in, out] input Source tensor. Data types supported: F16/F32. Data layout supported: NCHW, NHWC
+     *                                    In case @p output is a nullptr, this tensor will store the computed per-channel mean and variance.
+     * @param[out]     output             Destination tensor holding the per-channel mean and variance. Data layout supported: same as @p input.
+     *                                    Data type supported: F32 when @p use_mixed_precision is enabled, otherwise same as @p input.
+ * @param[in] use_mixed_precision Use mixed precision in case of FP16 execution
+ */
+ void
+ configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, bool use_mixed_precision);
+
+    /** Static function to check if given info will lead to a valid configuration of @ref CLComputeMeanVariance.
+ *
+ * @param[in] input Source tensor info. Data types supported: F16/F32. Data layout supported: NHWC, NCHW
+ * @param[in] output Destination tensor info. Data types and data layouts supported: same as @p input.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output);
+
+ // Inherited methods overridden:
+ void run(const Window &window, cl::CommandQueue &queue) override;
+
+private:
+ ICLTensor *_input;
+ ICLTensor *_output;
+};
} // namespace arm_compute
#endif /*ARM_COMPUTE_CLINSTANCENORMALIZATIONLAYERKERNEL_H */
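The split introduced above (CLComputeMeanVariance producing a (channels, 2, batches) statistics tensor, then CLInstanceNormalizationLayerKernel consuming it via the new mean_var input) corresponds conceptually to the host-side sketch below. It assumes a single batch, a planar per-channel layout, and that gamma, beta and epsilon are the values carried by InstanceNormalizationLayerKernelInfo; the real kernels also deal with NHWC, batching and mixed-precision accumulation.

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Illustrative two-stage instance normalization (not the library implementation).
    void instance_norm(std::vector<float> &x, size_t channels, size_t plane_size,
                       float gamma, float beta, float epsilon)
    {
        for (size_t c = 0; c < channels; ++c)
        {
            float *plane = x.data() + c * plane_size;

            // Stage 1: per-channel mean and variance (the role of CLComputeMeanVariance)
            float sum = 0.f, sum_sq = 0.f;
            for (size_t i = 0; i < plane_size; ++i)
            {
                sum += plane[i];
                sum_sq += plane[i] * plane[i];
            }
            const float mean = sum / plane_size;
            const float var  = sum_sq / plane_size - mean * mean;

            // Stage 2: normalise using the precomputed statistics
            const float scale = gamma / std::sqrt(var + epsilon);
            for (size_t i = 0; i < plane_size; ++i)
            {
                plane[i] = (plane[i] - mean) * scale + beta;
            }
        }
    }

Keeping the statistics in a separate tensor is what lets the second kernel take mean_var as an extra argument instead of recomputing the reduction for every plane.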
diff --git a/src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp b/src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp
index 9e91d98f7c..9ed9d7c5b0 100644
--- a/src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp
+++ b/src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,11 +29,12 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
#include "arm_compute/core/Validate.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
-
#include "support/StringSupport.h"
namespace arm_compute
@@ -42,9 +43,8 @@ namespace
{
constexpr int max_input_tensor_dim = 3;
-constexpr unsigned int num_elems_processed_per_iteration = 16;
-
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, int axis, float epsilon)
+Status
+validate_arguments(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, int axis, float epsilon)
{
ARM_COMPUTE_UNUSED(epsilon);
@@ -54,14 +54,15 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *sum, cons
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(actual_axis > 2, "Actual axis greater than 2 is not supported");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(actual_axis >= TensorShape::num_max_dimensions, "Actual normalization axis greater than max number of dimensions");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(actual_axis >= TensorShape::num_max_dimensions,
+ "Actual normalization axis greater than max number of dimensions");
// Reduce shape on axis
TensorShape sum_shape = input->tensor_shape();
sum_shape.set(actual_axis, 1);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(sum->tensor_shape(), sum_shape);
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
@@ -71,40 +72,30 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *sum, cons
return Status{};
}
-
-std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
-{
- Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
-
- // Output tensor auto initialization if not yet initialized
- auto_init_if_empty(*output, input->tensor_shape(), 1, input->data_type());
-
- AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
-
- bool window_changed = update_window_and_padding(win, input_access, output_access);
- output_access.set_valid_region(win, input->valid_region());
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
-
- return std::make_tuple(err, win);
-}
} // namespace
CLL2NormalizeLayerKernel::CLL2NormalizeLayerKernel()
: _input(nullptr), _sum(nullptr), _output(nullptr), _actual_axis(0), _epsilon(1e-12)
{
+ _type = CLKernelType::ELEMENTWISE;
}
-void CLL2NormalizeLayerKernel::configure(const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, int axis, float epsilon)
+void CLL2NormalizeLayerKernel::configure(
+ const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, int axis, float epsilon)
{
configure(CLKernelLibrary::get().get_compile_context(), input, sum, output, axis, epsilon);
}
-void CLL2NormalizeLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, int axis, float epsilon)
+void CLL2NormalizeLayerKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ const ICLTensor *sum,
+ ICLTensor *output,
+ int axis,
+ float epsilon)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, sum, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), sum->info(), output->info(), axis, epsilon));
+ auto padding_info = get_padding_info({input, sum, output});
_input = input;
_sum = sum;
@@ -112,35 +103,40 @@ void CLL2NormalizeLayerKernel::configure(const CLCompileContext &compile_context
_actual_axis = wrap_around(axis, max_input_tensor_dim);
_epsilon = epsilon;
+ const unsigned int vec_size_x =
+ adjust_vec_size(max_cl_vector_width / input->info()->element_size(), input->info()->dimension(0));
+ const int vec_size_x_leftovers = input->info()->dimension(0) % vec_size_x;
+
// Set build options
- std::set<std::string> build_opts;
- build_opts.emplace(("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())));
- build_opts.emplace(("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration)));
+ CLBuildOptions build_opts;
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+ build_opts.add_option("-DVEC_SIZE_X=" + support::cpp11::to_string(vec_size_x));
+ build_opts.add_option("-DVEC_SIZE_LEFTOVER_X=" + support::cpp11::to_string(vec_size_x_leftovers));
// Create kernel
std::string kernel_name;
unsigned int idx = 0;
- switch(_actual_axis)
+ switch (_actual_axis)
{
case 0:
- kernel_name = "x";
+ kernel_name = "l2_normalize_x";
idx = num_arguments_per_2D_tensor() * 3;
break;
case 1:
- kernel_name = "y";
+ kernel_name = "l2_normalize_y";
idx = num_arguments_per_2D_tensor() * 3;
break;
case 2:
- kernel_name = "z";
+ kernel_name = "l2_normalize_z";
idx = num_arguments_per_3D_tensor() * 3;
break;
default:
ARM_COMPUTE_ERROR("Axis not supported");
}
- _kernel = create_kernel(compile_context, "l2_normalize_" + kernel_name, build_opts);
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Set epsilon argument
- if(input->info()->data_type() == DataType::F32)
+ if (input->info()->data_type() == DataType::F32)
{
_kernel.setArg<cl_float>(idx, _epsilon);
}
@@ -150,17 +146,19 @@ void CLL2NormalizeLayerKernel::configure(const CLCompileContext &compile_context
}
// Configure kernel window
- auto win_config = validate_and_configure_window(_input->info(), _output->info());
- ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
+ Window win = calculate_max_window(*input->info(), Steps(vec_size_x));
+
+ // Output tensor auto initialization if not yet initialized
+ auto_init_if_empty(*output->info(), input->info()->tensor_shape(), 1, input->info()->data_type());
- ICLKernel::configure_internal(std::get<1>(win_config));
+ ICLKernel::configure_internal(win);
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-Status CLL2NormalizeLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, int axis, float epsilon)
+Status CLL2NormalizeLayerKernel::validate(
+ const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, int axis, float epsilon)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, sum, output, axis, epsilon));
- ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), output->clone().get())));
-
return Status{};
}
@@ -171,7 +169,7 @@ void CLL2NormalizeLayerKernel::run(const Window &window, cl::CommandQueue &queue
Window window_sum(window);
- switch(_actual_axis)
+ switch (_actual_axis)
{
case 0:
{
@@ -185,8 +183,7 @@ void CLL2NormalizeLayerKernel::run(const Window &window, cl::CommandQueue &queue
add_2D_tensor_argument(idx, _sum, sum_slice);
add_2D_tensor_argument(idx, _output, in_slice);
enqueue(queue, *this, in_slice, lws_hint());
- }
- while(window.slide_window_slice_2D(in_slice) && window.slide_window_slice_2D(sum_slice));
+ } while (window.slide_window_slice_2D(in_slice) && window.slide_window_slice_2D(sum_slice));
}
break;
case 1:
@@ -201,8 +198,7 @@ void CLL2NormalizeLayerKernel::run(const Window &window, cl::CommandQueue &queue
add_2D_tensor_argument(idx, _sum, sum_slice);
add_2D_tensor_argument(idx, _output, in_slice);
enqueue(queue, *this, in_slice, lws_hint());
- }
- while(window.slide_window_slice_2D(in_slice) && window.slide_window_slice_2D(sum_slice));
+ } while (window.slide_window_slice_2D(in_slice) && window.slide_window_slice_2D(sum_slice));
}
break;
case 2:
@@ -217,8 +213,7 @@ void CLL2NormalizeLayerKernel::run(const Window &window, cl::CommandQueue &queue
add_3D_tensor_argument(idx, _sum, sum_slice);
add_3D_tensor_argument(idx, _output, in_slice);
enqueue(queue, *this, in_slice, lws_hint());
- }
- while(window.slide_window_slice_3D(in_slice) && window.slide_window_slice_3D(sum_slice));
+ } while (window.slide_window_slice_3D(in_slice) && window.slide_window_slice_3D(sum_slice));
}
break;
default:
diff --git a/src/core/CL/kernels/CLL2NormalizeLayerKernel.h b/src/core/CL/kernels/CLL2NormalizeLayerKernel.h
index edc0585217..5c9ab94ce5 100644
--- a/src/core/CL/kernels/CLL2NormalizeLayerKernel.h
+++ b/src/core/CL/kernels/CLL2NormalizeLayerKernel.h
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_CLL2NORMALIZELAYERKERNEL_H
#include "arm_compute/core/Types.h"
+
#include "src/core/CL/ICLKernel.h"
namespace arm_compute
@@ -70,7 +71,12 @@ public:
* @param[in] axis Axis along which to reduce. Negative values wrap around. Maximum supported actual reduction axis : 2
* @param[in] epsilon Lower bound value for the normalization.
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, int axis, float epsilon);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ const ICLTensor *sum,
+ ICLTensor *output,
+ int axis,
+ float epsilon);
/** Static function to check if given info will lead to a valid configuration of @ref CLL2NormalizeLayerKernel.
*
@@ -84,7 +90,8 @@ public:
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, int axis, float epsilon);
+ static Status
+ validate(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, int axis, float epsilon);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
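As a rough illustration of the X-axis case (l2_normalize_x) of the kernel above: assuming the sum tensor already holds the per-row sum of squares and that epsilon acts as the lower bound mentioned in the parameter documentation, the computation is a simple scale by rsqrt(max(sum, epsilon)). The function below is a sketch under those assumptions, not the OpenCL implementation.

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Illustrative L2 normalization along the X axis.
    void l2_normalize_x(const std::vector<float> &in, const std::vector<float> &sum,
                        std::vector<float> &out, size_t row_size, float epsilon)
    {
        out.resize(in.size());
        for (size_t r = 0; r < sum.size(); ++r)
        {
            // `sum[r]` is the pre-reduced sum of squares of row r (the kernel's `sum` input)
            const float inv_norm = 1.f / std::sqrt(std::max(sum[r], epsilon));
            for (size_t i = 0; i < row_size; ++i)
            {
                out[r * row_size + i] = in[r * row_size + i] * inv_norm;
            }
        }
    }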
diff --git a/src/core/CL/kernels/CLLKTrackerKernel.cpp b/src/core/CL/kernels/CLLKTrackerKernel.cpp
deleted file mode 100644
index a439c2448e..0000000000
--- a/src/core/CL/kernels/CLLKTrackerKernel.cpp
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLLKTrackerKernel.h"
-
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLArray.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Coordinates.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Validate.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/helpers/WindowHelpers.h"
-
-#include <cmath>
-
-using namespace arm_compute;
-
-void CLLKTrackerInitKernel::configure(const ICLKeyPointArray *old_points, const ICLKeyPointArray *new_points_estimates,
- ICLLKInternalKeypointArray *old_points_internal, ICLLKInternalKeypointArray *new_points_internal,
- bool use_initial_estimate, size_t level, size_t num_levels, float pyramid_scale)
-{
- configure(CLKernelLibrary::get().get_compile_context(), old_points, new_points_estimates, old_points_internal, new_points_internal, use_initial_estimate, level, num_levels, pyramid_scale);
-}
-
-void CLLKTrackerInitKernel::configure(const CLCompileContext &compile_context, const ICLKeyPointArray *old_points, const ICLKeyPointArray *new_points_estimates,
- ICLLKInternalKeypointArray *old_points_internal, ICLLKInternalKeypointArray *new_points_internal,
- bool use_initial_estimate, size_t level, size_t num_levels, float pyramid_scale)
-
-{
- ARM_COMPUTE_ERROR_ON(old_points == nullptr);
- ARM_COMPUTE_ERROR_ON(old_points_internal == nullptr);
- ARM_COMPUTE_ERROR_ON(new_points_internal == nullptr);
-
- const float scale = std::pow(pyramid_scale, level);
-
- // Create kernel
- std::string kernel_name = "init_level";
- if(level == (num_levels - 1))
- {
- kernel_name += (use_initial_estimate) ? std::string("_max_initial_estimate") : std::string("_max");
- }
- _kernel = create_kernel(compile_context, kernel_name);
-
- // Set static kernel arguments
- unsigned int idx = 0;
- if(level == (num_levels - 1))
- {
- _kernel.setArg(idx++, old_points->cl_buffer());
- if(use_initial_estimate)
- {
- _kernel.setArg(idx++, new_points_estimates->cl_buffer());
- }
- }
- _kernel.setArg(idx++, old_points_internal->cl_buffer());
- _kernel.setArg(idx++, new_points_internal->cl_buffer());
- _kernel.setArg<cl_float>(idx++, scale);
-
- // Configure kernel window
- Window window;
- window.set(Window::DimX, Window::Dimension(0, old_points->num_values(), 1));
- window.set(Window::DimY, Window::Dimension(0, 1, 1));
- ICLKernel::configure_internal(window);
-}
-
-void CLLKTrackerInitKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- enqueue(queue, *this, window, lws_hint());
-}
-
-void CLLKTrackerFinalizeKernel::configure(ICLLKInternalKeypointArray *new_points_internal, ICLKeyPointArray *new_points)
-{
- configure(CLKernelLibrary::get().get_compile_context(), new_points_internal, new_points);
-}
-
-void CLLKTrackerFinalizeKernel::configure(const CLCompileContext &compile_context, ICLLKInternalKeypointArray *new_points_internal, ICLKeyPointArray *new_points)
-
-{
- ARM_COMPUTE_ERROR_ON(new_points_internal == nullptr);
- ARM_COMPUTE_ERROR_ON(new_points == nullptr);
-
- // Create kernel
- _kernel = create_kernel(compile_context, "finalize");
-
- // Set static kernel arguments
- unsigned int idx = 0;
- _kernel.setArg(idx++, new_points_internal->cl_buffer());
- _kernel.setArg(idx++, new_points->cl_buffer());
-
- // Configure kernel window
- Window window;
- window.set(Window::DimX, Window::Dimension(0, new_points_internal->num_values(), 1));
- window.set(Window::DimY, Window::Dimension(0, 1, 1));
- ICLKernel::configure_internal(window);
-}
-
-void CLLKTrackerFinalizeKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- enqueue(queue, *this, window, lws_hint());
-}
-
-CLLKTrackerStage0Kernel::CLLKTrackerStage0Kernel()
- : _old_input(nullptr), _old_scharr_gx(nullptr), _old_scharr_gy(nullptr)
-{
-}
-
-void CLLKTrackerStage0Kernel::configure(const ICLTensor *old_input, const ICLTensor *old_scharr_gx, const ICLTensor *old_scharr_gy,
- ICLLKInternalKeypointArray *old_points_internal, ICLLKInternalKeypointArray *new_points_internal,
- ICLCoefficientTableArray *coeff_table, ICLOldValArray *old_ival,
- size_t window_dimension, size_t level)
-{
- configure(CLKernelLibrary::get().get_compile_context(), old_input, old_scharr_gx, old_scharr_gy, old_points_internal, new_points_internal, coeff_table, old_ival, window_dimension, level);
-}
-
-void CLLKTrackerStage0Kernel::configure(const CLCompileContext &compile_context, const ICLTensor *old_input, const ICLTensor *old_scharr_gx, const ICLTensor *old_scharr_gy,
- ICLLKInternalKeypointArray *old_points_internal, ICLLKInternalKeypointArray *new_points_internal,
- ICLCoefficientTableArray *coeff_table, ICLOldValArray *old_ival,
- size_t window_dimension, size_t level)
-
-{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(old_input, 1, DataType::U8);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(old_scharr_gx, 1, DataType::S16);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(old_scharr_gy, 1, DataType::S16);
- ARM_COMPUTE_ERROR_ON(old_points_internal == nullptr);
- ARM_COMPUTE_ERROR_ON(new_points_internal == nullptr);
- ARM_COMPUTE_ERROR_ON(coeff_table == nullptr);
- ARM_COMPUTE_ERROR_ON(old_ival == nullptr);
-
- _old_input = old_input;
- _old_scharr_gx = old_scharr_gx;
- _old_scharr_gy = old_scharr_gy;
-
- // Configure kernel window
- Window window;
- window.set(Window::DimX, Window::Dimension(0, new_points_internal->num_values(), 1));
- window.set(Window::DimY, Window::Dimension(0, 1, 1));
-
- const ValidRegion valid_region = intersect_valid_regions(
- old_input->info()->valid_region(),
- old_scharr_gx->info()->valid_region(),
- old_scharr_gy->info()->valid_region());
-
- update_window_and_padding(window,
- AccessWindowStatic(old_input->info(), valid_region.start(0), valid_region.start(1),
- valid_region.end(0), valid_region.end(1)),
- AccessWindowStatic(old_scharr_gx->info(), valid_region.start(0), valid_region.start(1),
- valid_region.end(0), valid_region.end(1)),
- AccessWindowStatic(old_scharr_gy->info(), valid_region.start(0), valid_region.start(1),
- valid_region.end(0), valid_region.end(1)));
-
- ICLKernel::configure_internal(window);
-
- // Initialize required variables
- const int level0 = (level == 0) ? 1 : 0;
- const int window_size = window_dimension;
- const int window_size_squared = window_dimension * window_dimension;
- const int window_size_half = window_dimension / 2;
- const float eig_const = 1.0f / (2.0f * window_size_squared);
- const cl_float3 border_limits =
- {
- {
- // -1 because we load 2 values at once for bilinear interpolation
- static_cast<cl_float>(valid_region.end(0) - window_size - 1),
- static_cast<cl_float>(valid_region.end(1) - window_size - 1),
- static_cast<cl_float>(valid_region.start(0))
- }
- };
-
- // Create kernel
- _kernel = create_kernel(compile_context, "lktracker_stage0");
-
- // Set arguments
- unsigned int idx = 3 * num_arguments_per_2D_tensor();
- _kernel.setArg(idx++, old_points_internal->cl_buffer());
- _kernel.setArg(idx++, new_points_internal->cl_buffer());
- _kernel.setArg(idx++, coeff_table->cl_buffer());
- _kernel.setArg(idx++, old_ival->cl_buffer());
- _kernel.setArg<cl_int>(idx++, window_size);
- _kernel.setArg<cl_int>(idx++, window_size_squared);
- _kernel.setArg<cl_int>(idx++, window_size_half);
- _kernel.setArg<cl_float3>(idx++, border_limits);
- _kernel.setArg<cl_float>(idx++, eig_const);
- _kernel.setArg<cl_int>(idx++, level0);
-}
-
-void CLLKTrackerStage0Kernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- // Set static tensor arguments. Setting here as allocation might be deferred.
- unsigned int idx = 0;
- add_2D_tensor_argument(idx, _old_input, window);
- add_2D_tensor_argument(idx, _old_scharr_gx, window);
- add_2D_tensor_argument(idx, _old_scharr_gy, window);
-
- enqueue(queue, *this, window, lws_hint());
-}
-
-CLLKTrackerStage1Kernel::CLLKTrackerStage1Kernel()
- : _new_input(nullptr)
-{
-}
-
-void CLLKTrackerStage1Kernel::configure(const ICLTensor *new_input, ICLLKInternalKeypointArray *new_points_internal, ICLCoefficientTableArray *coeff_table, ICLOldValArray *old_ival,
- Termination termination, float epsilon, size_t num_iterations, size_t window_dimension, size_t level)
-{
- configure(CLKernelLibrary::get().get_compile_context(), new_input, new_points_internal, coeff_table, old_ival, termination, epsilon, num_iterations, window_dimension, level);
-}
-
-void CLLKTrackerStage1Kernel::configure(const CLCompileContext &compile_context, const ICLTensor *new_input, ICLLKInternalKeypointArray *new_points_internal, ICLCoefficientTableArray *coeff_table,
- ICLOldValArray *old_ival,
- Termination termination, float epsilon, size_t num_iterations, size_t window_dimension, size_t level)
-
-{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(new_input, 1, DataType::U8);
- ARM_COMPUTE_ERROR_ON(new_points_internal == nullptr);
- ARM_COMPUTE_ERROR_ON(coeff_table == nullptr);
- ARM_COMPUTE_ERROR_ON(old_ival == nullptr);
-
- _new_input = new_input;
-
- // Configure kernel window
- Window window;
- window.set(Window::DimX, Window::Dimension(0, new_points_internal->num_values(), 1));
- window.set(Window::DimY, Window::Dimension(0, 1, 1));
-
- const ValidRegion &valid_region = new_input->info()->valid_region();
-
- update_window_and_padding(window,
- AccessWindowStatic(new_input->info(), valid_region.start(0), valid_region.start(1),
- valid_region.end(0), valid_region.end(1)));
-
- ICLKernel::configure_internal(window);
-
- // Initialize required variables
- const int level0 = (level == 0) ? 1 : 0;
- const int window_size = window_dimension;
- const int window_size_squared = window_dimension * window_dimension;
- const int window_size_half = window_dimension / 2;
- const float eig_const = 1.0f / (2.0f * window_size_squared);
- const cl_float3 border_limits =
- {
- {
- // -1 because we load 2 values at once for bilinear interpolation
- static_cast<cl_float>(valid_region.end(0) - window_size - 1),
- static_cast<cl_float>(valid_region.end(1) - window_size - 1),
- static_cast<cl_float>(valid_region.start(0))
- }
- };
-
- // Set maximum number of iterations used for convergence
- const size_t max_iterations = 1000;
- num_iterations = (termination == Termination::TERM_CRITERIA_EPSILON) ? max_iterations : num_iterations;
-
- const int term_epsilon = (termination == Termination::TERM_CRITERIA_EPSILON || termination == Termination::TERM_CRITERIA_BOTH) ? 1 : 0;
-
- // Create kernel
- _kernel = create_kernel(compile_context, "lktracker_stage1");
-
- // Set static kernel arguments
- unsigned int idx = num_arguments_per_2D_tensor();
- _kernel.setArg(idx++, new_points_internal->cl_buffer());
- _kernel.setArg(idx++, coeff_table->cl_buffer());
- _kernel.setArg(idx++, old_ival->cl_buffer());
- _kernel.setArg<cl_int>(idx++, window_size);
- _kernel.setArg<cl_int>(idx++, window_size_squared);
- _kernel.setArg<cl_int>(idx++, window_size_half);
- _kernel.setArg<cl_int>(idx++, num_iterations);
- _kernel.setArg<cl_float>(idx++, epsilon);
- _kernel.setArg<cl_float3>(idx++, border_limits);
- _kernel.setArg<cl_float>(idx++, eig_const);
- _kernel.setArg<cl_int>(idx++, level0);
- _kernel.setArg<cl_int>(idx++, term_epsilon);
-}
-
-void CLLKTrackerStage1Kernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- // Set static tensor arguments. Setting here as allocation might be deferred.
- unsigned int idx = 0;
- add_2D_tensor_argument(idx, _new_input, window);
-
- enqueue(queue, *this, window, lws_hint());
-}
diff --git a/src/core/CL/kernels/CLLKTrackerKernel.h b/src/core/CL/kernels/CLLKTrackerKernel.h
deleted file mode 100644
index 2d2966854a..0000000000
--- a/src/core/CL/kernels/CLLKTrackerKernel.h
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLLKTRACKERKERNEL_H
-#define ARM_COMPUTE_CLLKTRACKERKERNEL_H
-
-#include "arm_compute/core/CL/ICLArray.h"
-#include "arm_compute/core/Types.h"
-#include "src/core/CL/ICLKernel.h"
-
-#include <cstddef>
-#include <cstdint>
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface to run the initialization step of LKTracker */
-class CLLKTrackerInitKernel : public ICLKernel
-{
-public:
- /** Initialise the kernel input and output
- *
- * @param[in] old_points Pointer to the @ref ICLKeyPointArray storing old key points
- * @param[in] new_points_estimates Pointer to the @ref ICLKeyPointArray storing new estimates key points
- * @param[out] old_points_internal Pointer to the array of internal @ref CLLKInternalKeypoint old points
- * @param[out] new_points_internal Pointer to the array of internal @ref CLLKInternalKeypoint new points
- * @param[in] use_initial_estimate The flag to indicate whether the initial estimated position should be used
- * @param[in] level The pyramid level
- * @param[in] num_levels The number of pyramid levels
- * @param[in] pyramid_scale Scale factor used for generating the pyramid
- */
- void configure(const ICLKeyPointArray *old_points, const ICLKeyPointArray *new_points_estimates,
- ICLLKInternalKeypointArray *old_points_internal, ICLLKInternalKeypointArray *new_points_internal,
- bool use_initial_estimate, size_t level, size_t num_levels, float pyramid_scale);
- /** Initialise the kernel input and output
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] old_points Pointer to the @ref ICLKeyPointArray storing old key points
- * @param[in] new_points_estimates Pointer to the @ref ICLKeyPointArray storing new estimates key points
- * @param[out] old_points_internal Pointer to the array of internal @ref CLLKInternalKeypoint old points
- * @param[out] new_points_internal Pointer to the array of internal @ref CLLKInternalKeypoint new points
- * @param[in] use_initial_estimate The flag to indicate whether the initial estimated position should be used
- * @param[in] level The pyramid level
- * @param[in] num_levels The number of pyramid levels
- * @param[in] pyramid_scale Scale factor used for generating the pyramid
- */
- void configure(const CLCompileContext &compile_context, const ICLKeyPointArray *old_points, const ICLKeyPointArray *new_points_estimates,
- ICLLKInternalKeypointArray *old_points_internal, ICLLKInternalKeypointArray *new_points_internal,
- bool use_initial_estimate, size_t level, size_t num_levels, float pyramid_scale);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-};
-
-/** Interface to run the finalize step of LKTracker, where it truncates the coordinates stored in new_points array */
-class CLLKTrackerFinalizeKernel : public ICLKernel
-{
-public:
- /** Initialise the kernel input and output
- *
- * @param[in] new_points_internal Pointer to the array of internal @ref CLLKInternalKeypoint new points
- * @param[out] new_points Pointer to the @ref ICLKeyPointArray storing new key points
- */
- void configure(ICLLKInternalKeypointArray *new_points_internal, ICLKeyPointArray *new_points);
- /** Initialise the kernel input and output
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] new_points_internal Pointer to the array of internal @ref CLLKInternalKeypoint new points
- * @param[out] new_points Pointer to the @ref ICLKeyPointArray storing new key points
- */
- void configure(const CLCompileContext &compile_context, ICLLKInternalKeypointArray *new_points_internal, ICLKeyPointArray *new_points);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-};
-
-/** Interface to run the first stage of LKTracker, where A11, A12, A22, min_eig, ival, ixval and iyval are computed */
-class CLLKTrackerStage0Kernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLLKTrackerStage0Kernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLLKTrackerStage0Kernel(const CLLKTrackerStage0Kernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLLKTrackerStage0Kernel &operator=(const CLLKTrackerStage0Kernel &) = delete;
- /** Allow instances of this class to be moved */
- CLLKTrackerStage0Kernel(CLLKTrackerStage0Kernel &&) = default;
- /** Allow instances of this class to be moved */
- CLLKTrackerStage0Kernel &operator=(CLLKTrackerStage0Kernel &&) = default;
- /** Initialise the kernel input and output
- *
- * @param[in] old_input Pointer to the input old tensor. Data types supported: U8
- * @param[in] old_scharr_gx Pointer to the input scharr X tensor. Data types supported: S16
- * @param[in] old_scharr_gy Pointer to the input scharr Y tensor. Data types supported: S16
- * @param[in] old_points_internal Pointer to the array of CLLKInternalKeypoint old points
- * @param[in, out] new_points_internal Pointer to the array of CLLKInternalKeypoint new points
- * @param[out] coeff_table Pointer to the array holding the Spatial Gradient coefficients
- * @param[out] old_ival Pointer to the array holding internal values
- * @param[in] window_dimension The size of the window on which to perform the algorithm
- * @param[in] level The pyramid level
- */
- void configure(const ICLTensor *old_input, const ICLTensor *old_scharr_gx, const ICLTensor *old_scharr_gy,
- ICLLKInternalKeypointArray *old_points_internal, ICLLKInternalKeypointArray *new_points_internal,
- ICLCoefficientTableArray *coeff_table, ICLOldValArray *old_ival,
- size_t window_dimension, size_t level);
- /** Initialise the kernel input and output
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] old_input Pointer to the input old tensor. Data types supported: U8
- * @param[in] old_scharr_gx Pointer to the input scharr X tensor. Data types supported: S16
- * @param[in] old_scharr_gy Pointer to the input scharr Y tensor. Data types supported: S16
- * @param[in] old_points_internal Pointer to the array of CLLKInternalKeypoint old points
- * @param[in, out] new_points_internal Pointer to the array of CLLKInternalKeypoint new points
- * @param[out] coeff_table Pointer to the array holding the Spatial Gradient coefficients
- * @param[out] old_ival Pointer to the array holding internal values
- * @param[in] window_dimension The size of the window on which to perform the algorithm
- * @param[in] level The pyramid level
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *old_input, const ICLTensor *old_scharr_gx, const ICLTensor *old_scharr_gy,
- ICLLKInternalKeypointArray *old_points_internal, ICLLKInternalKeypointArray *new_points_internal,
- ICLCoefficientTableArray *coeff_table, ICLOldValArray *old_ival,
- size_t window_dimension, size_t level);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_old_input;
- const ICLTensor *_old_scharr_gx;
- const ICLTensor *_old_scharr_gy;
-};
-
-/** Interface to run the second stage of LKTracker, where the motion vectors of the given points are computed */
-class CLLKTrackerStage1Kernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLLKTrackerStage1Kernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLLKTrackerStage1Kernel(const CLLKTrackerStage1Kernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLLKTrackerStage1Kernel &operator=(const CLLKTrackerStage1Kernel &) = delete;
- /** Allow instances of this class to be moved */
- CLLKTrackerStage1Kernel(CLLKTrackerStage1Kernel &&) = default;
- /** Allow instances of this class to be moved */
- CLLKTrackerStage1Kernel &operator=(CLLKTrackerStage1Kernel &&) = default;
- /** Initialise the kernel input and output
- *
- * @param[in] new_input Pointer to the input new tensor. Data types supported: U8
- * @param[in, out] new_points_internal Pointer to the array of CLLKInternalKeypoint for new points
- * @param[in] coeff_table Pointer to the array holding the Spatial Gradient coefficients
- * @param[in] old_ival Pointer to the array holding internal values
- * @param[in] termination The criteria to terminate the search of each keypoint.
- * @param[in] epsilon The error for terminating the algorithm
- * @param[in] num_iterations The maximum number of iterations before terminating the algorithm
- * @param[in] window_dimension The size of the window on which to perform the algorithm
- * @param[in] level The pyramid level
- */
- void configure(const ICLTensor *new_input, ICLLKInternalKeypointArray *new_points_internal, ICLCoefficientTableArray *coeff_table, ICLOldValArray *old_ival,
- Termination termination, float epsilon, size_t num_iterations, size_t window_dimension, size_t level);
- /** Initialise the kernel input and output
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] new_input Pointer to the input new tensor. Data types supported: U8
- * @param[in, out] new_points_internal Pointer to the array of CLLKInternalKeypoint for new points
- * @param[in] coeff_table Pointer to the array holding the Spatial Gradient coefficients
- * @param[in] old_ival Pointer to the array holding internal values
- * @param[in] termination The criteria to terminate the search of each keypoint.
- * @param[in] epsilon The error for terminating the algorithm
- * @param[in] num_iterations The maximum number of iterations before terminating the algorithm
- * @param[in] window_dimension The size of the window on which to perform the algorithm
- * @param[in] level The pyramid level
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *new_input, ICLLKInternalKeypointArray *new_points_internal, ICLCoefficientTableArray *coeff_table, ICLOldValArray *old_ival,
- Termination termination, float epsilon, size_t num_iterations, size_t window_dimension, size_t level);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_new_input;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLLKTRACKERKERNEL_H */
diff --git a/src/core/CL/kernels/CLMaxUnpoolingLayerKernel.cpp b/src/core/CL/kernels/CLMaxUnpoolingLayerKernel.cpp
index d0ac12dcd7..e560f1de4a 100644
--- a/src/core/CL/kernels/CLMaxUnpoolingLayerKernel.cpp
+++ b/src/core/CL/kernels/CLMaxUnpoolingLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,9 +29,9 @@
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
@@ -43,26 +43,31 @@ using namespace misc::shape_calculator;
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info, const ITensorInfo *indices)
+Status validate_arguments(const ITensorInfo *input,
+ const ITensorInfo *output,
+ const PoolingLayerInfo &pool_info,
+ const ITensorInfo *indices)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output, indices);
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED,
+ DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(indices, 1, DataType::U32);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, indices);
- int pool_stride_x = 0;
- int pool_stride_y = 0;
- PoolingType pool_type = pool_info.pool_type;
- const PadStrideInfo pad_stride_info = pool_info.pad_stride_info;
+ int pool_stride_x = 0;
+ int pool_stride_y = 0;
+ PoolingType pool_type = pool_info.pool_type;
+ const PadStrideInfo pad_stride_info = pool_info.pad_stride_info;
std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
- const int pool_size_x = pool_info.pool_size.width;
- const int pool_size_y = pool_info.pool_size.height;
+ const int pool_size_x = pool_info.pool_size.width;
+ const int pool_size_y = pool_info.pool_size.height;
const Size2D pool_size(pool_size_x, pool_size_y);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(pool_type != PoolingType::MAX, "Pooling indices only supported for MAX pooling method");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(pool_type != PoolingType::MAX,
+ "Pooling indices only supported for MAX pooling method");
ARM_COMPUTE_RETURN_ERROR_ON_MSG((pool_size != Size2D(2, 2)), "Pooling indices only supported for pool size 2x2");
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
@@ -72,16 +77,20 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
}
} // namespace
-CLMaxUnpoolingLayerKernel::CLMaxUnpoolingLayerKernel()
- : _input(nullptr), _output(nullptr), _indices(nullptr)
+CLMaxUnpoolingLayerKernel::CLMaxUnpoolingLayerKernel() : _input(nullptr), _output(nullptr), _indices(nullptr)
{
+ _type = CLKernelType::POOL;
}
-void CLMaxUnpoolingLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *indices, ICLTensor *output, const PoolingLayerInfo &pool_info)
+void CLMaxUnpoolingLayerKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ const ICLTensor *indices,
+ ICLTensor *output,
+ const PoolingLayerInfo &pool_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), pool_info, indices->info()));
- auto padding_info = get_padding_info({ input, indices, output });
+ auto padding_info = get_padding_info({input, indices, output});
_input = input;
_output = output;
@@ -119,7 +128,10 @@ void CLMaxUnpoolingLayerKernel::configure(const CLCompileContext &compile_contex
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-Status CLMaxUnpoolingLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *indices, const ITensorInfo *output, const PoolingLayerInfo &pool_info)
+Status CLMaxUnpoolingLayerKernel::validate(const ITensorInfo *input,
+ const ITensorInfo *indices,
+ const ITensorInfo *output,
+ const PoolingLayerInfo &pool_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, indices, output);
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, pool_info, indices));
@@ -140,7 +152,6 @@ void CLMaxUnpoolingLayerKernel::run(const Window &window, cl::CommandQueue &queu
add_3D_tensor_argument(idx, _output, slice);
add_3D_tensor_argument(idx, _indices, slice);
enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_3D(slice));
+ } while (window.slide_window_slice_3D(slice));
}
} // namespace arm_compute
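
For intuition, the max-unpooling operation handled by this kernel is a scatter: each input element is written to the flat output offset recorded by the preceding 2x2 max pooling with indices, and every other output element is left at zero. A minimal host-side sketch of those semantics, not the OpenCL kernel itself (all names here are hypothetical):

    #include <cstdint>
    #include <vector>

    // Scatter `src` values into a zero-initialised `dst` using the flat offsets
    // produced by a preceding max-pooling-with-indices pass (one U32 offset per
    // src element). Positions that were not a pooling maximum stay zero.
    std::vector<float> max_unpool_reference(const std::vector<float>    &src,
                                            const std::vector<uint32_t> &indices,
                                            size_t                       dst_size)
    {
        std::vector<float> dst(dst_size, 0.0f);
        for (size_t i = 0; i < src.size(); ++i)
        {
            dst[indices[i]] = src[i]; // place each pooled maximum back at its original location
        }
        return dst;
    }

The kernel performs the same scatter slice by slice on the GPU, which is why the indices tensor is required to have exactly the input tensor's shape.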
diff --git a/src/core/CL/kernels/CLMaxUnpoolingLayerKernel.h b/src/core/CL/kernels/CLMaxUnpoolingLayerKernel.h
index cc96cf1a1f..eb18a46784 100644
--- a/src/core/CL/kernels/CLMaxUnpoolingLayerKernel.h
+++ b/src/core/CL/kernels/CLMaxUnpoolingLayerKernel.h
@@ -53,26 +53,33 @@ public:
* @param[in] compile_context The compile context to be used.
* @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
* @param[in] indices Tensor containing the offset to store the input elements in the output tensor.
- * @ref opencl::ClPooling with indices should precede this function in order to
+ * @ref CLPoolingLayer with indices should precede this function in order to
* properly reconstruct the output tensor.
* The tensor shape of this tensor has to be equal to the input tensor shape. Data type supported: U32.
* @param[out] output Destination tensor. Data types supported: Same as @p input.
* @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *indices, ICLTensor *output, const PoolingLayerInfo &pool_info);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ const ICLTensor *indices,
+ ICLTensor *output,
+ const PoolingLayerInfo &pool_info);
/** Static function to check if given info will lead to a valid configuration of @ref CLMaxUnpoolingLayerKernel
*
* @param[in] input Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
* @param[in] output Destination tensor info. Data types supported: Same as @p input.
* @param[in] indices TensorInfo associated to the tensor containing the offset to store the input elements in the output tensor.
- * @ref opencl::ClPooling with indices should precede this function in order to
+ * @ref CLPoolingLayer with indices should precede this function in order to
* properly reconstruct the output tensor.
* The tensor shape of this tensor has to be equal to the input tensor shape. Data type supported: U32.
* @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *indices, const ITensorInfo *output, const PoolingLayerInfo &pool_info);
+ static Status validate(const ITensorInfo *input,
+ const ITensorInfo *indices,
+ const ITensorInfo *output,
+ const PoolingLayerInfo &pool_info);
// Inherited methods overridden
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/src/core/CL/kernels/CLMeanStdDevNormalizationKernel.cpp b/src/core/CL/kernels/CLMeanStdDevNormalizationKernel.cpp
index a889df7930..8632bdf623 100644
--- a/src/core/CL/kernels/CLMeanStdDevNormalizationKernel.cpp
+++ b/src/core/CL/kernels/CLMeanStdDevNormalizationKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,6 +29,9 @@
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
@@ -47,39 +50,19 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, f
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
// Checks performed when output is configured
- if((output != nullptr) && (output->total_size() != 0))
+ if ((output != nullptr) && (output->total_size() != 0))
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
}
return Status{};
}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
-{
- if(output != nullptr)
- {
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- // Output auto inizialitation if not yet initialized
- auto_init_if_empty(*output, *input);
- }
-
- const unsigned int num_elems_processed_per_iteration = 16 / input->element_size();
-
- // This kernel doesn't need padding
- Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
- if(output != nullptr)
- {
- output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
- }
-
- return std::make_pair(Status{}, win);
-}
} // namespace
CLMeanStdDevNormalizationKernel::CLMeanStdDevNormalizationKernel()
: _input(nullptr), _output(nullptr), _run_in_place(false)
{
+ _type = CLKernelType::ELEMENTWISE;
}
void CLMeanStdDevNormalizationKernel::configure(ICLTensor *input, ICLTensor *output, float epsilon)
@@ -87,18 +70,28 @@ void CLMeanStdDevNormalizationKernel::configure(ICLTensor *input, ICLTensor *out
configure(CLKernelLibrary::get().get_compile_context(), input, output, epsilon);
}
-void CLMeanStdDevNormalizationKernel::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, float epsilon)
+void CLMeanStdDevNormalizationKernel::configure(const CLCompileContext &compile_context,
+ ICLTensor *input,
+ ICLTensor *output,
+ float epsilon)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
_run_in_place = (output == nullptr) || (output == input);
- ARM_COMPUTE_ERROR_THROW_ON(CLMeanStdDevNormalizationKernel::validate(input->info(), (output != nullptr) ? output->info() : nullptr, epsilon));
+ ARM_COMPUTE_ERROR_THROW_ON(CLMeanStdDevNormalizationKernel::validate(
+ input->info(), (output != nullptr) ? output->info() : nullptr, epsilon));
+
+ if (output != nullptr)
+ {
+ auto_init_if_empty(*output->info(), *input->info());
+ }
_input = input;
_output = output;
- const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
+ const unsigned int num_elems_processed_per_iteration =
+ adjust_vec_size(16 / input->info()->element_size(), input->info()->dimension(0));
// Set build options
CLBuildOptions build_opts;
@@ -106,15 +99,15 @@ void CLMeanStdDevNormalizationKernel::configure(const CLCompileContext &compile_
build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
build_opts.add_option("-DEPSILON=" + float_to_string_with_full_precision(epsilon));
build_opts.add_option("-DWIDTH=" + support::cpp11::to_string(input->info()->dimension(0)));
+ build_opts.add_option_if(input->info()->data_type() == DataType::F16, "-DMEANSTDNORM_HALF");
build_opts.add_option_if(_run_in_place, "-DIN_PLACE");
// Create kernel
_kernel = create_kernel(compile_context, "mean_stddev_normalization", build_opts.options());
// Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), (_run_in_place) ? nullptr : output->info());
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
+ Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
+ ICLKernel::configure_internal(win);
// Set config_id for enabling LWS tuning
_config_id = "mean_stddev_normalization_layer_";
@@ -128,7 +121,6 @@ void CLMeanStdDevNormalizationKernel::configure(const CLCompileContext &compile_
Status CLMeanStdDevNormalizationKernel::validate(const ITensorInfo *input, const ITensorInfo *output, float epsilon)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, epsilon));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), (output != nullptr) ? output->clone().get() : nullptr).first);
return Status{};
}
@@ -148,7 +140,6 @@ void CLMeanStdDevNormalizationKernel::run(const Window &window, cl::CommandQueue
add_2D_tensor_argument_if((!_run_in_place), idx, _output, slice);
enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_2D(slice));
+ } while (window.slide_window_slice_2D(slice));
}
} // namespace arm_compute
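
As context for the EPSILON/WIDTH build options and the switch to adjust_vec_size above: mean/stddev normalization works row-wise, shifting each row of the 2D input by its own mean and dividing by its own standard deviation, stabilised by epsilon. A scalar sketch of that math, assuming F32 data and ignoring vectorisation (illustrative only, not the CL kernel; the exact placement of epsilon is an assumption):

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Normalize each row of a row-major (width x height) buffer in place:
    // x <- (x - mean(row)) / sqrt(var(row) + epsilon)
    void mean_stddev_normalize_rows(std::vector<float> &data, size_t width, float epsilon = 1e-8f)
    {
        for (size_t row = 0; row * width < data.size(); ++row)
        {
            float *x      = data.data() + row * width;
            float  sum    = 0.0f;
            float  sum_sq = 0.0f;
            for (size_t i = 0; i < width; ++i)
            {
                sum += x[i];
                sum_sq += x[i] * x[i];
            }
            const float mean = sum / static_cast<float>(width);
            const float var  = sum_sq / static_cast<float>(width) - mean * mean;
            const float inv  = 1.0f / std::sqrt(var + epsilon); // epsilon guards against zero stddev
            for (size_t i = 0; i < width; ++i)
            {
                x[i] = (x[i] - mean) * inv;
            }
        }
    }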
diff --git a/src/core/CL/kernels/CLMeanStdDevNormalizationKernel.h b/src/core/CL/kernels/CLMeanStdDevNormalizationKernel.h
index a1ba2b905e..e02a3c58a3 100644
--- a/src/core/CL/kernels/CLMeanStdDevNormalizationKernel.h
+++ b/src/core/CL/kernels/CLMeanStdDevNormalizationKernel.h
@@ -66,7 +66,10 @@ public:
* @param[out] output (Optional) Destination tensor. It can be nullptr in case of in-place computation. Data type supported: same as @p input
* @param[in] epsilon (Optional) Small float to avoid division by zero in case of zero standard deviation. Defaults to 1e-8.
*/
- void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output = nullptr, float epsilon = 1e-8f);
+ void configure(const CLCompileContext &compile_context,
+ ICLTensor *input,
+ ICLTensor *output = nullptr,
+ float epsilon = 1e-8f);
/** Static function to check if given info will lead to a valid configuration of @ref CLMeanStdDevNormalizationKernel
*
* @param[in] input Source tensor info with 2 dimensions. In case of @p output tensor info = nullptr,
diff --git a/src/core/CL/kernels/CLMinMaxLayerKernel.cpp b/src/core/CL/kernels/CLMinMaxLayerKernel.cpp
deleted file mode 100644
index 7017efa3c2..0000000000
--- a/src/core/CL/kernels/CLMinMaxLayerKernel.cpp
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLMinMaxLayerKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-#include <climits>
-
-using namespace arm_compute;
-using namespace arm_compute::misc::shape_calculator;
-
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() < 3);
-
- if(output->tensor_shape().total_size() > 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
-
- TensorShape output_shape = compute_min_max_shape(input);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
- }
-
- return Status{};
-}
-
-std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
-{
- TensorShape output_shape = compute_min_max_shape(input);
-
- // Output auto initialization if not yet initialized
- auto_init_if_empty(*output, output_shape, 1, input->data_type());
-
- const unsigned int num_elems_processed_per_iteration = 1;
-
- // Configure kernel window
- Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
- AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
- AccessWindowStatic output_access(output, 0, 0, 2, output->dimension(1));
-
- bool window_changed = update_window_and_padding(win, input_access, output_access);
-
- output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_tuple(err, win);
-}
-} // namespace
-
-CLMinMaxLayerKernel::CLMinMaxLayerKernel()
- : _input(nullptr), _output(nullptr)
-{
-}
-
-void CLMinMaxLayerKernel::configure(const ICLTensor *input, ICLTensor *output)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, output);
-}
-
-void CLMinMaxLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
-
- _input = input;
- _output = output;
-
- std::set<std::string> build_opts;
- build_opts.emplace("-DWIDTH=" + support::cpp11::to_string(input->info()->dimension(0)));
- build_opts.emplace("-DHEIGHT=" + support::cpp11::to_string(input->info()->dimension(1)));
- build_opts.emplace("-DDEPTH=" + support::cpp11::to_string(input->info()->dimension(2)));
-
- // Create kernel
- _kernel = create_kernel(compile_context, "minmax_layer", build_opts);
-
- auto win_config = validate_and_configure_window(input->info(), output->info());
-
- ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
-
- ICLKernel::configure_internal(std::get<1>(win_config));
-}
-
-Status CLMinMaxLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output));
- ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), output->clone().get())));
-
- return Status{};
-}
-
-void CLMinMaxLayerKernel::reset(cl::CommandQueue &queue)
-{
- _output->map(queue, true);
-
- Window window_output;
- window_output.use_tensor_dimensions(_output->info()->tensor_shape());
- window_output.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- Iterator output(_output, window_output);
-
- // Reset output
- execute_window_loop(window_output, [&](const Coordinates &)
- {
- auto *ptr = reinterpret_cast<float *>(output.ptr());
- ptr[0] = std::numeric_limits<float>::max();
- ptr[1] = std::numeric_limits<float>::min();
- },
- output);
-
- _output->unmap(queue);
-}
-
-void CLMinMaxLayerKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
-
- Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), 3);
- Window slice = window_collapsed.first_slice_window_3D();
- slice.set(Window::DimX, Window::Dimension(0, 1, 1));
- slice.set(Window::DimY, Window::Dimension(0, 1, 1));
- slice.set(Window::DimZ, Window::Dimension(0, 1, 1));
-
- do
- {
- Window output_slice = slice.shift_dimensions(2);
-
- unsigned int idx = 0;
- // Set inputs
- add_3D_tensor_argument(idx, _input, slice);
- add_1D_tensor_argument(idx, _output, output_slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window_collapsed.slide_window_slice_3D(slice));
-}
diff --git a/src/core/CL/kernels/CLMinMaxLayerKernel.h b/src/core/CL/kernels/CLMinMaxLayerKernel.h
deleted file mode 100644
index aa2ff3f375..0000000000
--- a/src/core/CL/kernels/CLMinMaxLayerKernel.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLMINMAXLAYERKERNEL_H
-#define ARM_COMPUTE_CLMINMAXLAYERKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the kernel to perform min max search on a 3D tensor.
- */
-class CLMinMaxLayerKernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLMinMaxLayerKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLMinMaxLayerKernel(const CLMinMaxLayerKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLMinMaxLayerKernel &operator=(const CLMinMaxLayerKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLMinMaxLayerKernel(CLMinMaxLayerKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLMinMaxLayerKernel &operator=(CLMinMaxLayerKernel &&) = default;
- /** Initialise the kernel's input and output.
- *
- * @param[in] input Input tensor with at least 3 dimensions. The dimensions over the third will be interpreted as batches.Data types supported: F32.
- * @param[out] output Output tensor with shape [2, batches, ...] which stores the minimum and maximum values for each 3D input tensor.
- * The dimensions over the second must match the batched dimensions of the input tensor. Data types supported: F32.
- */
- void configure(const ICLTensor *input, ICLTensor *output);
- /** Initialise the kernel's input and output.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Input tensor with at least 3 dimensions. The dimensions over the third will be interpreted as batches.Data types supported: F32.
- * @param[out] output Output tensor with shape [2, batches, ...] which stores the minimum and maximum values for each 3D input tensor.
- * The dimensions over the second must match the batched dimensions of the input tensor. Data types supported: F32.
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
- /** Static function to check if given info will lead to a valid configuration of @ref CLMinMaxLayerKernel
- *
- * @param[in] input Input tensor info. Data types supported: F32.
- * @param[in] output Output tensor info with shape [2, batches, ...] which stores the minimum and maximum values for each 3D input tensor.
- * The dimensions over the second must match the batched dimensions of the input tensor. Data types supported: F32.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *output);
-
- /** Resets global minimum and maximum
- *
- * @param[in,out] queue Command queue on which to map and unmap the min_max tensor
- */
- void reset(cl::CommandQueue &queue);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_input;
- ICLTensor *_output;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLMINMAXLAYERKERNEL_H */
diff --git a/src/core/CL/kernels/CLNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLNormalizationLayerKernel.cpp
index d1982e77b9..b636c485e7 100644
--- a/src/core/CL/kernels/CLNormalizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLNormalizationLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,7 +29,10 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
+#include "arm_compute/core/utils/StringUtils.h"
#include "arm_compute/core/Window.h"
+
#include "src/core/AccessWindowStatic.h"
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
@@ -37,11 +40,10 @@
#include "src/core/helpers/WindowHelpers.h"
#include "support/StringSupport.h"
-using namespace arm_compute;
-
+namespace arm_compute
+{
namespace
{
-constexpr unsigned int num_elems_processed_per_iteration = 4;
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, NormalizationLayerInfo norm_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
@@ -52,7 +54,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, N
ARM_COMPUTE_RETURN_ERROR_ON_MSG(!(norm_info.norm_size() % 2), "Normalization size should be odd");
// Checks performed when output is configured
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
@@ -62,38 +64,66 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, N
return Status{};
}
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, NormalizationLayerInfo norm_info)
+std::pair<Status, Window>
+validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, NormalizationLayerInfo norm_info)
{
// Output tensor auto initialization if not yet initialized
auto_init_if_empty(*output, *input->clone());
- const unsigned int norm_idx = get_normalization_dimension_index(input->data_layout(), norm_info);
- const bool is_norm_accross_width = norm_idx == 0;
+ bool window_changed = false;
+ Window win;
+ const DataLayout data_layout = input->data_layout();
+ if (data_layout == DataLayout::NCHW)
+ {
+ const unsigned int vec_size_x =
+ adjust_vec_size(max_cl_vector_width / input->element_size(), input->dimension(0));
+ const unsigned int norm_idx = get_normalization_dimension_index(input->data_layout(), norm_info);
+ const bool is_norm_across_width = norm_idx == 0;
- const unsigned int border_width = is_norm_accross_width ? num_elems_processed_per_iteration - 1 : 0;
- const BorderSize border_size = BorderSize(0, border_width);
+ const unsigned int norm_radius = norm_info.norm_size() / 2;
+ // Border / padding calculation:
+        // For NCHW no border handling is implemented in the kernel along the x axis.
+        // This means the x axis is fully padded depending on vec_size_x and norm_size
+        // E.g. for input x dimension = 3, norm_size = 3 (radius = 1), vec_size_x = 2 ('#' is an element, 'p' is padding):
+ // In : |p|#|#|#|p|p|
+ // Out: |#|#|#|p|
+ // The output has 1 right padding because of the vec_size_x.
+ // The input has 1 left padding because radius = 1.
+ // The input has 2 right padding because of radius = 1 AND because of the extra output padding
+ const unsigned int border_width_left = is_norm_across_width ? norm_radius : 0;
+ const unsigned int border_width_right =
+ is_norm_across_width ? norm_radius + (vec_size_x - input->dimension(0) % vec_size_x) : 0;
+ const BorderSize border_size = BorderSize(0, border_width_right, 0, border_width_left);
- Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
- bool window_changed = false;
+ win = calculate_max_window(*input, Steps(vec_size_x));
- // We do not use a Rectangle window for IN_MAP_2D as we clamp the top and bottom accesses inside the kernel, avoiding padding
- // Reads can occur within the valid region of the input
- if(is_norm_accross_width)
- {
- AccessWindowStatic input_access(input, -border_size.left, 0, input->dimension(0) + border_size.right, 0);
- window_changed = window_changed || update_window_and_padding(win, input_access);
+ // We do not use a Rectangle window for IN_MAP_2D as we clamp the top and bottom accesses inside the kernel, avoiding padding
+ // Reads can occur within the valid region of the input
+ if (is_norm_across_width)
+ {
+ AccessWindowStatic input_access(input, -border_size.left, 0, input->dimension(0) + border_size.right, 0);
+ window_changed = window_changed || update_window_and_padding(win, input_access);
+ }
+ else
+ {
+ AccessWindowHorizontal input_access(input, -border_size.left, vec_size_x);
+ window_changed = window_changed || update_window_and_padding(win, input_access);
+ }
+
+ AccessWindowHorizontal output_access(output, 0, vec_size_x);
+ window_changed = window_changed || update_window_and_padding(win, output_access);
}
else
{
- AccessWindowHorizontal input_access(input, -border_size.left, num_elems_processed_per_iteration);
- window_changed = window_changed || update_window_and_padding(win, input_access);
+ unsigned int vec_size_x = adjust_vec_size(max_cl_vector_width / input->element_size(), input->dimension(0));
+ if (norm_info.is_cross_map())
+ {
+ vec_size_x = 1;
+ }
+ win = calculate_max_window(*input, Steps(vec_size_x));
}
-
- AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
- window_changed = window_changed || update_window_and_padding(win, output_access);
- output_access.set_valid_region(win, input->valid_region());
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+ Status err =
+ (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
return std::make_pair(err, win);
}
} // namespace
@@ -101,6 +131,7 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
CLNormalizationLayerKernel::CLNormalizationLayerKernel()
: _input(nullptr), _output(nullptr), _border_size(0), _is_norm_across_width(false)
{
+ _type = CLKernelType::ELEMENTWISE;
}
BorderSize CLNormalizationLayerKernel::border_size() const
@@ -113,24 +144,51 @@ void CLNormalizationLayerKernel::configure(const ICLTensor *input, ICLTensor *ou
configure(CLKernelLibrary::get().get_compile_context(), input, output, norm_info);
}
-void CLNormalizationLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, NormalizationLayerInfo norm_info)
+void CLNormalizationLayerKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ NormalizationLayerInfo norm_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- // Output tensor auto initialization if not yet initialized
- auto_init_if_empty(*output->info(), *input->info()->clone());
+ auto padding_info = get_padding_info({input, output});
// Perform validation step
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), norm_info));
+ auto win_config = validate_and_configure_window(input->info(), output->info(), norm_info);
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
_input = input;
_output = output;
- const DataLayout data_layout = input->info()->data_layout();
- const unsigned int norm_idx = get_normalization_dimension_index(data_layout, norm_info);
- _is_norm_across_width = norm_idx == 0;
- const unsigned int border_width = _is_norm_across_width ? num_elems_processed_per_iteration - 1 : 0;
- _border_size = BorderSize(0, border_width);
+ const DataLayout data_layout = input->info()->data_layout();
+ unsigned int vec_size_x =
+ adjust_vec_size(max_cl_vector_width / input->info()->element_size(), input->info()->dimension(0));
+ int vec_size_x_leftovers = input->info()->dimension(0) % vec_size_x;
+ if (norm_info.is_cross_map() && data_layout == DataLayout::NHWC)
+ {
+ vec_size_x = 1;
+ vec_size_x_leftovers = 0;
+ }
+
+ if (data_layout == DataLayout::NCHW)
+ {
+ const unsigned int norm_idx = get_normalization_dimension_index(data_layout, norm_info);
+ _is_norm_across_width = norm_idx == 0;
+ const unsigned int norm_radius = norm_info.norm_size() / 2;
+ // Border / padding calculation:
+        // For NCHW no border handling is implemented in the kernel along the x axis.
+        // This means the x axis is fully padded depending on vec_size_x and norm_size
+        // E.g. for input x dimension = 3, norm_size = 3 (radius = 1), vec_size_x = 2 ('#' is an element, 'p' is padding):
+ // In : |p|#|#|#|p|p|
+ // Out: |#|#|#|p|
+ // The output has 1 right padding because of the vec_size_x.
+ // The input has 1 left padding because radius = 1.
+ // The input has 2 right padding because of radius = 1 AND the extra output padding
+ const unsigned int border_width_left = _is_norm_across_width ? norm_radius : 0;
+ const unsigned int border_width_right =
+ _is_norm_across_width ? norm_radius + (vec_size_x - input->info()->dimension(0) % vec_size_x) : 0;
+ _border_size = BorderSize(0, border_width_right, 0, border_width_left);
+ }
const bool is_in_map_2D = (norm_info.type() == NormType::IN_MAP_2D);
@@ -140,35 +198,29 @@ void CLNormalizationLayerKernel::configure(const CLCompileContext &compile_conte
build_opts.add_option(("-DCOEFF=" + float_to_string_with_full_precision(norm_info.scale_coeff())));
build_opts.add_option(("-DBETA=" + float_to_string_with_full_precision(norm_info.beta())));
build_opts.add_option(("-DKAPPA=" + float_to_string_with_full_precision(norm_info.kappa())));
- build_opts.add_option(("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration)));
+ build_opts.add_option(("-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x)));
+ build_opts.add_option(("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(vec_size_x_leftovers)));
build_opts.add_option(("-DRADIUS=" + support::cpp11::to_string(norm_info.norm_size() / 2)));
build_opts.add_option(("-DNUM_SLICES=" + support::cpp11::to_string(input->info()->dimension(2))));
build_opts.add_option_if(is_in_map_2D, "-DIN_MAP_2D");
- build_opts.add_option_if(norm_info.is_in_map() || (data_layout == DataLayout::NHWC && norm_info.is_cross_map()), "-DWIDTH_SIZE=" + support::cpp11::to_string(input->info()->dimension(0)));
+ build_opts.add_option_if(norm_info.is_in_map() || (data_layout == DataLayout::NHWC && norm_info.is_cross_map()),
+ "-DWIDTH_SIZE=" + support::cpp11::to_string(input->info()->dimension(0)));
+ build_opts.add_option_if(norm_info.is_in_map() && data_layout == DataLayout::NHWC,
+ "-DDIM1_SIZE=" + support::cpp11::to_string(input->info()->dimension(1)));
// Create kernel
std::string kernel_name;
- if(norm_info.is_in_map())
+ if (norm_info.is_in_map())
{
kernel_name = "normalization_layer_in_map_" + lower_string(string_from_data_layout(data_layout));
}
else
{
- if(data_layout == DataLayout::NCHW)
- {
- kernel_name = "normalization_layer_cross_map";
- }
- else
- {
- // 1D Cross-Map normalization in NHWC is the same as 1D In-Map normalization in NCHW
- kernel_name = "normalization_layer_in_map_nchw";
- }
+ kernel_name = "normalization_layer_cross_map_" + lower_string(string_from_data_layout(data_layout));
}
_kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), output->info(), norm_info);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
ICLKernel::configure_internal(win_config.second);
// Set config_id for enabling LWS tuning
@@ -182,12 +234,19 @@ void CLNormalizationLayerKernel::configure(const CLCompileContext &compile_conte
_config_id += support::cpp11::to_string(input->info()->dimension(0));
_config_id += "_";
_config_id += support::cpp11::to_string(input->info()->dimension(1));
+ if (data_layout == DataLayout::NHWC)
+ {
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
+ }
}
-Status CLNormalizationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, NormalizationLayerInfo norm_info)
+Status CLNormalizationLayerKernel::validate(const ITensorInfo *input,
+ const ITensorInfo *output,
+ NormalizationLayerInfo norm_info)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, norm_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), norm_info).first);
+ ARM_COMPUTE_RETURN_ON_ERROR(
+ validate_and_configure_window(input->clone().get(), output->clone().get(), norm_info).first);
return Status{};
}
@@ -207,6 +266,6 @@ void CLNormalizationLayerKernel::run(const Window &window, cl::CommandQueue &que
add_3D_tensor_argument(idx, _input, slice);
add_3D_tensor_argument(idx, _output, slice);
enqueue(queue, *this, slice, lws_hint());
- }
- while(window_collapsed.slide_window_slice_3D(slice));
+ } while (window_collapsed.slide_window_slice_3D(slice));
}
+} // namespace arm_compute
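
The NCHW branch above (duplicated in validate_and_configure_window and configure) derives the left/right input borders from the normalization radius and the chosen vector width. A quick check of that arithmetic with the numbers from the comment's example, using a hypothetical helper that mirrors the formula in the diff:

    #include <cassert>
    #include <utility>

    // Left/right input border widths for across-width normalization in NCHW,
    // following the formula used in the configure/validate code above.
    std::pair<unsigned int, unsigned int>
    nchw_borders(unsigned int dim_x, unsigned int norm_size, unsigned int vec_size_x)
    {
        const unsigned int radius = norm_size / 2;
        const unsigned int left   = radius;
        const unsigned int right  = radius + (vec_size_x - dim_x % vec_size_x);
        return {left, right};
    }

    int main()
    {
        // dim_x = 3, norm_size = 3 (radius 1), vec_size_x = 2:
        // the input becomes |p|#|#|#|p|p| -> 1 element of left padding, 2 of right padding.
        const auto borders = nchw_borders(3, 3, 2);
        assert(borders.first == 1 && borders.second == 2);

        // The output only needs the vec_size_x round-up: ceil_to_multiple(3, 2) - 3 = 1.
        const unsigned int out_right = ((3 + 2 - 1) / 2) * 2 - 3;
        assert(out_right == 1);
        return 0;
    }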
diff --git a/src/core/CL/kernels/CLNormalizationLayerKernel.h b/src/core/CL/kernels/CLNormalizationLayerKernel.h
index 739a2ae9f1..5517ba6904 100644
--- a/src/core/CL/kernels/CLNormalizationLayerKernel.h
+++ b/src/core/CL/kernels/CLNormalizationLayerKernel.h
@@ -63,7 +63,10 @@ public:
* Data layouts supported: same as @p input.
* @param[in] norm_info Normalization layer information like the normalization type, normalization size and other parameters.
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, NormalizationLayerInfo norm_info);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ NormalizationLayerInfo norm_info);
/** Static function to check if given info will lead to a valid configuration of @ref CLNormalizationLayerKernel
*
* @param[in] input Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
@@ -77,7 +80,7 @@ public:
static Status validate(const ITensorInfo *input, const ITensorInfo *output, NormalizationLayerInfo norm_info);
// Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
+ void run(const Window &window, cl::CommandQueue &queue) override;
BorderSize border_size() const override;
private:
diff --git a/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp b/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp
index 18cbe217be..59352a8fb7 100644
--- a/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp
+++ b/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,32 +29,37 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/AccessWindowStatic.h"
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
-
#include "support/StringSupport.h"
namespace arm_compute
{
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *mean, const ITensorInfo *std)
+Status
+validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *mean, const ITensorInfo *std)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED,
+ DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, mean, std);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, std);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(mean->num_dimensions() > 1, "mean and std must be vectors");
- const unsigned int channel_idx = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL);
+ const unsigned int channel_idx =
+ get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL);
ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(channel_idx) != mean->dimension(0));
// Checks performed when output is configured
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
@@ -64,11 +69,8 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
return Status{};
}
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, ITensorInfo *mean, ITensorInfo *std)
+std::pair<Status, Window> validate_and_configure_window_nchw(ITensorInfo *input, ITensorInfo *output)
{
- // Output tensor auto initialization if not yet initialized
- auto_init_if_empty(*output, *input->clone());
-
const unsigned int num_elems_processed_per_iteration = 16 / input->element_size();
Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
@@ -77,16 +79,9 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
bool window_changed = update_window_and_padding(win, input_access, output_access);
- output_access.set_valid_region(win, input->valid_region());
-
- if(input->data_layout() == DataLayout::NHWC)
- {
- AccessWindowHorizontal mean_access(mean, 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal std_access(std, 0, num_elems_processed_per_iteration);
- window_changed = window_changed || update_window_and_padding(win, mean_access, std_access);
- }
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+ Status err =
+ (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
return std::make_pair(err, win);
}
} // namespace
@@ -94,36 +89,57 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
CLNormalizePlanarYUVLayerKernel::CLNormalizePlanarYUVLayerKernel()
: _input(nullptr), _output(nullptr), _mean(nullptr), _std(nullptr)
{
+ _type = CLKernelType::ELEMENTWISE;
}
-void CLNormalizePlanarYUVLayerKernel::configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *std)
+void CLNormalizePlanarYUVLayerKernel::configure(const ICLTensor *input,
+ ICLTensor *output,
+ const ICLTensor *mean,
+ const ICLTensor *std)
{
configure(CLKernelLibrary::get().get_compile_context(), input, output, mean, std);
}
-void CLNormalizePlanarYUVLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *std)
+void CLNormalizePlanarYUVLayerKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ const ICLTensor *mean,
+ const ICLTensor *std)
{
// Perform validation step
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, mean, std);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), mean->info(), std->info()));
+ // Output tensor auto initialization if not yet initialized
+ auto_init_if_empty(*output->info(), *input->info()->clone());
+
+ auto padding_info = get_padding_info({input, output});
+
_input = input;
_output = output;
_mean = mean;
_std = std;
- const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
- const unsigned int channel_idx = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::CHANNEL);
- const DataType dt = input->info()->data_type();
+ const DataLayout data_layout = input->info()->data_layout();
+
+ // Get number of elements to process per iterations
+ const unsigned int num_elems_processed_per_iteration =
+ (data_layout == DataLayout::NHWC)
+ ? adjust_vec_size(16 / input->info()->element_size(), input->info()->dimension(0))
+ : (16 / input->info()->element_size());
+ const unsigned int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
+ const DataType dt = input->info()->data_type();
// Set build options
CLBuildOptions build_opts;
build_opts.add_option(("-DDATA_TYPE=" + get_cl_type_from_data_type(dt)));
build_opts.add_option(("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration)));
+ build_opts.add_option(("-DVEC_SIZE_LEFTOVER=" +
+ support::cpp11::to_string(input->info()->dimension(0) % num_elems_processed_per_iteration)));
build_opts.add_option(("-DNUM_CHANNELS=" + support::cpp11::to_string(input->info()->dimension(channel_idx))));
std::string kernel_name = "normalize_planar_yuv_layer_";
- if(is_data_type_quantized(dt))
+ if (is_data_type_quantized(dt))
{
const UniformQuantizationInfo qinfo = input->info()->quantization_info().uniform();
build_opts.add_option(("-DOFFSET=" + support::cpp11::to_string(qinfo.offset)));
@@ -132,13 +148,22 @@ void CLNormalizePlanarYUVLayerKernel::configure(const CLCompileContext &compile_
}
// Create kernel
- kernel_name += lower_string(string_from_data_layout(input->info()->data_layout()));
+ kernel_name += lower_string(string_from_data_layout(data_layout));
_kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), output->info(), mean->info(), std->info());
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
+ if (data_layout == DataLayout::NHWC)
+ {
+ Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
+ ICLKernel::configure_internal(win);
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
+ }
+ else
+ {
+ auto win_config = validate_and_configure_window_nchw(input->info(), output->info());
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+ ICLKernel::configure_internal(win_config.second);
+ }
// Set config_id for enabling LWS tuning
_config_id = "normalize_planar_yuv_layer_";
@@ -153,11 +178,17 @@ void CLNormalizePlanarYUVLayerKernel::configure(const CLCompileContext &compile_
_config_id += support::cpp11::to_string(input->info()->dimension(2));
}
-Status CLNormalizePlanarYUVLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *mean, const ITensorInfo *std)
+Status CLNormalizePlanarYUVLayerKernel::validate(const ITensorInfo *input,
+ const ITensorInfo *output,
+ const ITensorInfo *mean,
+ const ITensorInfo *std)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, mean, std));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), mean->clone().get(), std->clone().get()).first);
-
+ if (input->data_layout() == DataLayout::NCHW)
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(
+ validate_and_configure_window_nchw(input->clone().get(), output->clone().get()).first);
+ }
return Status{};
}
@@ -182,7 +213,6 @@ void CLNormalizePlanarYUVLayerKernel::run(const Window &window, cl::CommandQueue
add_3D_tensor_argument(idx, _input, slice);
add_3D_tensor_argument(idx, _output, slice);
enqueue(queue, *this, slice, lws_hint());
- }
- while(collapsed.slide_window_slice_3D(slice));
+ } while (collapsed.slide_window_slice_3D(slice));
}
} // namespace arm_compute
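
For reference, the per-element math behind this kernel is a per-channel shift and scale, out = (in - mean[c]) / std[c], with the quantized variants additionally folding in the OFFSET and related quantization build options shown above. A float-only sketch of those semantics (illustrative, not the CL kernel; the channel indexing assumes an NHWC-style interleaved buffer):

    #include <cstddef>
    #include <vector>

    // Normalize a flat, channel-interleaved buffer in place using one mean and
    // one standard deviation per channel.
    void normalize_planar_yuv_reference(std::vector<float>       &data,   // HWC-interleaved values
                                        const std::vector<float> &mean,   // one value per channel
                                        const std::vector<float> &stddev) // one value per channel
    {
        const size_t channels = mean.size();
        for (size_t i = 0; i < data.size(); ++i)
        {
            const size_t c = i % channels; // channel index for interleaved layout
            data[i] = (data[i] - mean[c]) / stddev[c];
        }
    }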
diff --git a/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.h b/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.h
index 6db4433e78..341b404e3d 100644
--- a/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.h
+++ b/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.h
@@ -67,7 +67,11 @@ public:
* @param[in] std Standard deviation values tensor. 1 dimension with size equal to the number of input channels.
* Data types supported: same as @p input
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *std);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ const ICLTensor *mean,
+ const ICLTensor *std);
/** Static function to check if given info will lead to a valid configuration of @ref CLNormalizePlanarYUVLayerKernel
*
* @param[in] input Source tensor info. 3 lower dimensions represent a single input with dimensions [width, height, channels].
@@ -79,7 +83,8 @@ public:
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *mean, const ITensorInfo *std);
+ static Status
+ validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *mean, const ITensorInfo *std);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
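
Several of the kernels touched in this patch (the planar-YUV kernel above and the pad kernel below) now size their OpenCL vector width with adjust_vec_size(...), so a row narrower than the preferred vector no longer forces right padding. A rough model of what that adjustment achieves; this is a sketch of the idea only, not the library's actual implementation:

    #include <cstddef>

    // Illustrative only: shrink the requested power-of-two vector size until it
    // does not exceed the innermost dimension, so no access runs past the row.
    unsigned int adjust_vec_size_sketch(unsigned int requested, size_t dim0)
    {
        while (requested > 1 && requested > dim0)
        {
            requested /= 2;
        }
        return requested;
    }

    // Example: an F16 row of 5 elements requests 16 / sizeof(half) = 8 lanes,
    // which this reduces to 4; the leftover column is then handled inside the
    // kernel via the VEC_SIZE_LEFTOVER-style build options seen in this patch,
    // rather than by padding the tensor.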
diff --git a/src/core/CL/kernels/CLPadLayerKernel.cpp b/src/core/CL/kernels/CLPadLayerKernel.cpp
index 2f54b390d5..0ac285038e 100644
--- a/src/core/CL/kernels/CLPadLayerKernel.cpp
+++ b/src/core/CL/kernels/CLPadLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,7 +25,9 @@
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/StringSupport.h"
@@ -34,25 +36,29 @@ namespace arm_compute
{
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, PixelValue constant_value, PaddingMode mode)
+Status validate_arguments(const ITensorInfo *input,
+ const ITensorInfo *output,
+ const PaddingList &padding,
+ PixelValue constant_value,
+ PaddingMode mode)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_UNUSED(constant_value);
ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
ARM_COMPUTE_RETURN_ERROR_ON((padding.size() < 1) || (padding.size() > input->num_dimensions()));
- if(mode == PaddingMode::REFLECT || mode == PaddingMode::SYMMETRIC)
+ if (mode == PaddingMode::REFLECT || mode == PaddingMode::SYMMETRIC)
{
ARM_COMPUTE_RETURN_ERROR_ON(padding.size() > 3);
const auto is_reflect = static_cast<unsigned int>(mode == PaddingMode::REFLECT);
- for(size_t i = 0; i < padding.size(); ++i)
+ for (size_t i = 0; i < padding.size(); ++i)
{
ARM_COMPUTE_RETURN_ERROR_ON(padding.at(i).first > (input->dimension(i) - is_reflect));
ARM_COMPUTE_RETURN_ERROR_ON(padding.at(i).second > (input->dimension(i) - is_reflect));
}
}
- if(output->total_size() > 0)
+ if (output->total_size() > 0)
{
TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input->tensor_shape(), padding);
@@ -64,40 +70,51 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
}
} // namespace
-CLPadLayerKernel::CLPadLayerKernel()
- : _input(nullptr), _output(nullptr), _4d_enabled(false)
+CLPadLayerKernel::CLPadLayerKernel() : _input(nullptr), _output(nullptr), _4d_enabled(false)
{
+ _type = CLKernelType::ELEMENTWISE;
}
-void CLPadLayerKernel::configure(const ICLTensor *input, ICLTensor *output, const PaddingList &padding, PixelValue constant_value, PaddingMode mode)
+void CLPadLayerKernel::configure(
+ const ICLTensor *input, ICLTensor *output, const PaddingList &padding, PixelValue constant_value, PaddingMode mode)
{
configure(CLKernelLibrary::get().get_compile_context(), input, output, padding, constant_value, mode);
}
-void CLPadLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PaddingList &padding, PixelValue constant_value, PaddingMode mode)
+void CLPadLayerKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ const PaddingList &padding,
+ PixelValue constant_value,
+ PaddingMode mode)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(misc::shape_calculator::compute_padded_shape(input->info()->tensor_shape(), padding)));
+ auto_init_if_empty(*output->info(),
+ input->info()->clone()->set_tensor_shape(
+ misc::shape_calculator::compute_padded_shape(input->info()->tensor_shape(), padding)));
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), padding, constant_value, mode));
- auto padding_info = get_padding_info({ input, output });
+ auto padding_info = get_padding_info({input, output});
_input = input;
_output = output;
_4d_enabled = (mode == PaddingMode::CONSTANT) && (padding.size() > 3);
// Set build options
- const DataType &data_type = input->info()->data_type();
- const unsigned int input_width = input->info()->dimension(0);
- const unsigned int input_height = input->info()->dimension(1);
- const unsigned int input_depth = input->info()->dimension(2);
- const unsigned int pad_x_before = padding.at(0).first;
- const unsigned int pad_y_before = padding.size() > 1 ? padding.at(1).first : 0;
- const unsigned int pad_z_before = padding.size() > 2 ? padding.at(2).first : 0;
- const unsigned int vec_size = adjust_vec_size(std::min(16U, 32U / static_cast<unsigned int>(element_size_from_data_type(input->info()->data_type()))), input_width);
- const unsigned int pad_right_start = input_width + pad_x_before;
- const unsigned int pad_x_before_remainder = pad_x_before % vec_size;
- const unsigned int vec_size_leftover_write = vec_size - (ceil_to_multiple(output->info()->dimension(0), vec_size) - output->info()->dimension(0));
+ const DataType &data_type = input->info()->data_type();
+ const unsigned int input_width = input->info()->dimension(0);
+ const unsigned int input_height = input->info()->dimension(1);
+ const unsigned int input_depth = input->info()->dimension(2);
+ const unsigned int pad_x_before = padding.at(0).first;
+ const unsigned int pad_y_before = padding.size() > 1 ? padding.at(1).first : 0;
+ const unsigned int pad_z_before = padding.size() > 2 ? padding.at(2).first : 0;
+ const unsigned int vec_size = adjust_vec_size(
+ std::min(16U, 32U / static_cast<unsigned int>(element_size_from_data_type(input->info()->data_type()))),
+ input_width);
+ const unsigned int pad_right_start = input_width + pad_x_before;
+ const unsigned int pad_x_before_remainder = pad_x_before % vec_size;
+ const unsigned int vec_size_leftover_write =
+ vec_size - (ceil_to_multiple(output->info()->dimension(0), vec_size) - output->info()->dimension(0));
CLBuildOptions build_opts;
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
@@ -106,12 +123,12 @@ void CLPadLayerKernel::configure(const CLCompileContext &compile_context, const
build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(input_width));
build_opts.add_option("-DPAD_X_BEFORE_REMAINDER=" + support::cpp11::to_string(pad_x_before_remainder));
build_opts.add_option("-DVEC_SIZE_LEFTOVER_WRITE=" + support::cpp11::to_string(vec_size_leftover_write));
- if(padding.size() > 1)
+ if (padding.size() > 1)
{
build_opts.add_option("-DPAD_Y_BEFORE=" + support::cpp11::to_string(pad_y_before));
build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(input_height));
- if(padding.size() > 2)
+ if (padding.size() > 2)
{
build_opts.add_option("-DPAD_Z_BEFORE=" + support::cpp11::to_string(pad_z_before));
build_opts.add_option("-DSRC_DEPTH=" + support::cpp11::to_string(input_depth));
@@ -119,23 +136,25 @@ void CLPadLayerKernel::configure(const CLCompileContext &compile_context, const
}
std::string kernel_name = "pad_layer_";
- switch(mode)
+ switch (mode)
{
case PaddingMode::CONSTANT:
{
kernel_name += "constant";
- const unsigned int vec_size_leftover_read = vec_size - (ceil_to_multiple(pad_right_start, vec_size) - pad_right_start);
+ const unsigned int vec_size_leftover_read =
+ vec_size - (ceil_to_multiple(pad_right_start, vec_size) - pad_right_start);
build_opts.add_option("-DCONST_VAL=" + string_from_pixel_value(constant_value, data_type));
build_opts.add_option("-DVEC_SIZE_LEFTOVER_READ=" + support::cpp11::to_string(vec_size_leftover_read));
- if(pad_x_before >= vec_size)
+ if (pad_x_before >= vec_size)
{
build_opts.add_option("-DTHREADS_TO_SKIP_BEFORE=" + support::cpp11::to_string(pad_x_before / vec_size));
- build_opts.add_option("-DTHREADS_TO_SKIP_AFTER=" + support::cpp11::to_string(pad_right_start / vec_size));
+ build_opts.add_option("-DTHREADS_TO_SKIP_AFTER=" +
+ support::cpp11::to_string(pad_right_start / vec_size));
}
- if(_4d_enabled)
+ if (_4d_enabled)
{
build_opts.add_option("-DPAD_W_BEFORE=" + support::cpp11::to_string(padding.at(3).first));
build_opts.add_option("-DSRC_BATCH=" + support::cpp11::to_string(input->info()->dimension(3)));
@@ -152,14 +171,17 @@ void CLPadLayerKernel::configure(const CLCompileContext &compile_context, const
const unsigned int pad_x_after_remainder = pad_right_start % vec_size;
const unsigned int after_pad_fact_x = (2 * input_width + pad_x_before) - is_reflect;
- const unsigned int output_last_x = ceil_to_multiple(pad_right_start + padding.at(0).second, vec_size);
+ const unsigned int output_last_x = ceil_to_multiple(pad_right_start + padding.at(0).second, vec_size);
build_opts.add_option("-DIS_REFLECT=" + support::cpp11::to_string(is_reflect));
build_opts.add_option("-DPAD_X_AFTER_REMAINDER=" + support::cpp11::to_string(pad_x_after_remainder));
- build_opts.add_option("-DPAD_X_BEFORE_REMAINDER_REFL=" + support::cpp11::to_string((pad_x_before_remainder + is_reflect) % vec_size));
- build_opts.add_option("-DPAD_X_AFTER_REMAINDER_REFL=" + support::cpp11::to_string((pad_x_after_remainder - is_reflect) % vec_size));
+ build_opts.add_option("-DPAD_X_BEFORE_REMAINDER_REFL=" +
+ support::cpp11::to_string((pad_x_before_remainder + is_reflect) % vec_size));
+ build_opts.add_option("-DPAD_X_AFTER_REMAINDER_REFL=" +
+ support::cpp11::to_string((pad_x_after_remainder - is_reflect) % vec_size));
build_opts.add_option("-DAFTER_PAD_FACT_X=" + support::cpp11::to_string(after_pad_fact_x));
- build_opts.add_option_if(after_pad_fact_x < output_last_x, "-DAFTER_PAD_REM=" + support::cpp11::to_string(after_pad_fact_x % vec_size));
+ build_opts.add_option_if(after_pad_fact_x < output_last_x,
+ "-DAFTER_PAD_REM=" + support::cpp11::to_string(after_pad_fact_x % vec_size));
break;
}
@@ -177,7 +199,11 @@ void CLPadLayerKernel::configure(const CLCompileContext &compile_context, const
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-Status CLPadLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, PixelValue constant_value, PaddingMode mode)
+Status CLPadLayerKernel::validate(const ITensorInfo *input,
+ const ITensorInfo *output,
+ const PaddingList &padding,
+ PixelValue constant_value,
+ PaddingMode mode)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, padding, constant_value, mode));
return Status{};
@@ -195,13 +221,12 @@ void CLPadLayerKernel::run(const Window &window, cl::CommandQueue &queue)
unsigned int idx = 0;
add_3D_tensor_argument(idx, _input, slice);
add_3D_tensor_argument(idx, _output, slice);
- if(_4d_enabled)
+ if (_4d_enabled)
{
add_argument<unsigned int>(idx, batch++);
}
enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_3D(slice));
+ } while (window.slide_window_slice_3D(slice));
}
} // namespace arm_compute
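The vectorisation bookkeeping introduced above reduces to a handful of integer expressions. A minimal standalone sketch, assuming a hypothetical F32 input of width 17 padded by {3, 2} along x (adjust_vec_size() is approximated here by a simple clamp to the input width):

#include <algorithm>
#include <cstdio>

int main()
{
    const unsigned int element_size = 4;                        // sizeof(float)
    const unsigned int input_width  = 17;
    const unsigned int pad_left = 3, pad_right = 2;

    // vec_size = adjust_vec_size(min(16, 32 / element_size), input_width)
    unsigned int vec_size = std::min(16u, 32u / element_size);  // 8
    vec_size              = std::min(vec_size, input_width);    // still 8, since 8 <= 17

    const unsigned int out_width = input_width + pad_left + pad_right;                  // 22
    const unsigned int ceil_out  = ((out_width + vec_size - 1) / vec_size) * vec_size;  // 24
    const unsigned int vec_size_leftover_write = vec_size - (ceil_out - out_width);     // 6
    const unsigned int pad_x_before_remainder  = pad_left % vec_size;                   // 3

    std::printf("vec_size=%u leftover_write=%u pad_x_before_rem=%u\n",
                vec_size, vec_size_leftover_write, pad_x_before_remainder);
    return 0;
}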
diff --git a/src/core/CL/kernels/CLPadLayerKernel.h b/src/core/CL/kernels/CLPadLayerKernel.h
index 90af337f94..dca121b6a1 100644
--- a/src/core/CL/kernels/CLPadLayerKernel.h
+++ b/src/core/CL/kernels/CLPadLayerKernel.h
@@ -56,7 +56,11 @@ public:
* @param[in] mode (Optional) Controls whether the padding should be filled with @p constant_value using CONSTANT,
* or reflect the input, either including the border values (SYMMETRIC) or not (REFLECT).
*/
- void configure(const ICLTensor *input, ICLTensor *output, const PaddingList &padding, PixelValue constant_value = PixelValue(), PaddingMode mode = PaddingMode::CONSTANT);
+ void configure(const ICLTensor *input,
+ ICLTensor *output,
+ const PaddingList &padding,
+ PixelValue constant_value = PixelValue(),
+ PaddingMode mode = PaddingMode::CONSTANT);
/** Set the input and output tensor.
*
* @param[in] compile_context The compile context to be used.
@@ -68,8 +72,12 @@ public:
* @param[in] mode (Optional) Controls whether the padding should be filled with @p constant_value using CONSTANT,
* or reflect the input, either including the border values (SYMMETRIC) or not (REFLECT).
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PaddingList &padding, PixelValue constant_value = PixelValue(),
- PaddingMode mode = PaddingMode::CONSTANT);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ const PaddingList &padding,
+ PixelValue constant_value = PixelValue(),
+ PaddingMode mode = PaddingMode::CONSTANT);
/** Static function to check if given info will lead to a valid configuration of @ref CLPadLayerKernel
*
* @param[in] input Source tensor info. Data types supported: All.
@@ -80,7 +88,11 @@ public:
* @param[in] mode (Optional) Controls whether the padding should be filled with @p constant_value using CONSTANT,
* or reflect the input, either including the border values (SYMMETRIC) or not (REFLECT).
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, PixelValue constant_value = PixelValue(), PaddingMode mode = PaddingMode::CONSTANT);
+ static Status validate(const ITensorInfo *input,
+ const ITensorInfo *output,
+ const PaddingList &padding,
+ PixelValue constant_value = PixelValue(),
+ PaddingMode mode = PaddingMode::CONSTANT);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
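A hypothetical call site for the reflowed overloads above, assuming src and dst are CL tensors created and allocated elsewhere:

// Sketch only; the padding values are illustrative.
CLPadLayerKernel pad_kernel;
const PaddingList padding = {{1, 1}, {2, 2}};   // pad x by 1/1 and y by 2/2
pad_kernel.configure(&src, &dst, padding);      // constant_value and mode take their defaults

// The static validator mirrors the same argument list:
const Status status = CLPadLayerKernel::validate(src.info(), dst.info(), padding);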
diff --git a/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp b/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp
deleted file mode 100644
index c68c526ec9..0000000000
--- a/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp
+++ /dev/null
@@ -1,481 +0,0 @@
-/*
- * Copyright (c) 2016-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLPixelWiseMultiplicationKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace
-{
-constexpr unsigned int num_elems_processed_per_iteration = 16;
-
-Status validate_arguments(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
- ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_UNUSED(overflow_policy);
- ARM_COMPUTE_UNUSED(rounding_policy);
-
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input1);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1,
- 1,
- DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED,
- DataType::S16, DataType::QSYMM16, DataType::F16,
- DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2,
- 1,
- DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED,
- DataType::S16, DataType::QSYMM16, DataType::F16,
- DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(scale < 0, "Scale cannot be negative.");
- ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled() && !is_data_type_float(output->data_type()));
-
- const TensorShape &out_shape = TensorShape::broadcast_shape(input1->tensor_shape(), input2->tensor_shape());
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");
-
- // Validate in case of configured output
- if(output->total_size() > 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output,
- 1,
- DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED,
- DataType::S16, DataType::QSYMM16, DataType::F16,
- DataType::S32, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::U8 && (input1->data_type() != DataType::U8 || input2->data_type() != DataType::U8),
- "Output can only be U8 if both inputs are U8");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::QASYMM8 && (input1->data_type() != DataType::QASYMM8 || input2->data_type() != DataType::QASYMM8),
- "Output can only be QASYMM8 if both inputs are QASYMM8");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::QASYMM8_SIGNED && (input1->data_type() != DataType::QASYMM8_SIGNED || input2->data_type() != DataType::QASYMM8_SIGNED),
- "Output can only be QASYMM8_SIGNED if both inputs are QASYMM8_SIGNED");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::QSYMM16 && (input1->data_type() != DataType::QSYMM16 || input2->data_type() != DataType::QSYMM16),
- "Output can only be QSYMM16 if both inputs are QSYMM16");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::S32 && (input1->data_type() != DataType::QSYMM16 || input2->data_type() != DataType::QSYMM16),
- "Output can only be S32 if both inputs are QSYMM16");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output->tensor_shape(), 0), "Wrong shape for output");
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output)
-{
- const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(*input1, *input2);
- const TensorShape &out_shape = broadcast_pair.first;
- const ValidRegion &valid_region = broadcast_pair.second;
-
- // Auto initialize output if not initialized
- {
- set_shape_if_empty(*output, out_shape);
-
- if(input1->data_type() == DataType::S16 || input2->data_type() == DataType::S16)
- {
- set_format_if_unknown(*output, Format::S16);
- }
- else if(input1->data_type() == DataType::F32 || input2->data_type() == DataType::F32)
- {
- set_format_if_unknown(*output, Format::F32);
- }
- else if(input1->data_type() == DataType::QASYMM8)
- {
- set_data_type_if_unknown(*output, DataType::QASYMM8);
- }
- else if(input1->data_type() == DataType::QASYMM8_SIGNED)
- {
- set_data_type_if_unknown(*output, DataType::QASYMM8_SIGNED);
- }
- else if(input1->data_type() == DataType::QSYMM16)
- {
- set_data_type_if_unknown(*output, DataType::QSYMM16);
- }
- }
-
- Window win = calculate_max_window(valid_region, Steps(num_elems_processed_per_iteration));
- Window win_input1 = win.broadcast_if_dimension_le_one(*input1);
- Window win_input2 = win.broadcast_if_dimension_le_one(*input2);
-
- AccessWindowHorizontal input1_access(input1, 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal input2_access(input2, 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
-
- bool window_changed = update_window_and_padding(win_input1, input1_access)
- || update_window_and_padding(win_input2, input2_access)
- || update_window_and_padding(win, output_access);
-
- output_access.set_valid_region(win, valid_region);
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
-}
-} // namespace
-
-CLPixelWiseMultiplicationKernel::CLPixelWiseMultiplicationKernel()
- : _input1(nullptr), _input2(nullptr), _output(nullptr)
-{
-}
-
-void CLPixelWiseMultiplicationKernel::configure(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, float scale,
- ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, scale, overflow_policy, rounding_policy, act_info);
-}
-
-void CLPixelWiseMultiplicationKernel::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, float scale,
- ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input1, input2, output,
- scale, overflow_policy, rounding_policy, act_info));
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(input1, input2, output);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
-
- _input1 = input1;
- _input2 = input2;
- _output = output;
-
- int scale_int = -1;
- // Extract sign, exponent and mantissa
- int exponent = 0;
- float normalized_mantissa = std::frexp(scale, &exponent);
- // Use int scaling if factor is equal to 1/2^n for 0 <= n <= 15
- // frexp returns 0.5 as mantissa which means that the exponent will be in the range of -14 <= e <= 1
- // Moreover, it will be negative as we deal with 1/2^n
- if((normalized_mantissa == 0.5f) && (-14 <= exponent) && (exponent <= 1))
- {
- // Store the positive exponent. We know that we compute 1/2^n
- // Additionally we need to subtract 1 to compensate that frexp used a mantissa of 0.5
- scale_int = std::abs(exponent - 1);
- }
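The detection above can be exercised in isolation: for scale = 1/2^n with 0 <= n <= 15, frexp() yields a mantissa of exactly 0.5 and an exponent of 1 - n, i.e. a value in [-14, 1]. A self-contained sketch of that reasoning:

#include <cmath>
#include <cstdio>
#include <cstdlib>

int main()
{
    for (int n = 0; n <= 15; ++n)
    {
        const float scale    = 1.0f / static_cast<float>(1 << n);
        int         exponent = 0;
        const float mantissa = std::frexp(scale, &exponent);   // scale = mantissa * 2^exponent
        const bool  int_path = (mantissa == 0.5f) && (-14 <= exponent) && (exponent <= 1);
        const int   scale_int = int_path ? std::abs(exponent - 1) : -1;  // recovers n
        std::printf("n=%2d exponent=%3d scale_int=%2d\n", n, exponent, scale_int);
    }
    return 0;
}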
-
- std::string acc_type;
- // Check if it has float inputs and output
- if(is_data_type_float(input1->data_type()) || is_data_type_float(input2->data_type()))
- {
- scale_int = -1;
- acc_type = (input1->data_type() == DataType::F32 || input2->data_type() == DataType::F32) ? "float" : "half";
- }
- else
- {
- if(input1->element_size() == 2 || input2->element_size() == 2)
- {
- // Use 32-bit accumulator for 16-bit input
- acc_type = "int";
- }
- else
- {
- // Use 16-bit accumulator for 8-bit input
- acc_type = "ushort";
- }
- }
-
- const bool is_quantized = is_data_type_quantized(input1->data_type());
-
- // Set kernel build options
- std::string kernel_name = "pixelwise_mul";
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE_IN1=" + get_cl_type_from_data_type(input1->data_type()));
- build_opts.add_option("-DDATA_TYPE_IN2=" + get_cl_type_from_data_type(input2->data_type()));
- build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output->data_type()));
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
- if(is_quantized && (output->data_type() != DataType::S32))
- {
- const UniformQuantizationInfo iq1_info = input1->quantization_info().uniform();
- const UniformQuantizationInfo iq2_info = input2->quantization_info().uniform();
- const UniformQuantizationInfo oq_info = output->quantization_info().uniform();
-
- build_opts.add_option_if(is_data_type_quantized_asymmetric(input1->data_type()),
- "-DOFFSET_IN1=" + support::cpp11::to_string(iq1_info.offset));
- build_opts.add_option_if(is_data_type_quantized_asymmetric(input2->data_type()),
- "-DOFFSET_IN2=" + support::cpp11::to_string(iq2_info.offset));
- build_opts.add_option_if(is_data_type_quantized_asymmetric(output->data_type()),
- "-DOFFSET_OUT=" + support::cpp11::to_string(oq_info.offset));
- build_opts.add_option("-DSCALE_IN1=" + float_to_string_with_full_precision(iq1_info.scale));
- build_opts.add_option("-DSCALE_IN2=" + float_to_string_with_full_precision(iq2_info.scale));
- build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(oq_info.scale));
- kernel_name += "_quantized";
- }
- else
- {
- kernel_name += (scale_int >= 0) ? "_int" : "_float";
- build_opts.add_option_if_else(overflow_policy == ConvertPolicy::WRAP || is_data_type_float(output->data_type()), "-DWRAP", "-DSATURATE");
- build_opts.add_option_if_else(rounding_policy == RoundingPolicy::TO_ZERO, "-DROUND=_rtz", "-DROUND=_rte");
- build_opts.add_option("-DACC_DATA_TYPE=" + acc_type);
- if(act_info.enabled())
- {
- build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation())));
- build_opts.add_option("-DA_VAL=" + float_to_string_with_full_precision(act_info.a()));
- build_opts.add_option("-DB_VAL=" + float_to_string_with_full_precision(act_info.b()));
- }
- }
-
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Set scale argument
- unsigned int idx = 3 * num_arguments_per_3D_tensor(); // Skip the inputs and output parameters
-
- if(scale_int >= 0 && !is_quantized)
- {
- _kernel.setArg(idx++, scale_int);
- }
- else
- {
- _kernel.setArg(idx++, scale);
- }
-
- ICLKernel::configure_internal(win_config.second);
-}
-
-Status CLPixelWiseMultiplicationKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
- ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input1, input2, output, scale, overflow_policy, rounding_policy, act_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input1->clone().get(), input2->clone().get(), output->clone().get()).first);
-
- return Status{};
-}
-
-void CLPixelWiseMultiplicationKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- const auto src_0 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
- const auto src_1 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- const TensorShape &in_shape1 = src_0->info()->tensor_shape();
- const TensorShape &in_shape2 = src_1->info()->tensor_shape();
- const TensorShape &out_shape = dst->info()->tensor_shape();
-
- bool can_collapse = true;
- if(std::min(in_shape1.total_size(), in_shape2.total_size()) > 1)
- {
- can_collapse = (std::min(in_shape1.num_dimensions(), in_shape2.num_dimensions()) > Window::DimZ);
- for(size_t d = Window::DimZ; can_collapse && (d < out_shape.num_dimensions()); ++d)
- {
- can_collapse = (in_shape1[d] == in_shape2[d]);
- }
- }
-
- bool has_collapsed = false;
- Window collapsed = can_collapse ? window.collapse_if_possible(ICLKernel::window(), Window::DimZ, &has_collapsed) : window;
-
- const TensorShape &in_shape1_collapsed = has_collapsed ? in_shape1.collapsed_from(Window::DimZ) : in_shape1;
- const TensorShape &in_shape2_collapsed = has_collapsed ? in_shape2.collapsed_from(Window::DimZ) : in_shape2;
-
- Window slice = collapsed.first_slice_window_3D();
- Window slice_input1 = slice.broadcast_if_dimension_le_one(in_shape1_collapsed);
- Window slice_input2 = slice.broadcast_if_dimension_le_one(in_shape2_collapsed);
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, src_0, slice_input1);
- add_3D_tensor_argument(idx, src_1, slice_input2);
- add_3D_tensor_argument(idx, dst, slice);
- enqueue(queue, *this, slice, lws_hint());
-
- ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input1));
- ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input2));
- }
- while(collapsed.slide_window_slice_3D(slice));
-}
-
-BorderSize CLPixelWiseMultiplicationKernel::border_size() const
-{
- const unsigned int replicateSize = _output->dimension(0) - std::min(_input1->dimension(0), _input2->dimension(0));
- const unsigned int border = std::min<unsigned int>(num_elems_processed_per_iteration - 1U, replicateSize);
- return BorderSize{ 0, border, 0, 0 };
-}
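With hypothetical widths the border arithmetic above becomes concrete: a broadcast input of width 1 against an output of width 20, processed 16 elements at a time, yields a right-hand replication border of 15 elements.

#include <algorithm>
#include <cstdio>

int main()
{
    const unsigned int replicate = 20u - std::min(1u, 20u);        // 19
    const unsigned int border    = std::min(16u - 1u, replicate);  // 15 -> BorderSize{0, 15, 0, 0}
    std::printf("replicate=%u border=%u\n", replicate, border);
    return 0;
}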
-
-namespace
-{
-constexpr unsigned int num_elems_processed_per_iteration_complex = 1;
-
-Status validate_arguments_complex(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 2, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 2, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, input2);
-
- const TensorShape &out_shape = TensorShape::broadcast_shape(input1->tensor_shape(), input2->tensor_shape());
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");
- ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled() && !is_data_type_float(output->data_type()));
-
- // Validate in case of configured output
- if(output->total_size() > 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 2, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output->tensor_shape(), 0), "Wrong shape for output");
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window_complex(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output)
-{
- const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(*input1, *input2);
- const TensorShape &out_shape = broadcast_pair.first;
- const ValidRegion &valid_region = broadcast_pair.second;
-
- // Auto initialize output if not initialized
- const TensorInfo out_info(out_shape, input1->num_channels(), input1->data_type());
- auto_init_if_empty(*output, out_info);
-
- Window win = calculate_max_window(valid_region, Steps(num_elems_processed_per_iteration_complex));
- Window win_input1 = win.broadcast_if_dimension_le_one(*input1);
- Window win_input2 = win.broadcast_if_dimension_le_one(*input2);
-
- AccessWindowHorizontal input1_access(input1, 0, num_elems_processed_per_iteration_complex);
- AccessWindowHorizontal input2_access(input2, 0, num_elems_processed_per_iteration_complex);
- AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration_complex);
-
- bool window_changed = update_window_and_padding(win_input1, input1_access)
- || update_window_and_padding(win_input2, input2_access)
- || update_window_and_padding(win, output_access);
-
- output_access.set_valid_region(win, valid_region);
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
-}
-} // namespace
-
-CLComplexPixelWiseMultiplicationKernel::CLComplexPixelWiseMultiplicationKernel()
- : _input1(nullptr), _input2(nullptr), _output(nullptr)
-{
-}
-
-void CLComplexPixelWiseMultiplicationKernel::configure(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, act_info);
-}
-
-void CLComplexPixelWiseMultiplicationKernel::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_complex(input1, input2, output, act_info));
-
- // Configure kernel window
- auto win_config = validate_and_configure_window_complex(input1, input2, output);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
-
- _input1 = input1;
- _input2 = input2;
- _output = output;
-
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(_output->data_type()));
- if(act_info.enabled())
- {
- build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation())));
- build_opts.add_option("-DA_VAL=" + float_to_string_with_full_precision(act_info.a()));
- build_opts.add_option("-DB_VAL=" + float_to_string_with_full_precision(act_info.b()));
- }
-
- // Create kernel
- _kernel = create_kernel(compile_context, "pixelwise_mul_complex", build_opts.options());
-
- ICLKernel::configure_internal(win_config.second);
-}
-
-Status CLComplexPixelWiseMultiplicationKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_complex(input1, input2, output, act_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_complex(input1->clone().get(), input2->clone().get(), output->clone().get()).first);
-
- return Status{};
-}
-
-void CLComplexPixelWiseMultiplicationKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- const auto src_0 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
- const auto src_1 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- const TensorShape &in_shape1 = src_0->info()->tensor_shape();
- const TensorShape &in_shape2 = src_1->info()->tensor_shape();
- const TensorShape &out_shape = dst->info()->tensor_shape();
-
- bool can_collapse = true;
- if(std::min(in_shape1.total_size(), in_shape2.total_size()) > 1)
- {
- can_collapse = (std::min(in_shape1.num_dimensions(), in_shape2.num_dimensions()) > Window::DimZ);
- for(size_t d = Window::DimZ; can_collapse && (d < out_shape.num_dimensions()); ++d)
- {
- can_collapse = (in_shape1[d] == in_shape2[d]);
- }
- }
-
- bool has_collapsed = false;
- Window collapsed = can_collapse ? window.collapse_if_possible(ICLKernel::window(), Window::DimZ, &has_collapsed) : window;
-
- const TensorShape &in_shape1_collapsed = has_collapsed ? in_shape1.collapsed_from(Window::DimZ) : in_shape1;
- const TensorShape &in_shape2_collapsed = has_collapsed ? in_shape2.collapsed_from(Window::DimZ) : in_shape2;
-
- Window slice = collapsed.first_slice_window_3D();
- Window slice_input1 = slice.broadcast_if_dimension_le_one(in_shape1_collapsed);
- Window slice_input2 = slice.broadcast_if_dimension_le_one(in_shape2_collapsed);
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, src_0, slice_input1);
- add_3D_tensor_argument(idx, src_1, slice_input2);
- add_3D_tensor_argument(idx, dst, slice);
- enqueue(queue, *this, slice, lws_hint());
-
- ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input1));
- ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input2));
- }
- while(collapsed.slide_window_slice_3D(slice));
-}
-
-BorderSize CLComplexPixelWiseMultiplicationKernel::border_size() const
-{
- const unsigned int replicateSize = _output->dimension(0) - std::min(_input1->dimension(0), _input2->dimension(0));
- const unsigned int border = std::min<unsigned int>(num_elems_processed_per_iteration_complex - 1U, replicateSize);
- return BorderSize{ 0, border, 0, 0 };
-}
-} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.h b/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
deleted file mode 100644
index 74102fd397..0000000000
--- a/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Copyright (c) 2016-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLPIXELWISEMULTIPLICATIONKERNEL_H
-#define ARM_COMPUTE_CLPIXELWISEMULTIPLICATIONKERNEL_H
-
-#include "arm_compute/core/Types.h"
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-// Forward declarations
-class ICLTensor;
-
-/** Interface for the pixelwise multiplication kernel. */
-class CLPixelWiseMultiplicationKernel : public ICLKernel
-{
-public:
- /** Default constructor.*/
- CLPixelWiseMultiplicationKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLPixelWiseMultiplicationKernel(const CLPixelWiseMultiplicationKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLPixelWiseMultiplicationKernel &operator=(const CLPixelWiseMultiplicationKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLPixelWiseMultiplicationKernel(CLPixelWiseMultiplicationKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLPixelWiseMultiplicationKernel &operator=(CLPixelWiseMultiplicationKernel &&) = default;
- /** Initialise the kernel's input, output and border mode.
- *
- * Valid configurations (Input1,Input2) -> Output :
- *
- * - (U8,U8) -> U8
- * - (U8,U8) -> S16
- * - (U8,S16) -> S16
- * - (S16,U8) -> S16
- * - (S16,S16) -> S16
- * - (F16,F16) -> F16
- * - (F32,F32) -> F32
- * - (QASYMM8,QASYMM8) -> QASYMM8
- * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
- * - (QSYMM16,QSYMM16) -> QSYMM16
- * - (QSYMM16,QSYMM16) -> S32
- *
- * @param[in] input1 An input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
- * @param[in] input2 An input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
- * @param[out] output The output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
- * @param[in] scale Scale to apply after multiplication.
- * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
- * @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
- * @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
- */
- void configure(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, float scale,
- ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
- /** Initialise the kernel's input, output and border mode.
- *
- * Valid configurations (Input1,Input2) -> Output :
- *
- * - (U8,U8) -> U8
- * - (U8,U8) -> S16
- * - (U8,S16) -> S16
- * - (S16,U8) -> S16
- * - (S16,S16) -> S16
- * - (F16,F16) -> F16
- * - (F32,F32) -> F32
- * - (QASYMM8,QASYMM8) -> QASYMM8
- * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
- * - (QSYMM16,QSYMM16) -> QSYMM16
- * - (QSYMM16,QSYMM16) -> S32
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input1 An input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
- * @param[in] input2 An input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
- * @param[out] output The output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
- * @param[in] scale Scale to apply after multiplication.
- * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
- * @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
- * @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
- */
- void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, float scale,
- ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
- /** Static function to check if given info will lead to a valid configuration of @ref CLPixelWiseMultiplicationKernel
- *
- * Valid configurations (Input1,Input2) -> Output :
- *
- * - (U8,U8) -> U8
- * - (U8,U8) -> S16
- * - (U8,S16) -> S16
- * - (S16,U8) -> S16
- * - (S16,S16) -> S16
- * - (F16,F16) -> F16
- * - (F32,F32) -> F32
- * - (QASYMM8,QASYMM8) -> QASYMM8
- * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
- * - (QSYMM16,QSYMM16) -> QSYMM16
- * - (QSYMM16,QSYMM16) -> S32
- *
- * @param[in] input1 An input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
- * @param[in] input2 An input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
- * @param[in] output The output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
- * @param[in] scale Scale to apply after multiplication.
- * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
- * @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
- * @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
- ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
- BorderSize border_size() const override;
-
-private:
- const ITensorInfo *_input1;
- const ITensorInfo *_input2;
- ITensorInfo *_output;
-};
-
-/** Interface for the complex pixelwise multiplication kernel. */
-class CLComplexPixelWiseMultiplicationKernel : public ICLKernel
-{
-public:
- /** Default constructor.*/
- CLComplexPixelWiseMultiplicationKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLComplexPixelWiseMultiplicationKernel(const CLComplexPixelWiseMultiplicationKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLComplexPixelWiseMultiplicationKernel &operator=(const CLComplexPixelWiseMultiplicationKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLComplexPixelWiseMultiplicationKernel(CLComplexPixelWiseMultiplicationKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLComplexPixelWiseMultiplicationKernel &operator=(CLComplexPixelWiseMultiplicationKernel &&) = default;
- /** Initialise the kernel's input, output and border mode.
- *
- * @param[in] input1 An input tensor info. Data types supported: F16/F32. Number of channels supported: 2.
- * @param[in] input2 An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
- * @param[out] output The output tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
- */
- void configure(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
- /** Initialise the kernel's input, output and border mode.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input1 An input tensor info. Data types supported: F32. Number of channels supported: 2.
- * @param[in] input2 An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
- * @param[out] output The output tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
- */
- void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
- /** Static function to check if given info will lead to a valid configuration of @ref CLComplexPixelWiseMultiplicationKernel
- *
- * @param[in] input1 An input tensor info. Data types supported: F32. Number of channels supported: 2.
- * @param[in] input2 An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
- * @param[in] output The output tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
- BorderSize border_size() const override;
-
-private:
- const ITensorInfo *_input1;
- const ITensorInfo *_input2;
- ITensorInfo *_output;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLPIXELWISEMULTIPLICATIONKERNEL_H */
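Against the interface documented above, a (QASYMM8,QASYMM8) -> QASYMM8 configuration with the commonly used 1/255 scale would have looked roughly like this (in1, in2 and out are assumed to be ITensorInfo objects with matching, or broadcastable, shapes):

// Sketch only; act_info is left at its default.
CLPixelWiseMultiplicationKernel mul;
mul.configure(&in1, &in2, &out, 1.f / 255.f,
              ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);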
diff --git a/src/core/CL/kernels/CLPriorBoxLayerKernel.cpp b/src/core/CL/kernels/CLPriorBoxLayerKernel.cpp
index 7b9caf0063..7dcdf1de6f 100644
--- a/src/core/CL/kernels/CLPriorBoxLayerKernel.cpp
+++ b/src/core/CL/kernels/CLPriorBoxLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,10 +30,10 @@
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
-
#include "support/StringSupport.h"
using namespace arm_compute::misc::shape_calculator;
@@ -42,7 +42,10 @@ namespace arm_compute
{
namespace
{
-Status validate_arguments(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const PriorBoxLayerInfo &info)
+Status validate_arguments(const ITensorInfo *input1,
+ const ITensorInfo *input2,
+ const ITensorInfo *output,
+ const PriorBoxLayerInfo &info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::F32);
@@ -51,10 +54,10 @@ Status validate_arguments(const ITensorInfo *input1, const ITensorInfo *input2,
// Check variances
const int var_size = info.variances().size();
- if(var_size > 1)
+ if (var_size > 1)
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG(var_size != 4, "Must provide 4 variance values");
- for(int i = 0; i < var_size; ++i)
+ for (int i = 0; i < var_size; ++i)
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG(var_size <= 0, "Must be greater than 0");
}
@@ -62,17 +65,19 @@ Status validate_arguments(const ITensorInfo *input1, const ITensorInfo *input2,
ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.steps()[0] < 0.f, "Step x should be greater or equal to 0");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.steps()[1] < 0.f, "Step y should be greater or equal to 0");
- if(!info.max_sizes().empty())
+ if (!info.max_sizes().empty())
{
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.max_sizes().size() != info.min_sizes().size(), "Max and min sizes dimensions should match");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.max_sizes().size() != info.min_sizes().size(),
+ "Max and min sizes dimensions should match");
}
- for(unsigned int i = 0; i < info.max_sizes().size(); ++i)
+ for (unsigned int i = 0; i < info.max_sizes().size(); ++i)
{
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.max_sizes()[i] < info.min_sizes()[i], "Max size should be greater than min size");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.max_sizes()[i] < info.min_sizes()[i],
+ "Max size should be greater than min size");
}
- if(output != nullptr && output->total_size() != 0)
+ if (output != nullptr && output->total_size() != 0)
{
ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(1) != 2);
}
@@ -80,7 +85,11 @@ Status validate_arguments(const ITensorInfo *input1, const ITensorInfo *input2,
return Status{};
}
-std::pair<Status, Window> validate_and_configure_window(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, const PriorBoxLayerInfo &info, int num_priors)
+std::pair<Status, Window> validate_and_configure_window(const ITensorInfo *input1,
+ const ITensorInfo *input2,
+ ITensorInfo *output,
+ const PriorBoxLayerInfo &info,
+ int num_priors)
{
ARM_COMPUTE_UNUSED(input2);
// Output tensor auto initialization if not yet initialized
@@ -88,10 +97,11 @@ std::pair<Status, Window> validate_and_configure_window(const ITensorInfo *input
auto_init_if_empty(*output, output_shape, 1, input1->data_type());
const unsigned int num_elems_processed_per_iteration = 4 * num_priors;
- Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
+ Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
bool window_changed = update_window_and_padding(win, output_access);
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+ Status err =
+ (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
return std::make_pair(err, win);
}
} // namespace
@@ -99,15 +109,28 @@ std::pair<Status, Window> validate_and_configure_window(const ITensorInfo *input
CLPriorBoxLayerKernel::CLPriorBoxLayerKernel()
: _input1(nullptr), _input2(nullptr), _output(nullptr), _info(), _num_priors(), _min(), _max(), _aspect_ratios()
{
+ _type = CLKernelType::ELEMENTWISE;
}
-void CLPriorBoxLayerKernel::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const PriorBoxLayerInfo &info, cl::Buffer *min, cl::Buffer *max, cl::Buffer *aspect_ratios)
+void CLPriorBoxLayerKernel::configure(const ICLTensor *input1,
+ const ICLTensor *input2,
+ ICLTensor *output,
+ const PriorBoxLayerInfo &info,
+ cl::Buffer *min,
+ cl::Buffer *max,
+ cl::Buffer *aspect_ratios)
{
configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, info, min, max, aspect_ratios);
}
-void CLPriorBoxLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const PriorBoxLayerInfo &info, cl::Buffer *min,
- cl::Buffer *max, cl::Buffer *aspect_ratios)
+void CLPriorBoxLayerKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input1,
+ const ICLTensor *input2,
+ ICLTensor *output,
+ const PriorBoxLayerInfo &info,
+ cl::Buffer *min,
+ cl::Buffer *max,
+ cl::Buffer *aspect_ratios)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
@@ -134,7 +157,7 @@ void CLPriorBoxLayerKernel::configure(const CLCompileContext &compile_context, c
int img_width = info.img_size().x;
int img_height = info.img_size().y;
- if(img_width == 0 || img_height == 0)
+ if (img_width == 0 || img_height == 0)
{
img_width = input2->info()->dimension(width_idx);
img_height = input2->info()->dimension(height_idx);
@@ -142,7 +165,7 @@ void CLPriorBoxLayerKernel::configure(const CLCompileContext &compile_context, c
float step_x = info.steps()[0];
float step_y = info.steps()[0];
- if(step_x == 0.f || step_y == 0.f)
+ if (step_x == 0.f || step_y == 0.f)
{
step_x = static_cast<float>(img_width) / layer_width;
step_y = static_cast<float>(img_height) / layer_height;
@@ -161,18 +184,20 @@ void CLPriorBoxLayerKernel::configure(const CLCompileContext &compile_context, c
build_opts.add_option("-DOFFSET=" + support::cpp11::to_string(info.offset()));
build_opts.add_option_if(info.clip(), "-DIN_PLACE");
- if(info.variances().size() > 1)
+ if (info.variances().size() > 1)
{
- for(unsigned int i = 0; i < info.variances().size(); ++i)
+ for (unsigned int i = 0; i < info.variances().size(); ++i)
{
- build_opts.add_option("-DVARIANCE_" + support::cpp11::to_string(i) + "=" + support::cpp11::to_string(info.variances().at(i)));
+ build_opts.add_option("-DVARIANCE_" + support::cpp11::to_string(i) + "=" +
+ support::cpp11::to_string(info.variances().at(i)));
}
}
else
{
- for(unsigned int i = 0; i < 4; ++i)
+ for (unsigned int i = 0; i < 4; ++i)
{
- build_opts.add_option("-DVARIANCE_" + support::cpp11::to_string(i) + "=" + support::cpp11::to_string(info.variances().at(0)));
+ build_opts.add_option("-DVARIANCE_" + support::cpp11::to_string(i) + "=" +
+ support::cpp11::to_string(info.variances().at(0)));
}
}
@@ -193,13 +218,17 @@ void CLPriorBoxLayerKernel::configure(const CLCompileContext &compile_context, c
ICLKernel::configure_internal(win_config.second);
}
-Status CLPriorBoxLayerKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const PriorBoxLayerInfo &info)
+Status CLPriorBoxLayerKernel::validate(const ITensorInfo *input1,
+ const ITensorInfo *input2,
+ const ITensorInfo *output,
+ const PriorBoxLayerInfo &info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input1, input2, output, info));
const int num_priors = info.aspect_ratios().size() * info.min_sizes().size() + info.max_sizes().size();
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input1->clone().get(), input2->clone().get(), output->clone().get(), info, num_priors)
- .first);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input1->clone().get(), input2->clone().get(),
+ output->clone().get(), info, num_priors)
+ .first);
return Status{};
}
@@ -210,8 +239,9 @@ void CLPriorBoxLayerKernel::run(const Window &window, cl::CommandQueue &queue)
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
queue.enqueueWriteBuffer(*_min, CL_TRUE, 0, _info.min_sizes().size() * sizeof(float), _info.min_sizes().data());
- queue.enqueueWriteBuffer(*_aspect_ratios, CL_TRUE, 0, _info.aspect_ratios().size() * sizeof(float), _info.aspect_ratios().data());
- if(!_info.max_sizes().empty())
+ queue.enqueueWriteBuffer(*_aspect_ratios, CL_TRUE, 0, _info.aspect_ratios().size() * sizeof(float),
+ _info.aspect_ratios().data());
+ if (!_info.max_sizes().empty())
{
queue.enqueueWriteBuffer(*_max, CL_TRUE, 0, _info.max_sizes().size() * sizeof(float), _info.max_sizes().data());
}
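The prior count that drives both validation and the window step is a straight product-plus-sum; with hypothetical layer settings of three aspect ratios, two min sizes and two max sizes:

#include <cstdio>

int main()
{
    const int num_aspect_ratios = 3;
    const int num_min_sizes     = 2;
    const int num_max_sizes     = 2;

    const int num_priors = num_aspect_ratios * num_min_sizes + num_max_sizes;  // 8
    const int step       = 4 * num_priors;                                     // 32, four coordinates per prior
    std::printf("num_priors=%d window_step=%d\n", num_priors, step);
    return 0;
}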
diff --git a/src/core/CL/kernels/CLPriorBoxLayerKernel.h b/src/core/CL/kernels/CLPriorBoxLayerKernel.h
index 6c369a7a4e..a50e0c5ff5 100644
--- a/src/core/CL/kernels/CLPriorBoxLayerKernel.h
+++ b/src/core/CL/kernels/CLPriorBoxLayerKernel.h
@@ -57,7 +57,13 @@ public:
* @param[in] max Maximum prior box values
* @param[in] aspect_ratios Aspect ratio values
*/
- void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const PriorBoxLayerInfo &info, cl::Buffer *min, cl::Buffer *max, cl::Buffer *aspect_ratios);
+ void configure(const ICLTensor *input1,
+ const ICLTensor *input2,
+ ICLTensor *output,
+ const PriorBoxLayerInfo &info,
+ cl::Buffer *min,
+ cl::Buffer *max,
+ cl::Buffer *aspect_ratios);
/** Set the input and output tensors.
*
* @param[in] compile_context The compile context to be used.
@@ -69,8 +75,14 @@ public:
* @param[in] max Maximum prior box values
* @param[in] aspect_ratios Aspect ratio values
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const PriorBoxLayerInfo &info, cl::Buffer *min, cl::Buffer *max,
- cl::Buffer *aspect_ratios);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input1,
+ const ICLTensor *input2,
+ ICLTensor *output,
+ const PriorBoxLayerInfo &info,
+ cl::Buffer *min,
+ cl::Buffer *max,
+ cl::Buffer *aspect_ratios);
/** Static function to check if given info will lead to a valid configuration of @ref CLPriorBoxLayerKernel
*
* @param[in] input1 First source tensor info. Data types supported: F32. Data layouts supported: NCHW/NHWC.
@@ -80,14 +92,17 @@ public:
*
* @return a status
*/
- static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const PriorBoxLayerInfo &info);
+ static Status validate(const ITensorInfo *input1,
+ const ITensorInfo *input2,
+ const ITensorInfo *output,
+ const PriorBoxLayerInfo &info);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
private:
- const ICLTensor *_input1;
- const ICLTensor *_input2;
+ const ICLTensor *_input1;
+ const ICLTensor *_input2;
ICLTensor *_output;
PriorBoxLayerInfo _info;
int _num_priors;
diff --git a/src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.cpp b/src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.cpp
index ccc61783c4..731fcb8e04 100644
--- a/src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.cpp
+++ b/src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,8 +22,12 @@
* SOFTWARE.
*/
#include "src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.h"
+
#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/StringSupport.h"
@@ -47,15 +51,19 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
const uint32_t temp_num_elems_processed_per_iteration = max_cl_vector_width / input->element_size();
/* If width is less then step, then make step same as width to avoid global size being step instead of actual width. */
/* Or we should fix in arm_compute::enqueue() or arm_compute::calculate_max_window(). */
- const uint32_t num_elems_processed_per_iteration = (input->dimension(0) < temp_num_elems_processed_per_iteration) ? input->dimension(0) : temp_num_elems_processed_per_iteration;
+ const uint32_t num_elems_processed_per_iteration = (input->dimension(0) < temp_num_elems_processed_per_iteration)
+ ? input->dimension(0)
+ : temp_num_elems_processed_per_iteration;
// This kernel doesn't need padding
Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
- output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
return std::make_pair(Status{}, win);
}
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *weight, const ITensorInfo *bias)
+Status validate_arguments(const ITensorInfo *input,
+ const ITensorInfo *output,
+ const ITensorInfo *weight,
+ const ITensorInfo *bias)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weight, bias, output);
@@ -71,7 +79,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(weight, bias);
// Checks performed when output is configured
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
@@ -83,12 +91,17 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
CLQLSTMLayerNormalizationKernel::CLQLSTMLayerNormalizationKernel()
: _input(nullptr), _weight(nullptr), _bias(nullptr), _output(nullptr)
{
+ _type = CLKernelType::ELEMENTWISE;
}
-void CLQLSTMLayerNormalizationKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *weight, const ICLTensor *bias)
+void CLQLSTMLayerNormalizationKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ const ICLTensor *weight,
+ const ICLTensor *bias)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weight, bias, output);
- auto padding_info = get_padding_info({ input, weight, bias, output });
+ auto padding_info = get_padding_info({input, weight, bias, output});
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), weight->info(), bias->info()));
@@ -102,7 +115,8 @@ void CLQLSTMLayerNormalizationKernel::configure(const CLCompileContext &compile_
int32_t output_multiplier{};
int32_t output_shift{};
const UniformQuantizationInfo quan_info = _weight->info()->quantization_info().uniform();
- const Status status = quantization::calculate_quantized_multiplier(quan_info.scale, &output_multiplier, &output_shift);
+ const Status status =
+ quantization::calculate_quantized_multiplier(quan_info.scale, &output_multiplier, &output_shift);
output_shift *= -1;
// Set build options
@@ -112,8 +126,12 @@ void CLQLSTMLayerNormalizationKernel::configure(const CLCompileContext &compile_
build_opts.add_option("-DWIDTH=" + support::cpp11::to_string(input->info()->dimension(0)));
build_opts.add_option("-DOUTPUT_MULTIPLIER=" + support::cpp11::to_string(output_multiplier));
build_opts.add_option("-DOUTPUT_SHIFT=" + support::cpp11::to_string(output_shift));
- build_opts.add_option("-DMIN_BOUND=" + support::cpp11::to_string(std::get<0>(quantization::get_min_max_values_from_quantized_data_type(input->info()->data_type()))));
- build_opts.add_option("-DMAX_BOUND=" + support::cpp11::to_string(std::get<1>(quantization::get_min_max_values_from_quantized_data_type(input->info()->data_type()))));
+ build_opts.add_option("-DMIN_BOUND=" +
+ support::cpp11::to_string(std::get<0>(
+ quantization::get_min_max_values_from_quantized_data_type(input->info()->data_type()))));
+ build_opts.add_option("-DMAX_BOUND=" +
+ support::cpp11::to_string(std::get<1>(
+ quantization::get_min_max_values_from_quantized_data_type(input->info()->data_type()))));
// Create kernel
_kernel = create_kernel(compile_context, "qlstm_layer_normalization", build_opts.options());
@@ -133,12 +151,18 @@ void CLQLSTMLayerNormalizationKernel::configure(const CLCompileContext &compile_
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-void CLQLSTMLayerNormalizationKernel::configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *weight, const ICLTensor *bias)
+void CLQLSTMLayerNormalizationKernel::configure(const ICLTensor *input,
+ ICLTensor *output,
+ const ICLTensor *weight,
+ const ICLTensor *bias)
{
configure(CLKernelLibrary::get().get_compile_context(), input, output, weight, bias);
}
-Status CLQLSTMLayerNormalizationKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *weight, const ITensorInfo *bias)
+Status CLQLSTMLayerNormalizationKernel::validate(const ITensorInfo *input,
+ const ITensorInfo *output,
+ const ITensorInfo *weight,
+ const ITensorInfo *bias)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, weight, bias));
ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first);
@@ -169,7 +193,6 @@ void CLQLSTMLayerNormalizationKernel::run(const Window &window, cl::CommandQueue
add_2D_tensor_argument(idx, _output, slice);
enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_2D(slice));
+ } while (window.slide_window_slice_2D(slice));
}
} // namespace arm_compute
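
Note on the OUTPUT_MULTIPLIER / OUTPUT_SHIFT build options set in the configure() above: the weight scale is folded into an integer multiplier plus a power-of-two shift, and the shift is then negated for the kernel (output_shift *= -1). A minimal, self-contained sketch of that decomposition follows; the rounding and sign conventions of quantization::calculate_quantized_multiplier() are assumptions here, not the library's exact behaviour.

#include <cmath>
#include <cstdint>
#include <cstdio>

// Illustrative sketch only: decompose a positive float scale into a Q0.31
// fixed-point multiplier and a power-of-two exponent, so that
// scale ~= multiplier * 2^-31 * 2^exponent.
static void decompose_scale(float scale, int32_t *multiplier, int32_t *shift)
{
    int          exponent = 0;
    const double mantissa = std::frexp(scale, &exponent); // mantissa in [0.5, 1)
    int64_t      q        = static_cast<int64_t>(std::llround(mantissa * (1ll << 31)));
    if (q == (1ll << 31)) // mantissa rounded up to 1.0
    {
        q /= 2;
        ++exponent;
    }
    *multiplier = static_cast<int32_t>(q);
    *shift      = exponent;
}

int main()
{
    int32_t m = 0, s = 0;
    decompose_scale(0.0015f, &m, &s);
    // The patch negates the shift before passing it to the kernel, so a
    // positive value ends up meaning a right shift in the kernel convention.
    std::printf("OUTPUT_MULTIPLIER=%d OUTPUT_SHIFT=%d\n", m, -s);
    return 0;
}
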
diff --git a/src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.h b/src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.h
index 31085c37ba..ba912e1d2d 100644
--- a/src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.h
+++ b/src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.h
@@ -63,7 +63,11 @@ public:
* @param[in] weight Weight tensor. Data types supported: Same as @p input.
* @param[in] bias Bias tensor. Data types supported: S32.
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *weight, const ICLTensor *bias);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ const ICLTensor *weight,
+ const ICLTensor *bias);
/** Static function to check if given info will lead to a valid configuration of @ref CLQLSTMLayerNormalizationKernel
*
* @param[in] input Source tensor info with 2 dimensions. Data types supported: QSYMM16.
@@ -73,7 +77,8 @@ public:
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *weight, const ITensorInfo *bias);
+ static Status
+ validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *weight, const ITensorInfo *bias);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/src/core/CL/kernels/CLQuantizationLayerKernel.cpp b/src/core/CL/kernels/CLQuantizationLayerKernel.cpp
deleted file mode 100644
index 76e703f0dd..0000000000
--- a/src/core/CL/kernels/CLQuantizationLayerKernel.cpp
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLQuantizationLayerKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F32, DataType::F16);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
-
- // Output must always be initialized
- ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape().total_size() == 0);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QASYMM16);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
-
- return Status{};
-}
-} // namespace
-
-CLQuantizationLayerKernel::CLQuantizationLayerKernel()
- : _input(nullptr), _output(nullptr)
-{
-}
-
-void CLQuantizationLayerKernel::configure(const ICLTensor *input, ICLTensor *output)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, output);
-}
-
-void CLQuantizationLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- auto padding_info = get_padding_info({ input, output });
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
-
- _input = input;
- _output = output;
-
- const int vec_size_x = 16 / input->info()->element_size();
- const int input_width_x = input->info()->tensor_shape().x();
- const bool multi_access_x = (input_width_x / vec_size_x > 0);
-
- const UniformQuantizationInfo qinfo = output->info()->quantization_info().uniform();
- const DataType output_data_type = output->info()->data_type();
-
- float scale_to_apply = qinfo.scale;
- int32_t offset_to_apply = qinfo.offset;
- if(is_data_type_quantized_asymmetric(_input->info()->data_type()))
- {
- /*
- * In case of requantization of a quantized input tensor to an output tensor with another quantization
- * instead of applying dequantization and then quantization, we just compute a new scale and
- * offset to apply.
- *
- * Assuming:
- * - q_i as input quantized value
- * - q_o as output quantized value
- * - z_i as input quantization offset value
- * - z_o as output quantization offset value
- * - s_i as input quantization scale value
- * - s_o as output quantization scale value
- * - z_n as new quantization offset value
- * - s_n as new quantization scale value
- *
- * q_o = ( q_i - z_i ) * s_i / s_o + z_o
- *
- * We can rewrite the formula as:
- *
- * q_o = ( q_i * s_i / s_o ) - z_i * s_i / s_o + z_o
- *
- * q_o = q_i / s_n + z_n
- *
- * Where:
- *
- * s_n = s_o / s_i
- *
- * z_n = - z_i * s_i / s_o + z_o
- *
- */
- const UniformQuantizationInfo qinfo_in = _input->info()->quantization_info().uniform();
- scale_to_apply /= qinfo_in.scale;
- // In order to minimize flooring we convert the offset to a float,
- // then compute the new offset in the float domain,
- // finally we convert it back as int32_t
- offset_to_apply -= static_cast<int32_t>(static_cast<float>(qinfo_in.offset) * qinfo_in.scale / qinfo.scale);
- }
-
- // Create kernel
- CLBuildOptions build_opts;
- build_opts.add_option_if(is_data_type_float(_input->info()->data_type()), "-DIS_FLOAT");
- build_opts.add_option("-DSCALE=" + float_to_string_with_full_precision(scale_to_apply));
- build_opts.add_option("-DOFFSET=" + support::cpp11::to_string(offset_to_apply));
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
- build_opts.add_option("-DDATA_TYPE_IN=" + get_cl_type_from_data_type(input->info()->data_type()));
- build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output_data_type));
- build_opts.add_option_if(multi_access_x, "-DLAST_ACCESSED_X=" + support::cpp11::to_string(std::max<int>(input_width_x - vec_size_x, 0)));
- std::pair<int, int> min_max_quant_values = quantization::get_min_max_values_from_quantized_data_type(output_data_type);
- build_opts.add_option("-DMIN_QUANT_VAL=" + support::cpp11::to_string(min_max_quant_values.first));
- build_opts.add_option("-DMAX_QUANT_VAL=" + support::cpp11::to_string(min_max_quant_values.second));
-
- _kernel = create_kernel(compile_context, "quantization_layer", build_opts.options());
-
- // Configure kernel window
- Window win = calculate_max_window(*input->info(), Steps());
- if(multi_access_x)
- {
- win.set(Window::DimX, Window::Dimension(win.x().start(), ceil_to_multiple(win.x().end(), vec_size_x), vec_size_x));
- }
- ICLKernel::configure_internal(win);
-
- output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape()));
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status CLQuantizationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output));
- return Status{};
-}
-
-void CLQuantizationLayerKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), 3);
- Window slice = window_collapsed.first_slice_window_3D();
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, slice);
- add_3D_tensor_argument(idx, _output, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window_collapsed.slide_window_slice_3D(slice));
-}
-} // namespace arm_compute
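
The block comment in the removed configure() above derives how a quantize-to-quantize copy can be folded into a single scale and offset. A stand-alone sketch of that folding, using the same symbols (s_i, z_i for the input, s_o, z_o for the output; the helper name is hypothetical):

#include <cstdint>
#include <cstdio>

struct UniformQInfo { float scale; int32_t offset; };

// Fold dequantize-with-(s_i, z_i) followed by quantize-with-(s_o, z_o) into
// one scale/offset pair:
//   q_o = (q_i - z_i) * s_i / s_o + z_o  ==>  q_o = q_i / s_n + z_n
// with s_n = s_o / s_i and z_n = z_o - z_i * s_i / s_o.
static void fold_requantization(UniformQInfo in, UniformQInfo out, float *scale_to_apply, int32_t *offset_to_apply)
{
    *scale_to_apply  = out.scale / in.scale;                                                                      // s_n
    *offset_to_apply = out.offset - static_cast<int32_t>(static_cast<float>(in.offset) * in.scale / out.scale);   // z_n
}

int main()
{
    float   s_n = 0.f;
    int32_t z_n = 0;
    fold_requantization({0.25f, 10}, {0.5f, 3}, &s_n, &z_n);
    std::printf("s_n=%f z_n=%d\n", s_n, z_n); // s_n=2, z_n=-2
    return 0;
}
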
diff --git a/src/core/CL/kernels/CLQuantizationLayerKernel.h b/src/core/CL/kernels/CLQuantizationLayerKernel.h
deleted file mode 100644
index e9d03decb3..0000000000
--- a/src/core/CL/kernels/CLQuantizationLayerKernel.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLQUANTIZATIONLAYERKERNEL_H
-#define ARM_COMPUTE_CLQUANTIZATIONLAYERKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the quantization layer kernel.
- *
- * @note The implementation supports only 3D input tensors.
- */
-class CLQuantizationLayerKernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLQuantizationLayerKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLQuantizationLayerKernel(const CLQuantizationLayerKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLQuantizationLayerKernel &operator=(const CLQuantizationLayerKernel &) = delete;
- /** Default Move Constructor. */
- CLQuantizationLayerKernel(CLQuantizationLayerKernel &&) = default;
- /** Default move assignment operator */
- CLQuantizationLayerKernel &operator=(CLQuantizationLayerKernel &&) = default;
- /** Default destructor */
- ~CLQuantizationLayerKernel() = default;
- /** Set the input, output.
- *
- * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16.
- * @param[out] output Destination tensor with the same dimensions of input. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16.
- *
- * @note Output auto initialization is not supported by this kernel
- */
- void configure(const ICLTensor *input, ICLTensor *output);
- /** Set the input, output.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16.
- * @param[out] output Destination tensor with the same dimensions of input. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16.
- *
- * @note Output auto initialization is not supported by this kernel
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
- /** Static function to check if given info will lead to a valid configuration of @ref CLQuantizationLayerKernel
- *
- * @param[in] input Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16.
- * @param[in] output Destination tensor info with the same dimensions of input. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *output);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_input;
- ICLTensor *_output;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLQUANTIZATIONLAYERKERNEL_H */
diff --git a/src/core/CL/kernels/CLROIAlignLayerKernel.cpp b/src/core/CL/kernels/CLROIAlignLayerKernel.cpp
index 87f4a5d7f3..c97910ef79 100644
--- a/src/core/CL/kernels/CLROIAlignLayerKernel.cpp
+++ b/src/core/CL/kernels/CLROIAlignLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,14 +25,13 @@
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLArray.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
@@ -44,24 +43,29 @@ namespace arm_compute
{
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *rois, ITensorInfo *output, const ROIPoolingLayerInfo &pool_info)
+Status validate_arguments(const ITensorInfo *input,
+ const ITensorInfo *rois,
+ ITensorInfo *output,
+ const ROIPoolingLayerInfo &pool_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, rois, output);
ARM_COMPUTE_RETURN_ERROR_ON(rois->dimension(0) != 5);
ARM_COMPUTE_RETURN_ERROR_ON(rois->num_dimensions() > 2);
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F32, DataType::F16);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED,
+ DataType::F32, DataType::F16);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(input, DataLayout::NHWC, DataLayout::NCHW);
ARM_COMPUTE_RETURN_ERROR_ON((pool_info.pooled_width() == 0) || (pool_info.pooled_height() == 0));
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(compute_roi_align_shape(*input, *rois, pool_info), output->tensor_shape());
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(compute_roi_align_shape(*input, *rois, pool_info),
+ output->tensor_shape());
}
- if(is_data_type_quantized_asymmetric(input->data_type()))
+ if (is_data_type_quantized_asymmetric(input->data_type()))
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(rois, 1, DataType::QASYMM16);
@@ -81,14 +85,22 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *rois, ITe
CLROIAlignLayerKernel::CLROIAlignLayerKernel()
: _input(nullptr), _output(nullptr), _rois(nullptr), _pool_info(0, 0, 0.f)
{
+ _type = CLKernelType::ELEMENTWISE;
}
-void CLROIAlignLayerKernel::configure(const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info)
+void CLROIAlignLayerKernel::configure(const ICLTensor *input,
+ const ICLTensor *rois,
+ ICLTensor *output,
+ const ROIPoolingLayerInfo &pool_info)
{
configure(CLKernelLibrary::get().get_compile_context(), input, rois, output, pool_info);
}
-void CLROIAlignLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info)
+void CLROIAlignLayerKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ const ICLTensor *rois,
+ ICLTensor *output,
+ const ROIPoolingLayerInfo &pool_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, rois);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), rois->info(), output->info(), pool_info));
@@ -98,7 +110,7 @@ void CLROIAlignLayerKernel::configure(const CLCompileContext &compile_context, c
auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type());
output->info()->set_data_layout(input->info()->data_layout());
- auto padding_info = get_padding_info({ input, rois, output });
+ auto padding_info = get_padding_info({input, rois, output});
_input = input;
_output = output;
@@ -112,16 +124,23 @@ void CLROIAlignLayerKernel::configure(const CLCompileContext &compile_context, c
CLBuildOptions build_opts;
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
build_opts.add_option("-DDATA_SIZE=" + get_data_size_from_data_type(input->info()->data_type()));
- build_opts.add_option("-DMAX_DIM_X=" + support::cpp11::to_string(_input->info()->dimension(get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH))));
- build_opts.add_option("-DMAX_DIM_Y=" + support::cpp11::to_string(_input->info()->dimension(get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT))));
- build_opts.add_option("-DMAX_DIM_Z=" + support::cpp11::to_string(_input->info()->dimension(get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::CHANNEL))));
+ build_opts.add_option("-DMAX_DIM_X=" +
+ support::cpp11::to_string(_input->info()->dimension(get_data_layout_dimension_index(
+ input->info()->data_layout(), DataLayoutDimension::WIDTH))));
+ build_opts.add_option("-DMAX_DIM_Y=" +
+ support::cpp11::to_string(_input->info()->dimension(get_data_layout_dimension_index(
+ input->info()->data_layout(), DataLayoutDimension::HEIGHT))));
+ build_opts.add_option("-DMAX_DIM_Z=" +
+ support::cpp11::to_string(_input->info()->dimension(get_data_layout_dimension_index(
+ input->info()->data_layout(), DataLayoutDimension::CHANNEL))));
build_opts.add_option("-DPOOLED_DIM_X=" + support::cpp11::to_string(pool_info.pooled_width()));
build_opts.add_option("-DPOOLED_DIM_Y=" + support::cpp11::to_string(pool_info.pooled_height()));
build_opts.add_option("-DSPATIAL_SCALE=" + float_to_string_with_full_precision(pool_info.spatial_scale()));
build_opts.add_option_if(input->info()->data_layout() == DataLayout::NHWC, "-DNHWC");
- build_opts.add_option_if(pool_info.sampling_ratio() > 0, "-DSAMPLING_RATIO=" + support::cpp11::to_string(pool_info.sampling_ratio()));
+ build_opts.add_option_if(pool_info.sampling_ratio() > 0,
+ "-DSAMPLING_RATIO=" + support::cpp11::to_string(pool_info.sampling_ratio()));
- if(is_qasymm)
+ if (is_qasymm)
{
const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
const UniformQuantizationInfo roisq_info = rois->info()->quantization_info().uniform();
@@ -145,7 +164,10 @@ void CLROIAlignLayerKernel::configure(const CLCompileContext &compile_context, c
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-Status CLROIAlignLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *rois, ITensorInfo *output, const ROIPoolingLayerInfo &pool_info)
+Status CLROIAlignLayerKernel::validate(const ITensorInfo *input,
+ const ITensorInfo *rois,
+ ITensorInfo *output,
+ const ROIPoolingLayerInfo &pool_info)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, rois, output, pool_info));
return Status{};
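
The MAX_DIM_X/Y/Z options above are looked up through get_data_layout_dimension_index(), i.e. the dimension index of WIDTH/HEIGHT/CHANNEL depends on whether the tensor is NCHW or NHWC. A small sketch of the assumed mapping (illustration only, not the library implementation):

#include <cstdio>

// Compute Library stores the innermost dimension first, so the same logical
// dimension lands on a different index per layout: NCHW -> [W, H, C, N],
// NHWC -> [C, W, H, N].
enum class Layout { NCHW, NHWC };
enum class Dim { WIDTH, HEIGHT, CHANNEL };

static int dim_index(Layout layout, Dim dim)
{
    if (layout == Layout::NCHW)
    {
        return (dim == Dim::WIDTH) ? 0 : (dim == Dim::HEIGHT) ? 1 : 2; // [W, H, C, N]
    }
    return (dim == Dim::CHANNEL) ? 0 : (dim == Dim::WIDTH) ? 1 : 2;    // [C, W, H, N]
}

int main()
{
    std::printf("NHWC WIDTH index = %d\n", dim_index(Layout::NHWC, Dim::WIDTH)); // 1
    return 0;
}
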
diff --git a/src/core/CL/kernels/CLROIAlignLayerKernel.h b/src/core/CL/kernels/CLROIAlignLayerKernel.h
index cbf0e00165..2e84e5d303 100644
--- a/src/core/CL/kernels/CLROIAlignLayerKernel.h
+++ b/src/core/CL/kernels/CLROIAlignLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,9 +24,7 @@
#ifndef ARM_COMPUTE_CLROIALIGNLAYERKERNEL_H
#define ARM_COMPUTE_CLROIALIGNLAYERKERNEL_H
-#include "arm_compute/core/CL/ICLArray.h"
#include "src/core/CL/ICLKernel.h"
-
namespace arm_compute
{
class ICLTensor;
@@ -63,7 +61,8 @@ public:
* @note The z dimensions of @p output tensor and @p input tensor must be the same.
* @note The fourth dimension of @p output tensor must be the same as the number of elements in @p rois array.
*/
- void configure(const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info);
+ void
+ configure(const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info);
/** Set the input and output tensors.
*
* @param[in] compile_context The compile context to be used.
@@ -79,7 +78,11 @@ public:
* @note The z dimensions of @p output tensor and @p input tensor must be the same.
* @note The fourth dimension of @p output tensor must be the same as the number of elements in @p rois array.
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ const ICLTensor *rois,
+ ICLTensor *output,
+ const ROIPoolingLayerInfo &pool_info);
/** Static function to check if given info will lead to a valid configuration of @ref CLROIAlignLayerKernel
*
* @param[in] input Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
@@ -95,7 +98,10 @@ public:
*
* @return a Status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *rois, ITensorInfo *output, const ROIPoolingLayerInfo &pool_info);
+ static Status validate(const ITensorInfo *input,
+ const ITensorInfo *rois,
+ ITensorInfo *output,
+ const ROIPoolingLayerInfo &pool_info);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue);
diff --git a/src/core/CL/kernels/CLROIPoolingLayerKernel.cpp b/src/core/CL/kernels/CLROIPoolingLayerKernel.cpp
index 43492a3d50..1b2c414a49 100644
--- a/src/core/CL/kernels/CLROIPoolingLayerKernel.cpp
+++ b/src/core/CL/kernels/CLROIPoolingLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,82 +25,82 @@
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLArray.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
-#include "src/core/AccessWindowStatic.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/StringSupport.h"
+#include <cfloat>
#include <cmath>
-#include <set>
#include <string>
namespace arm_compute
{
-namespace
-{
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *rois, ITensorInfo *output, const ROIPoolingLayerInfo &pool_info)
+CLROIPoolingLayerKernel::CLROIPoolingLayerKernel()
+ : _input(nullptr), _rois(nullptr), _output(nullptr), _pool_info(0, 0, 0.f)
{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- // Output auto initialization if not yet initialized
- TensorShape output_shape(pool_info.pooled_width(), pool_info.pooled_height(), input->dimension(2), rois->dimension(1));
- auto_init_if_empty((*output), output_shape, 1, input->data_type());
-
- // Configure kernel window
- constexpr unsigned int num_elems_processed_per_iteration = 1;
- Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
-
- AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal input_access(input, input->valid_region().start(0), num_elems_processed_per_iteration);
-
- bool window_changed = update_window_and_padding(win, input_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
+ _type = CLKernelType::ELEMENTWISE;
}
-} // namespace
-CLROIPoolingLayerKernel::CLROIPoolingLayerKernel()
- : _input(nullptr), _rois(nullptr), _output(nullptr), _pool_info(0, 0, 0.f)
+Status CLROIPoolingLayerKernel::validate(const ITensorInfo *input,
+ const ITensorInfo *rois,
+ const ITensorInfo *output,
+ const ROIPoolingLayerInfo &pool_info)
{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, rois, output);
+
+ //Validate arguments
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, rois, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(rois, 1, DataType::U16);
+ ARM_COMPUTE_RETURN_ERROR_ON(rois->dimension(0) != 5);
+ ARM_COMPUTE_RETURN_ERROR_ON(rois->num_dimensions() > 2);
+ ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16, DataType::QASYMM8);
+ ARM_COMPUTE_RETURN_ERROR_ON((pool_info.pooled_width() == 0) || (pool_info.pooled_height() == 0));
+
+ if (output->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON((output->dimension(0) != pool_info.pooled_width()) ||
+ (output->dimension(1) != pool_info.pooled_height()));
+ ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) != output->dimension(2));
+ ARM_COMPUTE_RETURN_ERROR_ON(rois->dimension(1) != output->dimension(3));
+ }
+
+ return Status{};
}
-void CLROIPoolingLayerKernel::configure(const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info)
+void CLROIPoolingLayerKernel::configure(const ICLTensor *input,
+ const ICLTensor *rois,
+ ICLTensor *output,
+ const ROIPoolingLayerInfo &pool_info)
{
configure(CLKernelLibrary::get().get_compile_context(), input, rois, output, pool_info);
}
-void CLROIPoolingLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info)
+void CLROIPoolingLayerKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ const ICLTensor *rois,
+ const ICLTensor *output,
+ const ROIPoolingLayerInfo &pool_info)
{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, rois, output);
+ ARM_COMPUTE_ERROR_THROW_ON(
+ CLROIPoolingLayerKernel::validate(input->info(), rois->info(), output->info(), pool_info));
- //Validate arguments
- ARM_COMPUTE_ERROR_ON_NULLPTR(input->info(), rois->info(), output->info());
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(rois, 1, DataType::U16);
- ARM_COMPUTE_ERROR_ON(rois->info()->dimension(0) != 5);
- ARM_COMPUTE_ERROR_ON(rois->info()->num_dimensions() > 2);
- ARM_COMPUTE_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16);
- ARM_COMPUTE_ERROR_ON((pool_info.pooled_width() == 0) || (pool_info.pooled_height() == 0));
-
- if(output->info()->total_size() != 0)
- {
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- ARM_COMPUTE_ERROR_ON((output->info()->dimension(0) != pool_info.pooled_width()) || (output->info()->dimension(1) != pool_info.pooled_height()));
- ARM_COMPUTE_ERROR_ON(input->info()->dimension(2) != output->info()->dimension(2));
- ARM_COMPUTE_ERROR_ON(rois->info()->dimension(1) != output->info()->dimension(3));
- }
+ auto padding_info = get_padding_info({input, rois, output});
- // Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), rois->info(), output->info(), pool_info);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+ // Output auto initialization if not yet initialized
+ TensorShape output_shape(pool_info.pooled_width(), pool_info.pooled_height(), input->info()->dimension(2),
+ rois->info()->dimension(1));
+ auto_init_if_empty(*(output->info()), output_shape, 1, input->info()->data_type(),
+ output->info()->quantization_info());
// Set instance variables
_input = input;
@@ -108,27 +108,46 @@ void CLROIPoolingLayerKernel::configure(const CLCompileContext &compile_context,
_output = output;
_pool_info = pool_info;
+ const DataType data_type = input->info()->data_type();
+ const bool is_qasymm = is_data_type_quantized_asymmetric(data_type);
+
// Set build options
- std::set<std::string> build_opts;
- build_opts.emplace(("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())));
- build_opts.emplace(("-DDATA_SIZE=" + get_data_size_from_data_type(input->info()->data_type())));
- build_opts.emplace(("-DMAX_DIM_X=" + support::cpp11::to_string(_input->info()->dimension(Window::DimX))));
- build_opts.emplace(("-DMAX_DIM_Y=" + support::cpp11::to_string(_input->info()->dimension(Window::DimY))));
- build_opts.emplace(("-DMAX_DIM_Z=" + support::cpp11::to_string(_input->info()->dimension(Window::DimZ))));
- build_opts.emplace(("-DPOOLED_DIM_X=" + support::cpp11::to_string(pool_info.pooled_width())));
- build_opts.emplace(("-DPOOLED_DIM_Y=" + support::cpp11::to_string(pool_info.pooled_height())));
- build_opts.emplace(("-DSPATIAL_SCALE=" + support::cpp11::to_string(pool_info.spatial_scale())));
+ CLBuildOptions build_opts;
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
+ build_opts.add_option("-DDATA_SIZE=" + get_data_size_from_data_type(data_type));
+ build_opts.add_option("-DMAX_DIM_X=" + support::cpp11::to_string(_input->info()->dimension(Window::DimX)));
+ build_opts.add_option("-DMAX_DIM_Y=" + support::cpp11::to_string(_input->info()->dimension(Window::DimY)));
+ build_opts.add_option("-DMAX_DIM_Z=" + support::cpp11::to_string(_input->info()->dimension(Window::DimZ)));
+ build_opts.add_option("-DPOOLED_DIM_X=" + support::cpp11::to_string(pool_info.pooled_width()));
+ build_opts.add_option("-DPOOLED_DIM_Y=" + support::cpp11::to_string(pool_info.pooled_height()));
+ build_opts.add_option("-DSPATIAL_SCALE=" + support::cpp11::to_string(pool_info.spatial_scale()));
+
+ if (is_qasymm)
+ {
+ // Determine quantization info scale, offset
+ UniformQuantizationInfo uqinfo = UniformQuantizationInfo();
+ uqinfo = compute_requantization_scale_offset(_input->info()->quantization_info().uniform(),
+ _output->info()->quantization_info().uniform());
+ build_opts.add_option("-DOFFSET_OUT=" + float_to_string_with_full_precision(uqinfo.offset));
+ build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(uqinfo.scale));
+
+ // Specify minimum possible value of datatype
+ build_opts.add_option("-DMIN_VALUE=" + support::cpp11::to_string(0));
+ }
+ else
+ {
+ // Specify min value of F32 datatype
+ build_opts.add_option("-DMIN_VALUE=" + support::cpp11::to_string(-FLT_MAX));
+ }
+
+ Window win = calculate_max_window(*(output->info()), Steps());
+ ICLKernel::configure_internal(win);
// Create kernel
std::string kernel_name = "roi_pooling_layer";
- _kernel = create_kernel(compile_context, kernel_name, build_opts);
-
- // Set static kernel arguments
- unsigned int idx = 2 * num_arguments_per_3D_tensor() + num_arguments_per_1D_array();
- add_argument<cl_uint>(idx, _input->info()->strides_in_bytes()[3]);
- add_argument<cl_uint>(idx, _output->info()->strides_in_bytes()[3]);
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
- ICLKernel::configure_internal(win_config.second);
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
void CLROIPoolingLayerKernel::run(const Window &window, cl::CommandQueue &queue)
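
A short note on the new -DMIN_VALUE option in the configure() above: the natural reading is that it seeds the max accumulation over each ROI bin, so it is the smallest representable value of the element type (0 for the unsigned QASYMM8 storage, -FLT_MAX for float). A toy sketch of that idea (not the OpenCL kernel itself):

#include <algorithm>
#include <cfloat>
#include <cstdint>
#include <cstdio>

// A max reduction needs an identity element to start from; MIN_VALUE plays
// that role in the kernel.
template <typename T>
static T max_pool(const T *values, int count, T min_value)
{
    T acc = min_value; // identity element for max()
    for (int i = 0; i < count; ++i)
    {
        acc = std::max(acc, values[i]);
    }
    return acc;
}

int main()
{
    const float   f[3] = {-3.f, -1.5f, -7.f};
    const uint8_t q[3] = {12, 200, 45};
    std::printf("%f %u\n", max_pool(f, 3, -FLT_MAX), static_cast<unsigned>(max_pool<uint8_t>(q, 3, 0)));
    return 0;
}
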
diff --git a/src/core/CL/kernels/CLROIPoolingLayerKernel.h b/src/core/CL/kernels/CLROIPoolingLayerKernel.h
index 35f42a9676..80bfb63092 100644
--- a/src/core/CL/kernels/CLROIPoolingLayerKernel.h
+++ b/src/core/CL/kernels/CLROIPoolingLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,9 +25,6 @@
#define ARM_COMPUTE_CLROIPOOLINGLAYERKERNEL_H
#include "src/core/CL/ICLKernel.h"
-
-#include "arm_compute/core/CL/ICLArray.h"
-
namespace arm_compute
{
class ICLTensor;
@@ -62,11 +59,12 @@ public:
* @note The z dimensions of @p output tensor and @p input tensor must be the same.
* @note The fourth dimension of @p output tensor must be the same as the number of elements in @p rois array.
*/
- void configure(const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info);
+ void
+ configure(const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info);
/** Set the input and output tensors.
*
* @param[in] compile_context The compile context to be used.
- * @param[in] input Source tensor. Data types supported: F16/F32.
+ * @param[in] input Source tensor. Data types supported: F16/F32/QASYMM8
* @param[in] rois ROIs tensor, it is a 2D tensor of size [5, N] (where N is the number of ROIs) containing top left and bottom right corner
* as coordinate of an image and batch_id of ROI [ batch_id, x1, y1, x2, y2 ]. Data types supported: U16
* @param[out] output Destination tensor. Data types supported: Same as @p input.
@@ -77,15 +75,37 @@ public:
* @note The z dimensions of @p output tensor and @p input tensor must be the same.
* @note The fourth dimension of @p output tensor must be the same as the number of elements in @p rois array.
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ const ICLTensor *rois,
+ const ICLTensor *output,
+ const ROIPoolingLayerInfo &pool_info);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
+ /** Static function to check whether the given info will lead to a valid configuration of @ref CLROIPoolingLayerKernel
+ *
+ * @param[in] input Source tensor. Data types supported: F16/F32/QASYMM8
+ * @param[in] rois ROIs tensor, it is a 2D tensor of size [5, N] (where N is the number of ROIs) containing top left and bottom right corner
+ * as coordinate of an image and batch_id of ROI [ batch_id, x1, y1, x2, y2 ]. Data types supported: U16
+ * @param[in] output Destination tensor info. Data types supported: Same as @p input.
+ * @param[in] pool_info Contains pooling operation information described in @ref ROIPoolingLayerInfo.
+ *
+ * @note The x and y dimensions of @p output tensor must be the same as @p pool_info 's pooled
+ * width and pooled height.
+ * @note The z dimensions of @p output tensor and @p input tensor must be the same.
+ * @note The fourth dimension of @p output tensor must be the same as the number of elements in @p rois array.
+ */
+ static Status validate(const ITensorInfo *input,
+ const ITensorInfo *rois,
+ const ITensorInfo *output,
+ const ROIPoolingLayerInfo &pool_info);
+
private:
const ICLTensor *_input;
const ICLTensor *_rois;
- ICLTensor *_output;
+ const ICLTensor *_output;
ROIPoolingLayerInfo _pool_info;
};
} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLRangeKernel.cpp b/src/core/CL/kernels/CLRangeKernel.cpp
index 85f79988c9..622f6210b9 100644
--- a/src/core/CL/kernels/CLRangeKernel.cpp
+++ b/src/core/CL/kernels/CLRangeKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,6 +26,9 @@
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
@@ -40,11 +43,8 @@ constexpr unsigned int vector_size_byte_opencl = 16;
Status validate_arguments(const ITensorInfo *output, const float start, const float end, const float step)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output,
- 1,
- DataType::U8, DataType::S8, DataType::QASYMM8,
- DataType::U16, DataType::S16,
- DataType::U32, DataType::S32,
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::S8, DataType::QASYMM8,
+ DataType::U16, DataType::S16, DataType::U32, DataType::S32,
DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(output);
@@ -54,20 +54,24 @@ Status validate_arguments(const ITensorInfo *output, const float start, const fl
ARM_COMPUTE_RETURN_ERROR_ON_MSG((start == end), "start of the requested sequence must not be equal to the end");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(!check_value_range(start, output->data_type(), output->quantization_info()), "start value is outside the range of the data type");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(!check_value_range(end, output->data_type(), output->quantization_info()), "end value is outside the range of the data type");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(!check_value_range(step, output->data_type(), output->quantization_info()), "step value is outside the range of the data type");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!check_value_range(start, output->data_type(), output->quantization_info()),
+ "start value is outside the range of the data type");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!check_value_range(end, output->data_type(), output->quantization_info()),
+ "end value is outside the range of the data type");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!check_value_range(step, output->data_type(), output->quantization_info()),
+ "step value is outside the range of the data type");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->num_dimensions() != 1, "Output has to be a 1-D tensor");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->tensor_shape().total_size() < num_of_elements_in_range(start, end, step), "Output tensor size is incorrect");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->tensor_shape().total_size() < num_of_elements_in_range(start, end, step),
+ "Output tensor size is incorrect");
return Status{};
}
} // namespace
-CLRangeKernel::CLRangeKernel()
- : _start(0), _end(1), _step(1), _output(nullptr)
+CLRangeKernel::CLRangeKernel() : _start(0), _end(1), _step(1), _output(nullptr)
{
+ _type = CLKernelType::ELEMENTWISE;
}
void CLRangeKernel::configure(ICLTensor *output, const float start, const float end, const float step)
@@ -75,16 +79,18 @@ void CLRangeKernel::configure(ICLTensor *output, const float start, const float
configure(CLKernelLibrary::get().get_compile_context(), output, start, end, step);
}
-void CLRangeKernel::configure(const CLCompileContext &compile_context, ICLTensor *output, const float start, const float end, const float step)
+void CLRangeKernel::configure(
+ const CLCompileContext &compile_context, ICLTensor *output, const float start, const float end, const float step)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(output->info(), start, end, step));
// Configure kernel window
- unsigned int num_elems_processed_per_iteration = adjust_vec_size(vector_size_byte_opencl / output->info()->element_size(), output->info()->dimension(0));
- Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration));
+ unsigned int num_elems_processed_per_iteration =
+ adjust_vec_size(vector_size_byte_opencl / output->info()->element_size(), output->info()->dimension(0));
+ Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration));
- auto padding_info = get_padding_info({ output });
+ auto padding_info = get_padding_info({output});
_start = start;
_end = end;
@@ -97,10 +103,11 @@ void CLRangeKernel::configure(const CLCompileContext &compile_context, ICLTensor
CLBuildOptions build_opts;
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(output->info()->data_type()));
build_opts.add_option("-DVECTOR_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(output->info()->dimension(0) % num_elems_processed_per_iteration));
+ build_opts.add_option("-DVEC_SIZE_LEFTOVER=" +
+ support::cpp11::to_string(output->info()->dimension(0) % num_elems_processed_per_iteration));
build_opts.add_option("-DSTART=" + support::cpp11::to_string(start));
build_opts.add_option("-DSTEP=" + support::cpp11::to_string(step));
- if(is_data_type_quantized_asymmetric(output->info()->data_type()))
+ if (is_data_type_quantized_asymmetric(output->info()->data_type()))
{
const UniformQuantizationInfo qinfo = output->info()->quantization_info().uniform();
build_opts.add_option("-DOFFSET_OUT=" + support::cpp11::to_string(qinfo.offset));
@@ -135,4 +142,4 @@ void CLRangeKernel::run(const Window &window, cl::CommandQueue &queue)
enqueue(queue, *this, window, lws_hint());
}
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
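
For reference, the size check in validate_arguments() above requires the 1-D output to hold at least num_of_elements_in_range(start, end, step) elements. The helper is assumed to follow the usual ceil((end - start) / step) formula; a minimal sketch:

#include <cmath>
#include <cstdio>

// Rough equivalent of the range-size check: [start, end) with a given step
// produces ceil((end - start) / step) elements.
static unsigned int elements_in_range(float start, float end, float step)
{
    return static_cast<unsigned int>(std::ceil((end - start) / step));
}

int main()
{
    std::printf("%u\n", elements_in_range(0.f, 10.f, 3.f)); // 4 -> {0, 3, 6, 9}
    return 0;
}
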
diff --git a/src/core/CL/kernels/CLRangeKernel.h b/src/core/CL/kernels/CLRangeKernel.h
index 1b94a099ed..65251a11e5 100644
--- a/src/core/CL/kernels/CLRangeKernel.h
+++ b/src/core/CL/kernels/CLRangeKernel.h
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_CLRANGEKERNEL_H
#include "arm_compute/core/Types.h"
+
#include "src/core/CL/ICLKernel.h"
namespace arm_compute
diff --git a/src/core/CL/kernels/CLReductionOperationKernel.cpp b/src/core/CL/kernels/CLReductionOperationKernel.cpp
index 2697a0df98..c8665f8fbd 100644
--- a/src/core/CL/kernels/CLReductionOperationKernel.cpp
+++ b/src/core/CL/kernels/CLReductionOperationKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -28,42 +28,47 @@
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/core/utils/StringUtils.h"
+#include "arm_compute/core/Validate.h"
+
#include "src/core/AccessWindowStatic.h"
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
-
#include "support/StringSupport.h"
namespace arm_compute
{
namespace
{
-// OpenCL kernel requires input width to be a power of 2 for x-axis.
-constexpr unsigned int border_val = 64;
-
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op, unsigned int width)
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- if(input->num_channels() == 1)
+ if (input->num_channels() == 1)
{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S32, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED,
+ DataType::S32, DataType::F16, DataType::F32);
}
else
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 2, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON(axis == 0);
}
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(op == ReductionOperation::SUM_SQUARE && input->data_type() == DataType::QASYMM8, "Not supported reduction operation for QASYMM8");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(op == ReductionOperation::SUM_SQUARE && input->data_type() == DataType::QASYMM8,
+ "Not supported reduction operation for QASYMM8");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions,
+ "Reduction axis greater than max number of dimensions");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis");
- ARM_COMPUTE_RETURN_ERROR_ON((op == ReductionOperation::MEAN_SUM) && (axis == 0) && (width == 0) && (input->data_type() != DataType::QASYMM8) && (input->data_type() != DataType::QASYMM8_SIGNED));
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((op == ReductionOperation::ARG_IDX_MAX) || (op == ReductionOperation::ARG_IDX_MIN), "Not supported reduction operation, use CLArgMinMaxLayer");
+ ARM_COMPUTE_RETURN_ERROR_ON((op == ReductionOperation::MEAN_SUM) && (axis == 0) && (input->dimension(0) == 0) &&
+ (input->data_type() != DataType::QASYMM8) &&
+ (input->data_type() != DataType::QASYMM8_SIGNED));
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((op == ReductionOperation::ARG_IDX_MAX) || (op == ReductionOperation::ARG_IDX_MIN),
+ "Not supported reduction operation, use CLArgMinMaxLayer");
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
@@ -71,85 +76,50 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, u
return Status{};
}
-
-std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, unsigned int axis, ReductionOperation op)
-{
- // Output tensor auto initialization if not yet initialized
- const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_reduced_shape(input->tensor_shape(), axis, true);
- DataType output_data_type = input->data_type();
- auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape).set_data_type(output_data_type).reset_padding().set_is_resizable(true));
-
- const unsigned int num_elems_processed_per_iteration = (is_data_type_quantized(input->data_type()) && (axis == 0)) ? 1 : 16;
- Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
- bool window_changed = false;
- const bool is_serial_op = needs_serialized_reduction(op, input->data_type(), axis);
-
- switch(axis)
- {
- case 0:
- {
- if(!is_serial_op)
- {
- const unsigned int border_width = ((input->dimension(0) % border_val) != 0) ? border_val - input->dimension(0) % border_val : 0;
- AccessWindowStatic input_access(input, 0, 0, input->dimension(0) + border_width, 1);
- AccessWindowHorizontal output_access(output, 0, 1);
- window_changed = update_window_and_padding(win, input_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
- }
- }
- break;
- case 1:
- case 2:
- case 3:
- {
- AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
- window_changed = update_window_and_padding(win, input_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
- }
- break;
- default:
- ARM_COMPUTE_ERROR("Not supported");
- }
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
-
- return std::make_tuple(err, win);
-}
} // namespace
CLReductionOperationKernel::CLReductionOperationKernel()
- : _input(nullptr), _output(nullptr), _reduction_axis(0), _op(ReductionOperation::SUM_SQUARE), _border_size()
+ : _input(nullptr), _output(nullptr), _reduction_axis(0), _op(ReductionOperation::SUM_SQUARE)
{
+ _type = CLKernelType::ELEMENTWISE;
}
-BorderSize CLReductionOperationKernel::border_size() const
+void CLReductionOperationKernel::configure(const ICLTensor *input,
+ ICLTensor *output,
+ unsigned int axis,
+ ReductionOperation op)
{
- return _border_size;
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, axis, op);
}
-void CLReductionOperationKernel::configure(const ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, unsigned int width)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, output, axis, op, width);
-}
-
-void CLReductionOperationKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, unsigned int width)
+void CLReductionOperationKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ unsigned int axis,
+ ReductionOperation op)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), axis, op, width));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), axis, op));
+
+ auto padding_info = get_padding_info({input, output});
_input = input;
_output = output;
_reduction_axis = axis;
_op = op;
+ const TensorShape output_shape =
+ arm_compute::misc::shape_calculator::compute_reduced_shape(input->info()->tensor_shape(), axis, true);
+ auto_init_if_empty(*output->info(),
+ input->info()->clone()->set_tensor_shape(output_shape).reset_padding().set_is_resizable(true));
+
// Set build options
CLBuildOptions build_opts;
DataType data_type = input->info()->data_type();
std::string data_type_promoted{};
- if(is_data_type_quantized(data_type))
+ if (is_data_type_quantized(data_type))
{
data_type_promoted = "int";
}
@@ -158,8 +128,15 @@ void CLReductionOperationKernel::configure(const CLCompileContext &compile_conte
data_type_promoted = get_cl_type_from_data_type(data_type);
}
+ const unsigned int width = input->info()->dimension(0) * input->info()->num_channels();
+ unsigned int vec_size = (is_data_type_quantized(input->info()->data_type()) && (axis == 0)) ? 1 : 16;
+ vec_size = adjust_vec_size(vec_size, width);
+ const unsigned int vec_size_leftover = width % vec_size;
+
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
build_opts.add_option("-DDATA_TYPE_PROMOTED=" + data_type_promoted);
+ build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(vec_size));
+ build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(vec_size_leftover));
build_opts.add_option_if(is_data_type_float(data_type), "-DFLOAT_DATA_TYPE");
build_opts.add_option_if(op == ReductionOperation::SUM_SQUARE, "-DSUM_SQUARE");
build_opts.add_option_if(op == ReductionOperation::MEAN_SUM, "-DMEAN");
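
The VEC_SIZE / VEC_SIZE_LEFTOVER options introduced in this hunk are derived from adjust_vec_size() and the flattened width (dimension 0 times the number of channels). A sketch of the assumed behaviour: shrink the requested power-of-two vector width so it never exceeds the width, then let the last work item handle the scalar tail. The real helper may treat edge cases differently.

#include <cstdio>

// Assumed behaviour of adjust_vec_size(): halve the requested vector width
// until it fits within the tensor width.
static unsigned int adjust_vec_size_sketch(unsigned int vec_size, unsigned int dim0)
{
    while (vec_size > dim0)
    {
        vec_size /= 2;
    }
    return vec_size > 0 ? vec_size : 1;
}

int main()
{
    const unsigned int width    = 35; // e.g. input dim0 * num_channels
    const unsigned int vec_size = adjust_vec_size_sketch(16, width);
    std::printf("VEC_SIZE=%u VEC_SIZE_LEFTOVER=%u\n", vec_size, width % vec_size); // 16 and 3
    return 0;
}
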
@@ -167,11 +144,14 @@ void CLReductionOperationKernel::configure(const CLCompileContext &compile_conte
build_opts.add_option_if(op == ReductionOperation::PROD, "-DPROD");
build_opts.add_option_if(op == ReductionOperation::MIN, "-DMIN");
build_opts.add_option_if(op == ReductionOperation::MAX, "-DMAX");
- build_opts.add_option_if(input->info()->num_channels() == 2, "-DCOMPLEX");
- build_opts.add_option_if(is_data_type_quantized(data_type), "-DOFFSET=" + support::cpp11::to_string(input->info()->quantization_info().uniform().offset));
- build_opts.add_option_if(is_data_type_quantized(data_type), "-DSCALE=" + float_to_string_with_full_precision(input->info()->quantization_info().uniform().scale));
-
- switch(op)
+ build_opts.add_option_if(is_data_type_quantized(data_type),
+ "-DOFFSET=" +
+ support::cpp11::to_string(input->info()->quantization_info().uniform().offset));
+ build_opts.add_option_if(
+ is_data_type_quantized(data_type),
+ "-DSCALE=" + float_to_string_with_full_precision(input->info()->quantization_info().uniform().scale));
+
+ switch (op)
{
case ReductionOperation::SUM_SQUARE:
build_opts.add_option(("-DOPERATION=square_sum"));
@@ -181,7 +161,10 @@ void CLReductionOperationKernel::configure(const CLCompileContext &compile_conte
build_opts.add_option(("-DOPERATION=sum"));
break;
case ReductionOperation::MIN:
+ build_opts.add_option(("-DOPERATION=min_"));
+ break;
case ReductionOperation::MAX:
+ build_opts.add_option(("-DOPERATION=max_"));
break;
case ReductionOperation::PROD:
build_opts.add_option(("-DOPERATION=product"));
@@ -191,30 +174,15 @@ void CLReductionOperationKernel::configure(const CLCompileContext &compile_conte
}
// Create kernel
- cl::NDRange lws_hint = CLKernelLibrary::get().default_ndrange();
std::string kernel_axis_name;
const bool is_serial_op = needs_serialized_reduction(_op, _input->info()->data_type(), _reduction_axis);
- switch(axis)
+ switch (axis)
{
case 0:
{
- if(is_serial_op)
- {
- build_opts.add_option("-DWIDTH=" + support::cpp11::to_string(input->info()->dimension(0)));
- build_opts.add_option_if_else(_input->info()->data_type() == DataType::F16, "-DCOND_DATA_TYPE=short", "-DCOND_DATA_TYPE=int");
- kernel_axis_name = "non_parallel_x";
- }
- else
- {
- build_opts.add_option_if(op == ReductionOperation::MEAN_SUM, "-DWIDTH=" + support::cpp11::to_string(width));
- const unsigned int width_leftover = input->info()->dimension(0) % border_val;
- const unsigned int border_width = (width_leftover != 0) ? border_val - width_leftover : 0;
- kernel_axis_name = "x";
-
- lws_hint = create_lws_hint_parallel_implementations(input->info()->dimension(0), border_val);
- _border_size = BorderSize(0, border_width, 0, 0);
- }
+ build_opts.add_option("-DWIDTH=" + support::cpp11::to_string(width));
+ kernel_axis_name = ((is_serial_op) ? "non_parallel_x" : "x");
}
break;
case 1:
@@ -236,18 +204,21 @@ void CLReductionOperationKernel::configure(const CLCompileContext &compile_conte
_kernel = create_kernel(compile_context, "reduction_operation_" + kernel_axis_name, build_opts.options());
// Configure kernel window
- auto win_config = validate_and_configure_window(_input->info(), _output->info(), axis, op);
+ TensorShape actual_input_shape = input->info()->tensor_shape();
+ actual_input_shape[0] = width;
- ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
+ Window win = calculate_max_window(actual_input_shape, Steps(vec_size));
+ ICLKernel::configure_internal(win);
- ICLKernel::configure_internal(std::get<1>(win_config), lws_hint);
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-Status CLReductionOperationKernel::validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op, unsigned int width)
+Status CLReductionOperationKernel::validate(const ITensorInfo *input,
+ const ITensorInfo *output,
+ unsigned int axis,
+ ReductionOperation op)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, axis, op, width));
- ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), output->clone().get(), axis, op)));
-
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, axis, op));
return Status{};
}
@@ -257,18 +228,19 @@ void CLReductionOperationKernel::run(const Window &window, cl::CommandQueue &que
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
const bool is_serial_op = needs_serialized_reduction(_op, _input->info()->data_type(), _reduction_axis);
- switch(_reduction_axis)
+ switch (_reduction_axis)
{
case 0:
{
// We use parallel reduction only in non quantized types
- if(is_serial_op)
+ if (is_serial_op)
{
// Get first input and output slices
- Window window_in{ window };
- window_in.set(Window::DimX, Window::Dimension(0, _input->info()->dimension(0), _input->info()->dimension(0)));
+ Window window_in{window};
+ window_in.set(Window::DimX,
+ Window::Dimension(0, _input->info()->dimension(0), _input->info()->dimension(0)));
- Window out_window{ window };
+ Window out_window{window};
out_window.set(Window::DimX, Window::Dimension(0, 0, 0));
Window in_slice = window_in.first_slice_window_1D();
@@ -279,91 +251,114 @@ void CLReductionOperationKernel::run(const Window &window, cl::CommandQueue &que
unsigned int idx = 0;
add_1D_tensor_argument(idx, _input, in_slice);
add_1D_tensor_argument(idx, _output, out_slice);
- enqueue(queue, *this, in_slice, lws_hint());
- }
- while(window_in.slide_window_slice_1D(in_slice) && out_window.slide_window_slice_1D(out_slice));
+ enqueue(queue, *this, in_slice);
+ } while (window_in.slide_window_slice_1D(in_slice) && out_window.slide_window_slice_1D(out_slice));
}
else
{
// Set out window
- Window out_window(window);
- out_window.set(Window::DimX, Window::Dimension(0, 0, 0));
+ bool has_collapsed = true;
+ Window window_in = window.collapse_if_possible(window, 2, &has_collapsed);
+ ARM_COMPUTE_ERROR_ON(!has_collapsed);
- // Get first input and output slices
- Window in_slice = window.first_slice_window_2D();
- Window out_slice = out_window.first_slice_window_2D();
-
- // Reshape window
- const unsigned int border_width = ((in_slice.x().end() % border_val) != 0) ? border_val - in_slice.x().end() % border_val : 0;
- in_slice.set(Window::DimX, Window::Dimension(in_slice.x().start(), in_slice.x().end() + border_width, in_slice.x().step()));
-
- // Set local sums buffer
- unsigned int local_res_size = lws_hint()[0] * _input->info()->element_size();
- _kernel.setArg(num_arguments_per_2D_tensor() * 2, local_res_size, nullptr);
+ Window window_out = window_in;
+ window_out.set(0, Window::Dimension());
- do
- {
- unsigned int idx = 0;
- add_2D_tensor_argument(idx, _input, in_slice);
- add_2D_tensor_argument(idx, _output, out_slice);
- enqueue(queue, *this, in_slice, lws_hint());
- }
- while(window.slide_window_slice_2D(in_slice) && window.slide_window_slice_2D(out_slice));
+ unsigned int idx = 0;
+ add_3D_tensor_argument(idx, _input, window_in);
+ add_3D_tensor_argument(idx, _output, window_out);
+ enqueue(queue, *this, window_in);
}
}
break;
case 1:
{
- // Get first input and output slices
- Window window_in{ window };
- window_in.set(Window::DimY, Window::Dimension(0, _input->info()->dimension(1), _input->info()->dimension(1)));
- Window in_slice = window_in.first_slice_window_2D();
- Window out_slice = window.first_slice_window_2D();
+ bool has_collapsed = true;
+ Window actual_window = window.collapse_if_possible(window, 2, &has_collapsed);
+ ARM_COMPUTE_ERROR_ON(!has_collapsed);
- do
- {
- unsigned int idx = 0;
- add_2D_tensor_argument(idx, _input, in_slice);
- add_2D_tensor_argument(idx, _output, out_slice);
- enqueue(queue, *this, in_slice, lws_hint());
- }
- while(window_in.slide_window_slice_2D(in_slice) && window.slide_window_slice_2D(out_slice));
+ actual_window = actual_window.shift_dimensions(1, Window::DimY);
+
+ const ITensorInfo *input_info = _input->info();
+ const Strides &input_strides = input_info->strides_in_bytes();
+
+ const ITensorInfo *output_info = _output->info();
+ const Strides &output_strides = output_info->strides_in_bytes();
+
+ unsigned int idx = 0;
+
+ _kernel.setArg(idx++, _input->cl_buffer());
+ _kernel.setArg<cl_uint>(idx++, input_strides[1]);
+ _kernel.setArg<cl_uint>(idx++, input_strides[2]);
+ _kernel.setArg<cl_uint>(idx++, input_info->offset_first_element_in_bytes());
+
+ _kernel.setArg(idx++, _output->cl_buffer());
+ _kernel.setArg<cl_uint>(idx++, output_strides[2]);
+ _kernel.setArg<cl_uint>(idx++, output_info->offset_first_element_in_bytes());
+
+ enqueue(queue, *this, actual_window);
}
break;
case 2:
{
- // Get first input and output slices
- Window window_in{ window };
- window_in.set(Window::DimZ, Window::Dimension(0, _input->info()->dimension(2), _input->info()->dimension(2)));
- Window in_slice = window_in.first_slice_window_3D();
- Window out_slice = window.first_slice_window_3D();
+ bool has_collapsed = true;
+ Window actual_window = window.collapse_if_possible(window, 3, &has_collapsed);
+ ARM_COMPUTE_ERROR_ON(!has_collapsed);
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, in_slice);
- add_3D_tensor_argument(idx, _output, out_slice);
- enqueue(queue, *this, in_slice, lws_hint());
- }
- while(window_in.slide_window_slice_3D(in_slice) && window.slide_window_slice_3D(out_slice));
+ actual_window = actual_window.shift_dimensions(1, Window::DimZ);
+
+ const ITensorInfo *input_info = _input->info();
+ const Strides &input_strides = input_info->strides_in_bytes();
+
+ const ITensorInfo *output_info = _output->info();
+ const Strides &output_strides = output_info->strides_in_bytes();
+
+ unsigned int idx = 0;
+
+ _kernel.setArg(idx++, _input->cl_buffer());
+ _kernel.setArg<cl_uint>(idx++, input_strides[1]);
+ _kernel.setArg<cl_uint>(idx++, input_strides[2]);
+ _kernel.setArg<cl_uint>(idx++, input_strides[3]);
+ _kernel.setArg<cl_uint>(idx++, input_info->offset_first_element_in_bytes());
+
+ _kernel.setArg(idx++, _output->cl_buffer());
+ _kernel.setArg<cl_uint>(idx++, output_strides[1]);
+ _kernel.setArg<cl_uint>(idx++, output_strides[3]);
+ _kernel.setArg<cl_uint>(idx++, output_info->offset_first_element_in_bytes());
+
+ enqueue(queue, *this, actual_window);
}
break;
case 3:
{
- // Get first input and output slices
- Window window_in{ window };
- window_in.set(3, Window::Dimension(0, 1, 1));
- Window in_slice = window_in.first_slice_window_4D();
- Window out_slice = window.first_slice_window_4D();
+ bool has_collapsed = true;
+ Window actual_window = window.shift_dimensions(1, Window::DimW);
- do
- {
- unsigned int idx = 0;
- add_4D_tensor_argument(idx, _input, in_slice);
- add_4D_tensor_argument(idx, _output, out_slice);
- enqueue(queue, *this, in_slice, lws_hint());
- }
- while(window_in.slide_window_slice_4D(in_slice) && window.slide_window_slice_4D(out_slice));
+ actual_window = actual_window.collapse_if_possible(actual_window, 2, &has_collapsed);
+ ARM_COMPUTE_ERROR_ON(!has_collapsed);
+
+ const ITensorInfo *input_info = _input->info();
+ const Strides &input_strides = input_info->strides_in_bytes();
+
+ const ITensorInfo *output_info = _output->info();
+ const Strides &output_strides = output_info->strides_in_bytes();
+
+ unsigned int idx = 0;
+
+ _kernel.setArg(idx++, _input->cl_buffer());
+ _kernel.setArg<cl_uint>(idx++, input_strides[1]);
+ _kernel.setArg<cl_uint>(idx++, input_strides[2]);
+ _kernel.setArg<cl_uint>(idx++, input_strides[3]);
+ _kernel.setArg<cl_uint>(idx++, input_strides[4]);
+ _kernel.setArg<cl_uint>(idx++, input_info->offset_first_element_in_bytes());
+
+ _kernel.setArg(idx++, _output->cl_buffer());
+ _kernel.setArg<cl_uint>(idx++, output_strides[1]);
+ _kernel.setArg<cl_uint>(idx++, output_strides[2]);
+ _kernel.setArg<cl_uint>(idx++, output_strides[4]);
+ _kernel.setArg<cl_uint>(idx++, output_info->offset_first_element_in_bytes());
+
+ enqueue(queue, *this, actual_window);
}
break;
default:
diff --git a/src/core/CL/kernels/CLReductionOperationKernel.h b/src/core/CL/kernels/CLReductionOperationKernel.h
index ff9fd61484..2f94b2add3 100644
--- a/src/core/CL/kernels/CLReductionOperationKernel.h
+++ b/src/core/CL/kernels/CLReductionOperationKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_CLREDUCTIONOPERATIONKERNEL_H
#include "arm_compute/core/Types.h"
+
#include "src/core/CL/ICLKernel.h"
namespace arm_compute
@@ -56,9 +57,8 @@ public:
* Output will have the same number of dimensions as input.
* @param[in] axis Axis along which to reduce. Supported reduction axis : 0,1,2,3
* @param[in] op Reduction operation to perform. Operations supported: MEAN_SUM, PROD, SUM_SQUARE, SUM, MIN, MAX
- * @param[in] width (Optional) In case of x-axis we also need to provide the width of the input image.
*/
- void configure(const ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, unsigned int width = 0);
+ void configure(const ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op);
/** Set the input and output tensors.
*
* @param[in] compile_context The compile context to be used.
@@ -67,9 +67,12 @@ public:
* Output will have the same number of dimensions as input.
* @param[in] axis Axis along which to reduce. Supported reduction axis : 0,1,2,3
* @param[in] op Reduction operation to perform. Operations supported: MEAN_SUM, PROD, SUM_SQUARE, SUM, MIN, MAX
- * @param[in] width (Optional) In case of x-axis we also need to provide the width of the input image.
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, unsigned int width = 0);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ unsigned int axis,
+ ReductionOperation op);
/** Static function to check if given info will lead to a valid configuration of @ref CLReductionOperationKernel.
*
@@ -78,22 +81,20 @@ public:
* Output will have the same number of dimensions as input.
* @param[in] axis Axis along which to reduce. Supported reduction axis : 0,1,2,3
* @param[in] op Reduction operation to perform. Operations supported: MEAN_SUM, PROD, SUM_SQUARE, SUM, MIN, MAX
- * @param[in] width (Optional) In case of x-axis we also need to provide the width of the input image.
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op, unsigned int width = 0);
+ static Status
+ validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
- BorderSize border_size() const override;
private:
const ICLTensor *_input;
ICLTensor *_output;
unsigned int _reduction_axis;
ReductionOperation _op;
- BorderSize _border_size;
};
} // namespace arm_compute
#endif /*ARM_COMPUTE_CLREDUCTIONOPERATIONKERNEL_H */
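
The net effect of the header change above is that callers no longer pass a width or query a border size: validate() and configure() now take only (input, output, axis, op). A minimal, hypothetical usage sketch under the new 4-argument interface follows; the tensors "src" and "dst" and the wrapper function are illustrative, and it assumes internal access to the src/core headers.

    // Sketch only: validate, then configure, a MAX reduction along the X axis.
    #include "arm_compute/core/Error.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "src/core/CL/kernels/CLReductionOperationKernel.h"

    void reduce_max_x(arm_compute::CLTensor &src, arm_compute::CLTensor &dst)
    {
        using namespace arm_compute;
        // Both calls use the new signature: (input, output, axis, op) and nothing else.
        ARM_COMPUTE_ERROR_THROW_ON(
            CLReductionOperationKernel::validate(src.info(), dst.info(), 0, ReductionOperation::MAX));
        CLReductionOperationKernel kernel;
        kernel.configure(&src, &dst, 0, ReductionOperation::MAX);
    }
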
diff --git a/src/core/CL/kernels/CLRemapKernel.cpp b/src/core/CL/kernels/CLRemapKernel.cpp
deleted file mode 100644
index 0ebeefcc74..0000000000
--- a/src/core/CL/kernels/CLRemapKernel.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLRemapKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Validate.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/helpers/WindowHelpers.h"
-
-#include <algorithm>
-
-using namespace arm_compute;
-
-CLRemapKernel::CLRemapKernel()
- : _input(nullptr), _output(nullptr), _map_x(nullptr), _map_y(nullptr)
-{
-}
-
-BorderSize CLRemapKernel::border_size() const
-{
- return BorderSize(1);
-}
-
-void CLRemapKernel::configure(const ICLTensor *input, const ICLTensor *map_x, const ICLTensor *map_y, ICLTensor *output, InterpolationPolicy policy, bool border_undefined)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, map_x, map_y, output, policy, border_undefined);
-}
-
-void CLRemapKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *map_x, const ICLTensor *map_y, ICLTensor *output, InterpolationPolicy policy,
- bool border_undefined)
-{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(map_x, 1, DataType::F32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(map_y, 1, DataType::F32);
- ARM_COMPUTE_ERROR_ON_MSG(policy == InterpolationPolicy::AREA, "Area interpolation is not supported!");
- ARM_COMPUTE_UNUSED(border_undefined);
-
- _input = input;
- _output = output;
- _map_x = map_x;
- _map_y = map_y;
-
- // Create kernel
- std::set<std::string> build_opts = { ("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())) };
- std::string interpolation_name = string_from_interpolation_policy(policy);
- std::transform(interpolation_name.begin(), interpolation_name.end(), interpolation_name.begin(), ::tolower);
- std::string kernel_name = "remap_" + interpolation_name;
- _kernel = create_kernel(compile_context, kernel_name, build_opts);
-
- // Configure window
- constexpr unsigned int num_elems_processed_per_iteration = 4;
-
- const int total_right = ceil_to_multiple(input->info()->dimension(0), num_elems_processed_per_iteration);
- const int access_right = total_right + (((total_right - input->info()->dimension(0)) == 0) ? border_size().right : 0);
-
- Window win = calculate_max_window(*_output->info(), Steps(num_elems_processed_per_iteration));
- AccessWindowStatic input_access(input->info(), -border_size().left, -border_size().top, access_right, input->info()->dimension(1) + border_size().bottom);
-
- AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
-
- update_window_and_padding(win, input_access, output_access);
-
- output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));
-
- ICLKernel::configure_internal(win);
-
- // Set static arguments
- unsigned int idx = 4 * num_arguments_per_2D_tensor(); //Skip the input and output parameters
- _kernel.setArg<cl_float>(idx++, input->info()->dimension(0));
- _kernel.setArg<cl_float>(idx++, input->info()->dimension(1));
-}
-
-void CLRemapKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- Window slice = window.first_slice_window_2D();
-
- do
- {
- unsigned int idx = 0;
- add_2D_tensor_argument(idx, _input, slice);
- add_2D_tensor_argument(idx, _output, slice);
- add_2D_tensor_argument(idx, _map_x, slice);
- add_2D_tensor_argument(idx, _map_y, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_2D(slice));
-}
diff --git a/src/core/CL/kernels/CLRemapKernel.h b/src/core/CL/kernels/CLRemapKernel.h
deleted file mode 100644
index 8efcf091ed..0000000000
--- a/src/core/CL/kernels/CLRemapKernel.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLREMAPKERNEL_H
-#define ARM_COMPUTE_CLREMAPKERNEL_H
-
-#include "arm_compute/core/Types.h"
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** OpenCL kernel to perform a remap on a tensor */
-class CLRemapKernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLRemapKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLRemapKernel(const CLRemapKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLRemapKernel &operator=(const CLRemapKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLRemapKernel(CLRemapKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLRemapKernel &operator=(CLRemapKernel &&) = default;
- /** Initialize the kernel's input, output and border mode.
- *
- * @param[in] input Source tensor. Data types supported: U8.
- * @param[in] map_x Map for X coordinates. Data types supported: F32.
- * @param[in] map_y Map for Y coordinates. Data types supported: F32.
- * @param[out] output Destination tensor. Data types supported: U8. All but the lowest two dimensions must be the same size as in the input tensor, i.e. remapping is only performed within the XY-plane.
- * @param[in] policy The interpolation type.
- * @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant.
- */
- void configure(const ICLTensor *input, const ICLTensor *map_x, const ICLTensor *map_y, ICLTensor *output, InterpolationPolicy policy, bool border_undefined);
- /** Initialize the kernel's input, output and border mode.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Source tensor. Data types supported: U8.
- * @param[in] map_x Map for X coordinates. Data types supported: F32.
- * @param[in] map_y Map for Y coordinates. Data types supported: F32.
- * @param[out] output Destination tensor. Data types supported: U8. All but the lowest two dimensions must be the same size as in the input tensor, i.e. remapping is only performed within the XY-plane.
- * @param[in] policy The interpolation type.
- * @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant.
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *map_x, const ICLTensor *map_y, ICLTensor *output, InterpolationPolicy policy, bool border_undefined);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
- BorderSize border_size() const override;
-
-private:
- const ICLTensor *_input;
- ICLTensor *_output;
- const ICLTensor *_map_x;
- const ICLTensor *_map_y;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLREMAPKERNEL_H */
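
For context on what the two deleted remap files implemented: each output pixel is fetched from the source at the coordinates stored in the F32 map tensors, using either nearest-neighbour or bilinear interpolation (area interpolation was rejected). A scalar, CPU-side sketch of the nearest-neighbour variant is shown below, with simplified bounds handling standing in for the kernel's border mode; the function and its parameters are illustrative only.

    // Sketch: nearest-neighbour remap over a single-channel, row-major U8 image.
    #include <cmath>
    #include <cstdint>

    void remap_nearest(const uint8_t *src, uint8_t *dst, const float *map_x, const float *map_y,
                       int width, int height, uint8_t border_value)
    {
        for (int y = 0; y < height; ++y)
        {
            for (int x = 0; x < width; ++x)
            {
                const int  idx = y * width + x;
                const int  sx  = static_cast<int>(std::lround(map_x[idx]));
                const int  sy  = static_cast<int>(std::lround(map_y[idx]));
                const bool in  = (sx >= 0 && sx < width && sy >= 0 && sy < height);
                // Out-of-range samples fall back to a constant border value.
                dst[idx] = in ? src[sy * width + sx] : border_value;
            }
        }
    }
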
diff --git a/src/core/CL/kernels/CLReorgLayerKernel.cpp b/src/core/CL/kernels/CLReorgLayerKernel.cpp
index 01853450ee..9fd21943e8 100644
--- a/src/core/CL/kernels/CLReorgLayerKernel.cpp
+++ b/src/core/CL/kernels/CLReorgLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -28,8 +28,10 @@
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/core/utils/StringUtils.h"
+#include "arm_compute/core/Validate.h"
+
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/StringSupport.h"
@@ -50,13 +52,16 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, i
const size_t idx_height = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
ARM_COMPUTE_RETURN_ERROR_ON(stride <= 0);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((input->tensor_shape()[idx_width] % stride) != 0, "The width of the input tensor must be a multiple of stride");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((input->tensor_shape()[idx_height] % stride) != 0, "The height of the input tensor must be a multiple of stride");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((input->tensor_shape()[idx_width] % stride) != 0,
+ "The width of the input tensor must be a multiple of stride");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((input->tensor_shape()[idx_height] % stride) != 0,
+ "The height of the input tensor must be a multiple of stride");
// Validate output if initialized
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
- const TensorInfo tensor_info_output = output->clone()->set_tensor_shape(misc::shape_calculator::compute_reorg_output_shape(*input, stride));
+ const TensorInfo tensor_info_output =
+ output->clone()->set_tensor_shape(misc::shape_calculator::compute_reorg_output_shape(*input, stride));
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
}
@@ -65,9 +70,9 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, i
}
} // namespace
-CLReorgLayerKernel::CLReorgLayerKernel()
- : _input(nullptr), _output(nullptr)
+CLReorgLayerKernel::CLReorgLayerKernel() : _input(nullptr), _output(nullptr)
{
+ _type = CLKernelType::ELEMENTWISE;
}
void CLReorgLayerKernel::configure(const ICLTensor *input, ICLTensor *output, int32_t stride)
@@ -75,17 +80,22 @@ void CLReorgLayerKernel::configure(const ICLTensor *input, ICLTensor *output, in
configure(CLKernelLibrary::get().get_compile_context(), input, output, stride);
}
-void CLReorgLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t stride)
+void CLReorgLayerKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ int32_t stride)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), stride));
- auto padding_info = get_padding_info({ input, output });
+ auto padding_info = get_padding_info({input, output});
_input = input;
_output = output;
- std::string kernel_name = std::string("reorg_layer_") + lower_string(string_from_data_layout(input->info()->data_layout()));
- const size_t idx_channel = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::CHANNEL);
+ std::string kernel_name =
+ std::string("reorg_layer_") + lower_string(string_from_data_layout(input->info()->data_layout()));
+ const size_t idx_channel =
+ get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::CHANNEL);
// Create kernel
CLBuildOptions build_opts;
@@ -96,12 +106,13 @@ void CLReorgLayerKernel::configure(const CLCompileContext &compile_context, cons
// Configure window
// auto initialize the output tensor if not yet initialized
- auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(misc::shape_calculator::compute_reorg_output_shape(*input->info(), stride)));
+ auto_init_if_empty(*output->info(),
+ input->info()->clone()->set_tensor_shape(
+ misc::shape_calculator::compute_reorg_output_shape(*input->info(), stride)));
Window win = calculate_max_window(*output->info(), Steps());
// The CLReorgLayerKernel doesn't need padding so update_window_and_padding() can be skipped
- output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape()));
ICLKernel::configure_internal(win);
_config_id = kernel_name;
@@ -118,7 +129,9 @@ void CLReorgLayerKernel::configure(const CLCompileContext &compile_context, cons
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-Status CLReorgLayerKernel::validate(const arm_compute::ITensorInfo *input, const arm_compute::ITensorInfo *output, int32_t stride)
+Status CLReorgLayerKernel::validate(const arm_compute::ITensorInfo *input,
+ const arm_compute::ITensorInfo *output,
+ int32_t stride)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, stride));
@@ -138,7 +151,6 @@ void CLReorgLayerKernel::run(const Window &window, cl::CommandQueue &queue)
add_3D_tensor_argument(idx, _input, slice);
add_3D_tensor_argument(idx, _output, slice);
enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_3D(slice));
+ } while (window.slide_window_slice_3D(slice));
}
} // namespace arm_compute
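
The reworded error messages make the reorg precondition explicit: the stride must be positive and both spatial dimensions of the input must be exact multiples of it. A standalone restatement of that check, with hypothetical names, for readers tracing the validation logic:

    // Sketch: mirrors the stride checks performed by validate_arguments() above.
    #include <cstddef>
    #include <stdexcept>

    void check_reorg_preconditions(std::size_t input_width, std::size_t input_height, int stride)
    {
        if (stride <= 0)
            throw std::invalid_argument("stride must be positive");
        if (input_width % static_cast<std::size_t>(stride) != 0)
            throw std::invalid_argument("input width must be a multiple of stride");
        if (input_height % static_cast<std::size_t>(stride) != 0)
            throw std::invalid_argument("input height must be a multiple of stride");
    }
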
diff --git a/src/core/CL/kernels/CLReorgLayerKernel.h b/src/core/CL/kernels/CLReorgLayerKernel.h
index 455a6170c6..f335071e9f 100644
--- a/src/core/CL/kernels/CLReorgLayerKernel.h
+++ b/src/core/CL/kernels/CLReorgLayerKernel.h
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_CLREORGLAYERKERNEL_H
#include "arm_compute/core/Types.h"
+
#include "src/core/CL/ICLKernel.h"
namespace arm_compute
diff --git a/src/core/CL/kernels/CLReverseKernel.cpp b/src/core/CL/kernels/CLReverseKernel.cpp
index b3c9bcafd1..00241b161b 100644
--- a/src/core/CL/kernels/CLReverseKernel.cpp
+++ b/src/core/CL/kernels/CLReverseKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -28,6 +28,9 @@
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
@@ -37,17 +40,21 @@ namespace arm_compute
{
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *axis)
+Status
+validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *axis, bool use_inverted_axis)
{
+ ARM_COMPUTE_UNUSED(use_inverted_axis);
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, axis);
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(axis, 1, DataType::U32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(axis, 1, DataType::U32, DataType::S32);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis->num_dimensions() > 1, "Axis must be a 1D tensor");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_dimensions() > 4,
+ "Current implementation only supports up to 4 dimensions.");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis->dimension(0) > 4, "Only up to 4 dimensions can be reversed");
// Checks performed when output is configured
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
@@ -58,20 +65,27 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
}
} // namespace
-CLReverseKernel::CLReverseKernel()
- : _input(nullptr), _output(nullptr), _axis(nullptr)
+CLReverseKernel::CLReverseKernel() : _input(nullptr), _output(nullptr), _axis(nullptr)
{
+ _type = CLKernelType::ELEMENTWISE;
}
-void CLReverseKernel::configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *axis)
+void CLReverseKernel::configure(const ICLTensor *input,
+ ICLTensor *output,
+ const ICLTensor *axis,
+ bool use_inverted_axis)
{
- configure(CLKernelLibrary::get().get_compile_context(), input, output, axis);
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, axis, use_inverted_axis);
}
-void CLReverseKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *axis)
+void CLReverseKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ const ICLTensor *axis,
+ bool use_inverted_axis)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, axis);
- auto padding_info = get_padding_info({ input, output, axis });
+ auto padding_info = get_padding_info({input, output, axis});
_input = input;
_output = output;
@@ -80,12 +94,14 @@ void CLReverseKernel::configure(const CLCompileContext &compile_context, const I
// Output tensor auto initialization if not yet initialized
auto_init_if_empty(*output->info(), *input->info()->clone());
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), axis->info()));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), axis->info(), use_inverted_axis));
// Set kernel build options
CLBuildOptions build_opts;
build_opts.add_option("-DNUM_REVERSE_DIMS=" + support::cpp11::to_string(axis->info()->dimension(0)));
build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(input->info()->element_size()));
+ build_opts.add_option("-DRANK=" + support::cpp11::to_string(input->info()->num_dimensions()));
+ build_opts.add_option_if(use_inverted_axis, "-DUSE_INVERTED_AXIS");
// Create kernel
_kernel = create_kernel(compile_context, "reverse", build_opts.options());
@@ -113,9 +129,12 @@ void CLReverseKernel::configure(const CLCompileContext &compile_context, const I
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-Status CLReverseKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *axis)
+Status CLReverseKernel::validate(const ITensorInfo *input,
+ const ITensorInfo *output,
+ const ITensorInfo *axis,
+ bool use_inverted_axis)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, axis));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, axis, use_inverted_axis));
return Status{};
}
@@ -135,7 +154,6 @@ void CLReverseKernel::run(const Window &window, cl::CommandQueue &queue)
add_1D_tensor_argument(idx, _axis, axis_slice);
add_4D_tensor_argument(idx, _output, slice);
enqueue(queue, *this, slice, lws_hint());
- }
- while(collapsed.slide_window_slice_4D(slice));
+ } while (collapsed.slide_window_slice_4D(slice));
}
} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLReverseKernel.h b/src/core/CL/kernels/CLReverseKernel.h
index 4a21e4f802..a630aec15a 100644
--- a/src/core/CL/kernels/CLReverseKernel.h
+++ b/src/core/CL/kernels/CLReverseKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2020, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CLREVERSEKERNEL_H
-#define ARM_COMPUTE_CLREVERSEKERNEL_H
+#ifndef ACL_SRC_CORE_CL_KERNELS_CLREVERSEKERNEL_H
+#define ACL_SRC_CORE_CL_KERNELS_CLREVERSEKERNEL_H
#include "src/core/CL/ICLKernel.h"
@@ -48,29 +48,43 @@ public:
~CLReverseKernel() = default;
/** Initialise the kernel's inputs and output
*
- * @param[in] input Input tensor. Data types supported: All.
- * @param[out] output Output tensor. Data type supported: Same as @p input
- * @param[in] axis Axis tensor. Contains the indices of the dimensions to reverse. Data type supported: U32
+ * @param[in] input Input tensor. Data types supported: All.
+ * @param[out] output Output tensor. Data type supported: Same as @p input
+ * @param[in] axis Axis tensor. Contains the indices of the dimensions to reverse. Data type supported: U32/S32
+ * @param[in] use_inverted_axis Reverse ACL axis indices convention i.e. acl.dim(0) = tensor_rank -1
+ *
+ * @note The value of each axis should be between [-rank, rank)
+ * @note If there are duplicate values in the tensor, the subsequent axis values are ignored. e.g. an array of [2, 2] has the same effect as [2].
+ *
+ * @deprecated Support for U32 in axis tensor will be removed in 24.02 release
+ *
*/
- void configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *axis);
+ void configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *axis, bool use_inverted_axis);
/** Initialise the kernel's inputs and output
*
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Input tensor. Data types supported: All.
- * @param[out] output Output tensor. Data type supported: Same as @p input
- * @param[in] axis Axis tensor. Contains the indices of the dimensions to reverse. Data type supported: U32
+ * @param[in] compile_context The compile context to be used.
+ * @param[in] input Input tensor. Data types supported: All.
+ * @param[out] output Output tensor. Data type supported: Same as @p input
+ * @param[in] axis Axis tensor. Contains the indices of the dimensions to reverse. Data type supported: U32/S32
+ * @param[in] use_inverted_axis Reverse ACL axis indices convention i.e. acl.dim(0) = tensor_rank -1
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *axis);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ const ICLTensor *axis,
+ bool use_inverted_axis);
/** Static function to check if given info will lead to a valid configuration of @ref CLReverseKernel
*
- * @param[in] input Input tensor info. Data types supported: All.
- * @param[in] output Output tensor info. Data type supported: Same as @p input
- * @param[in] axis Axis tensor info. Contains the indices of the dimensions to reverse. Data type supported: U32
+ * @param[in] input Input tensor info. Data types supported: All.
+ * @param[in] output Output tensor info. Data type supported: Same as @p input
+ * @param[in] axis Axis tensor info. Contains the indices of the dimensions to reverse. Data type supported: U32/S32
+ * @param[in] use_inverted_axis Reverse ACL axis indices convention i.e. acl.dim(0) = tensor_rank -1
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *axis);
+ static Status
+ validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *axis, bool use_inverted_axis);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
@@ -81,4 +95,4 @@ public:
const ICLTensor *_axis;
};
} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLREVERSEKERNEL_H */
+#endif // ACL_SRC_CORE_CL_KERNELS_CLREVERSEKERNEL_H
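
The interface now takes a signed axis tensor and an explicit use_inverted_axis flag, so a caller that indexes dimensions in framework order (outermost first) can let the kernel translate to ACL order. A hypothetical usage sketch under the new signature; "input", "output" and "axis" stand for pre-allocated CL tensors, with "axis" a 1D S32 tensor whose values lie in [-rank, rank).

    // Sketch only: reverse selected dimensions, interpreting axis values in the
    // inverted (framework) convention, i.e. index 0 means the outermost dimension.
    #include "arm_compute/core/Error.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "src/core/CL/kernels/CLReverseKernel.h"

    void reverse_with_framework_axes(arm_compute::CLTensor &input, arm_compute::CLTensor &output,
                                     arm_compute::CLTensor &axis)
    {
        using namespace arm_compute;
        ARM_COMPUTE_ERROR_THROW_ON(
            CLReverseKernel::validate(input.info(), output.info(), axis.info(), /* use_inverted_axis */ true));
        CLReverseKernel reverse;
        reverse.configure(&input, &output, &axis, /* use_inverted_axis */ true);
    }
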
diff --git a/src/core/CL/kernels/CLScaleKernel.cpp b/src/core/CL/kernels/CLScaleKernel.cpp
deleted file mode 100644
index c2c78c8f6d..0000000000
--- a/src/core/CL/kernels/CLScaleKernel.cpp
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLScaleKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/CL/ICLKernel.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-#include "src/core/utils/ScaleUtils.h"
-
-#include <set>
-#include <string>
-
-namespace arm_compute
-{
-namespace
-{
-inline std::pair<float, float> calculate_scale_factors(const ITensorInfo &input, const ITensorInfo &output, const ScaleKernelInfo &info)
-{
- const DataLayout data_layout = info.data_layout == DataLayout::UNKNOWN ? input.data_layout() : info.data_layout;
- const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
-
- // Compute the ratio between source width/height and destination width/height
- const unsigned int input_width = input.dimension(idx_width);
- const unsigned int input_height = input.dimension(idx_height);
- const unsigned int output_width = output.dimension(idx_width);
- const unsigned int output_height = output.dimension(idx_height);
-
- float wr = arm_compute::scale_utils::calculate_resize_ratio(input_width, output_width, info.align_corners);
- float hr = arm_compute::scale_utils::calculate_resize_ratio(input_height, output_height, info.align_corners);
-
- return std::make_pair(wr, hr);
-}
-
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ScaleKernelInfo &info)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::U8, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON(output == input);
- ARM_COMPUTE_RETURN_ERROR_ON(info.align_corners && !arm_compute::scale_utils::is_align_corners_allowed_sampling_policy(info.sampling_policy));
-
- float wr = 0.f;
- float hr = 0.f;
- std::tie(wr, hr) = calculate_scale_factors(*input, *output, info);
-
- ARM_COMPUTE_RETURN_ERROR_ON(info.interpolation_policy == InterpolationPolicy::AREA && (wr > 1.f || hr > 1.f));
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const ScaleKernelInfo &info, BorderSize &border)
-{
- Window win{};
- bool window_changed{};
- unsigned int num_elems_processed_per_iteration = 0;
- const DataLayout data_layout = info.data_layout == DataLayout::UNKNOWN ? input->data_layout() : info.data_layout;
-
- switch(data_layout)
- {
- case DataLayout::NCHW:
- {
- if(info.border_mode == BorderMode::UNDEFINED)
- {
- border = BorderSize(0);
- }
-
- num_elems_processed_per_iteration = 4;
- // Configure kernel window
- win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
- AccessWindowStatic input_access(input,
- -border.left, -border.top,
- input->dimension(0) + border.right,
- input->dimension(1) + border.bottom);
- AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
-
- output_access.set_valid_region(win, calculate_valid_region_scale(*(input),
- output->tensor_shape(),
- info.interpolation_policy,
- info.sampling_policy,
- info.border_mode == BorderMode::UNDEFINED));
-
- window_changed = update_window_and_padding(win, input_access, output_access);
- }
- break;
- case DataLayout::NHWC:
- {
- // Configure kernel window
- win = calculate_max_window(*output, Steps());
- }
- break;
- default:
- ARM_COMPUTE_ERROR("Data layout not supported");
- }
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
-}
-} // namespace
-
-BorderSize CLScaleKernel::border_size() const
-{
- return BorderSize(static_cast<size_t>(_data_layout == DataLayout::NCHW));
-}
-
-Status CLScaleKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ScaleKernelInfo &info)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, info));
- const DataLayout data_layout = info.data_layout == DataLayout::UNKNOWN ? input->data_layout() : info.data_layout;
- BorderSize border = BorderSize(static_cast<size_t>(data_layout == DataLayout::NCHW));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), info, border).first);
-
- return Status{};
-}
-
-const ICLTensor *CLScaleKernel::input() const
-{
- return _input;
-}
-
-const ICLTensor *CLScaleKernel::output() const
-{
- return _output;
-}
-
-void CLScaleKernel::configure(const ICLTensor *input, ICLTensor *output, const ScaleKernelInfo &info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, output, info);
-}
-
-void CLScaleKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ScaleKernelInfo &info)
-{
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), info));
- auto padding_info = get_padding_info({ input, output });
-
- _input = input;
- _output = output;
- _interpolation_policy = info.interpolation_policy;
- _data_layout = info.data_layout == DataLayout::UNKNOWN ? input->info()->data_layout() : info.data_layout;
- _align_corners = info.align_corners;
-
- float wr = 0.f;
- float hr = 0.f;
- std::tie(wr, hr) = calculate_scale_factors(*input->info(), *output->info(), info);
-
- const bool call_quantized_kernel = is_data_type_quantized_asymmetric(input->info()->data_type()) && _interpolation_policy == InterpolationPolicy::BILINEAR;
-
- const int idx_width = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
- const int idx_height = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
- const bool is_nhwc = _data_layout == DataLayout::NHWC;
-
- // Compute actual border size
- BorderSize border = border_size();
-
- auto interpolation_policy_to_use = _interpolation_policy;
- // Area interpolation behaves as Nearest Neighbour in case of up-sampling
- if(_interpolation_policy == InterpolationPolicy::AREA && wr <= 1.f && hr <= 1.f)
- {
- interpolation_policy_to_use = InterpolationPolicy::NEAREST_NEIGHBOR;
- }
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), output->info(), info, border);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
-
- // Create kernel
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
- build_opts.add_option("-DCONSTANT_VALUE=" + string_from_pixel_value(info.constant_border_value, input->info()->data_type()));
- build_opts.add_option("-DBORDER_SIZE=" + support::cpp11::to_string(border.right));
- build_opts.add_option_if(info.border_mode == BorderMode::REPLICATE, "-DBORDER_MODE_REPLICATE");
- build_opts.add_option_if(is_nhwc, "-DDEPTH_OUT=" + support::cpp11::to_string(output->info()->dimension(2)));
- build_opts.add_option_if_else(info.sampling_policy == SamplingPolicy::CENTER, "-DSAMPLING_POLICY_CENTER", "-DSAMPLING_POLICY_TOP_LEFT");
- build_opts.add_option_if(_align_corners, "-DALIGN_CORNERS");
- if(call_quantized_kernel)
- {
- const UniformQuantizationInfo qinfo = input->info()->quantization_info().uniform();
- build_opts.add_option("-DSCALE=" + support::cpp11::to_string(qinfo.scale));
- build_opts.add_option("-DOFFSET=" + support::cpp11::to_string(qinfo.offset));
- }
- std::string interpolation_name = string_from_interpolation_policy(interpolation_policy_to_use);
- std::transform(interpolation_name.begin(), interpolation_name.end(), interpolation_name.begin(), ::tolower);
- std::string kernel_name = "scale_" + interpolation_name;
- kernel_name += call_quantized_kernel ? "_quantized_" : "_";
- kernel_name += lower_string(string_from_data_layout(_data_layout));
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- unsigned int idx = is_nhwc ? 2 * num_arguments_per_4D_tensor() : 2 * num_arguments_per_2D_tensor(); //Skip the input and output parameters
-
- const unsigned int input_width = input->info()->dimension(idx_width);
- const unsigned int input_height = input->info()->dimension(idx_height);
-
- _kernel.setArg<float>(idx++, input_width);
- _kernel.setArg<float>(idx++, input_height);
- _kernel.setArg<float>(idx++, wr);
- _kernel.setArg<float>(idx++, hr);
-
- // Set config_id for enabling LWS tuning
- _config_id = "scale_";
- _config_id += (info.border_mode == BorderMode::REPLICATE ? "Bord_rep" : "");
- _config_id += (info.sampling_policy == SamplingPolicy::CENTER ? "center" : "topleft");
- _config_id += (is_nhwc ? "nhwc" : "nchw");
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(3));
- if(is_nhwc)
- {
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
- }
-}
-
-void CLScaleKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
- switch(_data_layout)
- {
- case DataLayout::NCHW:
- {
- Window slice = window.first_slice_window_2D();
-
- do
- {
- unsigned int idx = 0;
- add_2D_tensor_argument(idx, _input, slice);
- add_2D_tensor_argument(idx, _output, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_2D(slice));
- break;
- }
- case DataLayout::NHWC:
- {
- Window collapsed = window.collapse(ICLKernel::window(), Window::DimZ);
- Window slice = collapsed.first_slice_window_4D();
-
- unsigned int idx = 0;
- add_4D_tensor_argument(idx, _input, slice);
- add_4D_tensor_argument(idx, _output, slice);
- enqueue(queue, *this, slice, lws_hint());
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Data layout not supported");
- }
-}
-} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLScaleKernel.h b/src/core/CL/kernels/CLScaleKernel.h
deleted file mode 100644
index a72e3938d9..0000000000
--- a/src/core/CL/kernels/CLScaleKernel.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2016-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLSCALEKERNEL_H
-#define ARM_COMPUTE_CLSCALEKERNEL_H
-
-#include "arm_compute/core/KernelDescriptors.h"
-#include "src/core/CL/ICLSimple2DKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the scale kernel */
-class CLScaleKernel : public ICLSimple2DKernel
-{
-public:
- /** Initialise the kernel's inputs, output and interpolation policy
- *
- * @param[in] input Source tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/F16/F32
- * @param[out] output Destination tensor. Data types supported: Same as @p input
- * All but the lowest two dimensions must be the same size as in the input tensor, i.e. scaling is only performed within the XY-plane.
- * @param[in] info @ref ScaleKernelInfo Kernel descriptor to be used to configure.
- */
- void configure(const ICLTensor *input, ICLTensor *output, const ScaleKernelInfo &info);
- /** Initialise the kernel's inputs, output and interpolation policy
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Source tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/F16/F32
- * @param[out] output Destination tensor. Data types supported: Same as @p input
- * All but the lowest two dimensions must be the same size as in the input tensor, i.e. scaling is only performed within the XY-plane.
- * @param[in] info @ref ScaleKernelInfo Kernel descriptor to be used to configure.
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ScaleKernelInfo &info);
-
- /** Static function to check if given info will lead to a valid configuration of @ref CLScaleKernel
- *
- * @param[in] input Source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/F16/F32
- * @param[in] output Destination tensor info. Data types supported: Same as @p input
- * All but the lowest two dimensions must be the same size as in the input tensor, i.e. scaling is only performed within the XY-plane.
- * @param[in] info @ref ScaleKernelInfo Kernel descriptor to be used to validate
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ScaleKernelInfo &info);
- /** Input tensor accessor.
- *
- * @return Pointer to input tensor.
- */
- const ICLTensor *input() const;
- /** Output tensor accessor.
- *
- * @return Pointer to output tensor.
- */
- const ICLTensor *output() const;
-
- // Inherited methods overridden:
- BorderSize border_size() const override;
- void run(const Window &window, cl::CommandQueue &queue) override;
-
- // Getter for interpolation policy
- InterpolationPolicy get_interpolation_policy() const
- {
- return _interpolation_policy;
- }
-
-private:
- InterpolationPolicy _interpolation_policy = InterpolationPolicy::BILINEAR;
- DataLayout _data_layout = DataLayout::UNKNOWN;
- bool _align_corners = false;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLSCALEKERNEL_H */
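
The deleted scale kernel derived its horizontal and vertical sampling ratios from scale_utils::calculate_resize_ratio, with align_corners pinning the first and last samples. As a point of reference, the conventional form of that computation is sketched below; treat it as an assumption about the helper's behaviour, since the authoritative definition lives in src/core/utils/ScaleUtils.h rather than in this diff.

    // Sketch of the usual resize-ratio formula: with align_corners the ratio is
    // taken over (size - 1) so that the corner samples of input and output coincide.
    // Assumption: this mirrors arm_compute::scale_utils::calculate_resize_ratio.
    float resize_ratio(unsigned int input_size, unsigned int output_size, bool align_corners)
    {
        const unsigned int offset = align_corners ? 1U : 0U;
        return static_cast<float>(input_size - offset) / static_cast<float>(output_size - offset);
    }
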
diff --git a/src/core/CL/kernels/CLSelectKernel.cpp b/src/core/CL/kernels/CLSelectKernel.cpp
index f8e63ddc43..703c64d8d3 100644
--- a/src/core/CL/kernels/CLSelectKernel.cpp
+++ b/src/core/CL/kernels/CLSelectKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,10 +29,11 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
-
#include "support/StringSupport.h"
namespace arm_compute
@@ -50,9 +51,11 @@ Status validate_arguments(const ITensorInfo *c, const ITensorInfo *x, const ITen
const bool is_same_rank = (c->tensor_shape().num_dimensions() == x->tensor_shape().num_dimensions());
ARM_COMPUTE_RETURN_ERROR_ON(is_same_rank && (x->tensor_shape() != c->tensor_shape()));
- ARM_COMPUTE_RETURN_ERROR_ON(!is_same_rank && ((c->tensor_shape().num_dimensions() > 1) || (c->tensor_shape().x() != x->tensor_shape()[x->tensor_shape().num_dimensions() - 1])));
+ ARM_COMPUTE_RETURN_ERROR_ON(!is_same_rank &&
+ ((c->tensor_shape().num_dimensions() > 1) ||
+ (c->tensor_shape().x() != x->tensor_shape()[x->tensor_shape().num_dimensions() - 1])));
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(x, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(x, output);
@@ -62,12 +65,16 @@ Status validate_arguments(const ITensorInfo *c, const ITensorInfo *x, const ITen
}
} // namespace
-CLSelectKernel::CLSelectKernel()
- : _c(nullptr), _x(nullptr), _y(nullptr), _output(nullptr), _has_same_rank(false)
+CLSelectKernel::CLSelectKernel() : _c(nullptr), _x(nullptr), _y(nullptr), _output(nullptr), _has_same_rank(false)
{
+ _type = CLKernelType::ELEMENTWISE;
}
-void CLSelectKernel::configure(const CLCompileContext &compile_context, const ICLTensor *c, const ICLTensor *x, const ICLTensor *y, ICLTensor *output)
+void CLSelectKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *c,
+ const ICLTensor *x,
+ const ICLTensor *y,
+ ICLTensor *output)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(c, x, y, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(c->info(), x->info(), y->info(), output->info()));
@@ -78,7 +85,7 @@ void CLSelectKernel::configure(const CLCompileContext &compile_context, const IC
_output = output;
_has_same_rank = (c->info()->tensor_shape().num_dimensions() == x->info()->tensor_shape().num_dimensions());
- auto padding_info = get_padding_info({ c, x, y, output });
+ auto padding_info = get_padding_info({c, x, y, output});
const unsigned int vec_size_x = adjust_vec_size(16 / x->info()->element_size(), x->info()->dimension(0));
const int vec_size_x_leftovers = output->info()->dimension(0) % vec_size_x;
@@ -90,14 +97,14 @@ void CLSelectKernel::configure(const CLCompileContext &compile_context, const IC
// Create kernel
std::string kernel_name = "select";
- if(_has_same_rank)
+ if (_has_same_rank)
{
kernel_name += "_same_rank";
}
else
{
const bool is_input_rank_greater_than_two = x->info()->tensor_shape().num_dimensions() > 2;
- if(is_input_rank_greater_than_two)
+ if (is_input_rank_greater_than_two)
{
const size_t width = x->info()->tensor_shape().x();
const size_t height = x->info()->tensor_shape().y();
@@ -126,7 +133,8 @@ void CLSelectKernel::configure(const CLCompileContext &compile_context, const IC
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-Status CLSelectKernel::validate(const ITensorInfo *c, const ITensorInfo *x, const ITensorInfo *y, const ITensorInfo *output)
+Status
+CLSelectKernel::validate(const ITensorInfo *c, const ITensorInfo *x, const ITensorInfo *y, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(c, x, y, output));
return Status{};
@@ -140,7 +148,7 @@ void CLSelectKernel::run(const arm_compute::Window &window, cl::CommandQueue &qu
Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
Window slice = collapsed.first_slice_window_3D();
- if(!_has_same_rank)
+ if (!_has_same_rank)
{
Window vector_slice = window.first_slice_window_1D();
vector_slice.set(Window::DimX, Window::Dimension(0, 0, 0));
@@ -151,7 +159,7 @@ void CLSelectKernel::run(const arm_compute::Window &window, cl::CommandQueue &qu
do
{
unsigned int idx = _has_same_rank ? 0 : num_arguments_per_1D_tensor();
- if(_has_same_rank)
+ if (_has_same_rank)
{
add_3D_tensor_argument(idx, _c, slice);
}
@@ -160,7 +168,6 @@ void CLSelectKernel::run(const arm_compute::Window &window, cl::CommandQueue &qu
add_3D_tensor_argument(idx, _output, slice);
enqueue(queue, *this, slice, lws_hint());
- }
- while(collapsed.slide_window_slice_3D(slice));
+ } while (collapsed.slide_window_slice_3D(slice));
}
} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLSelectKernel.h b/src/core/CL/kernels/CLSelectKernel.h
index b8c10cd7cf..c4256fd743 100644
--- a/src/core/CL/kernels/CLSelectKernel.h
+++ b/src/core/CL/kernels/CLSelectKernel.h
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_CLSELECTKERNEL_H
#include "arm_compute/core/Types.h"
+
#include "src/core/CL/ICLKernel.h"
namespace arm_compute
@@ -60,7 +61,11 @@ public:
 * @param[in]  y               Second input tensor. Data types supported: Same as @p x
 * @param[out] output          Output tensor. Data types supported: Same as @p x.
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *c, const ICLTensor *x, const ICLTensor *y, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *c,
+ const ICLTensor *x,
+ const ICLTensor *y,
+ ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLSelectKernel
*
* @param[in] c Condition input tensor. Data types supported: U8.
diff --git a/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp b/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp
deleted file mode 100644
index 526d9e187d..0000000000
--- a/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp
+++ /dev/null
@@ -1,370 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLSoftmaxLayerKernel.h"
-
-#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace
-{
-/** Calculates softmax parameters from the quantized input scale and scaling factor for the exponent and places them as build options.
- *
- * Prepares these build options:
- * -INPUT_BETA_MULTIPLIER, INPUT_BETA_LEFT_SHIFT - quantized representation of beta multiplier.
- * -DIFF_MIN - threshold difference between maximum value of input data and current processed value,
- * it defines whether the value will be taken into account or not.
- *
- * @param[in] build_opts Build options to extend
- * @param[in] input_scale Input scaling factor
- * @param[in] beta Exponent scaling factor beta
- */
-CLBuildOptions prepare_quantized_softmax_build_options(float input_scale, float beta)
-{
- // Number of integer bits in temporary fixed-point representation of current-to-max difference
- static const int scaled_diff_int_bits = 5;
- // Number of integer bits used in temporary fixed-point representation of exponent accumulator
- static const int exp_accumulation_in_bits = 12;
-
- const double beta_multiplier = std::min(
- 1.0 * beta * input_scale * (1 << (31 - scaled_diff_int_bits)),
- (1LL << 31) - 1.0);
- int input_beta_multiplier;
- int input_beta_left_shift;
- quantization::calculate_quantized_multiplier_greater_than_one(beta_multiplier, &input_beta_multiplier, &input_beta_left_shift);
-
- const double max_input_rescaled = 1.0 * ((1 << scaled_diff_int_bits) - 1) * (1LL << (31 - scaled_diff_int_bits)) / (1LL << input_beta_left_shift);
- const int diff_min = -1.f * std::floor(max_input_rescaled);
-
- CLBuildOptions build_opts;
- build_opts.add_option("-DSCALED_DIFF_INT_BITS=" + support::cpp11::to_string(scaled_diff_int_bits));
- build_opts.add_option("-DEXP_ACCUMULATION_INT_BITS=" + support::cpp11::to_string(exp_accumulation_in_bits));
- build_opts.add_option("-DINPUT_BETA_MULTIPLIER=" + support::cpp11::to_string(input_beta_multiplier));
- build_opts.add_option("-DINPUT_BETA_LEFT_SHIFT=" + support::cpp11::to_string(input_beta_left_shift));
- build_opts.add_option("-DDIFF_MIN=" + support::cpp11::to_string(diff_min));
-
- return build_opts;
-}
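
The deleted helper above folds beta and the input quantization scale into the fixed-point constants consumed by the quantized softmax kernels. As a rough standalone illustration (not part of the patch, with an assumed input_scale of 0.05 and beta of 1.0, and std::frexp standing in for calculate_quantized_multiplier_greater_than_one), the derivation of INPUT_BETA_MULTIPLIER, INPUT_BETA_LEFT_SHIFT and DIFF_MIN looks roughly like this:

// Standalone sketch only; values and the frexp-based split are assumptions.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

int main()
{
    const int    scaled_diff_int_bits = 5;    // integer bits of the max-minus-value difference
    const double input_scale          = 0.05; // hypothetical QASYMM8 input scale
    const double beta                 = 1.0;  // hypothetical softmax beta

    // Rescale beta * scale into the Q5.26-style fixed point used for the difference,
    // capped just below 2^31 as in the original helper.
    const double beta_multiplier =
        std::min(beta * input_scale * (1 << (31 - scaled_diff_int_bits)), (1LL << 31) - 1.0);

    // Split beta_multiplier into a normalized Q31 mantissa and a left shift,
    // approximating what calculate_quantized_multiplier_greater_than_one() returns.
    int          left_shift = 0;
    const double mantissa   = std::frexp(beta_multiplier, &left_shift);
    const auto   quantized_multiplier = static_cast<int32_t>(std::lround(mantissa * (1LL << 31)));

    // Differences more negative than diff_min contribute nothing to the exponent sum.
    const double max_input_rescaled =
        1.0 * ((1 << scaled_diff_int_bits) - 1) * (1LL << (31 - scaled_diff_int_bits)) / (1LL << left_shift);
    const int diff_min = -static_cast<int>(std::floor(max_input_rescaled));

    std::printf("INPUT_BETA_MULTIPLIER=%d INPUT_BETA_LEFT_SHIFT=%d DIFF_MIN=%d\n",
                quantized_multiplier, left_shift, diff_min);
    return 0;
}
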
-
-Status validate_arguments_1DMaxShiftExpSum(const ITensorInfo *input, const ITensorInfo *max, const ITensorInfo *output, const ITensorInfo *sum)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(max, sum, output);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, max);
-
- const bool is_quantized_asymmetric = is_data_type_quantized_asymmetric(input->data_type());
-
- // Checks performed when output is configured
- if(output->total_size() != 0)
- {
- if(is_quantized_asymmetric)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- }
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
- }
-
- // Checks performed when sum is configured
- if(sum->total_size() != 0)
- {
- if(is_quantized_asymmetric)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(sum, 1, DataType::S32);
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(max, sum);
- }
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(max, sum);
- }
-
- return Status{};
-}
-
-Status validate_arguments_1DNorm(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, const SoftmaxKernelInfo &info)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::S32, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(sum, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, sum);
- ARM_COMPUTE_RETURN_ERROR_ON(info.is_log && !is_data_type_float(info.input_data_type));
-
- // Note: output should always have a scale of 1/256 and offset 0
- const QuantizationInfo allowed_quantization_info = get_softmax_output_quantization_info(info.input_data_type, info.is_log);
- const bool is_quantized_asymmetric = is_data_type_quantized_asymmetric(info.input_data_type);
-
- // Checks performed when output is configured
- if(output->total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
- if(!is_quantized_asymmetric)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
- ARM_COMPUTE_RETURN_ERROR_ON(output->quantization_info() != allowed_quantization_info);
- }
- }
-
- return Status{};
-}
-} // namespace
-
-/**< Grid size (obtained through auto-tuning) */
-const unsigned int CLLogits1DMaxShiftExpSumKernel::_grid_size = 64;
-/**< Vector size in the serial case (obtained through auto-tuning) */
-const unsigned int CLLogits1DMaxShiftExpSumKernel::_serial_vector_size = 8;
-/**< Vector size in the parallel case (obtained through auto-tuning, enables the best memory access pattern for Bifrost) .*/
-const unsigned int CLLogits1DMaxShiftExpSumKernel::_parallel_vector_size = 4;
-
-CLLogits1DMaxShiftExpSumKernel::CLLogits1DMaxShiftExpSumKernel()
- : _input(nullptr), _max(nullptr), _output(nullptr), _sum(nullptr)
-{
-}
-
-void CLLogits1DMaxShiftExpSumKernel::configure(const ICLTensor *input, ICLTensor *max, ICLTensor *output, ICLTensor *sum, const SoftmaxKernelInfo &info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, max, output, sum, info);
-}
-
-void CLLogits1DMaxShiftExpSumKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *max, ICLTensor *output, ICLTensor *sum, const SoftmaxKernelInfo &info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, max, sum, output);
-
- auto padding_info = get_padding_info({ input, max, output, sum });
-
- // Output auto initialization if not yet initialized
- auto_init_if_empty(*sum->info(), input->info()->clone()->set_tensor_shape(max->info()->tensor_shape()));
- auto_init_if_empty(*output->info(), *input->info()->clone());
-
- // Perform validation step
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_1DMaxShiftExpSum(input->info(), max->info(), output->info(), sum->info()));
-
- _input = input;
- _max = max;
- _output = output;
- _sum = sum;
-
- const DataType dt = input->info()->data_type();
- const UniformQuantizationInfo qinfo = input->info()->quantization_info().uniform();
- const size_t reduction_dim_size = input->info()->dimension(0);
- const float beta = info.beta;
- const auto is_signed_qasymm8 = is_data_type_quantized_asymmetric_signed(info.input_data_type);
- const int min_value = is_signed_qasymm8 ? CL_SCHAR_MIN : 0;
-
- ParallelReductionInfo parallel_reduction_info = is_parallel_reduction(reduction_dim_size);
- const unsigned int vector_size = adjust_vec_size(std::get<1>(parallel_reduction_info), reduction_dim_size);
-
- // Set build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(dt));
- build_opts.add_option("-DMIN_VALUE=" + support::cpp11::to_string(min_value));
- build_opts.add_option("-DVECTOR_SIZE=" + support::cpp11::to_string(vector_size));
- build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(reduction_dim_size));
- build_opts.add_option("-DVECTOR_SIZE_LEFTOVER=" + support::cpp11::to_string(reduction_dim_size % vector_size));
- build_opts.add_option("-DLOG_VECTOR_SIZE=" + support::cpp11::to_string(lround(log2(vector_size))));
- build_opts.add_option_if((reduction_dim_size % vector_size) != 0, "-DNON_MULTIPLE_OF_VECTOR_SIZE");
- build_opts.add_option_if(is_signed_qasymm8, "-DQASYMM8_SIGNED");
- build_opts.add_option_if(is_data_type_float(dt) && (beta != 1.0f), "-DBETA=" + float_to_string_with_full_precision(beta));
- build_opts.add_option_if(is_data_type_float(dt) && info.is_log, "-DLOG_SOFTMAX");
- build_opts.add_option_if(is_data_type_float(dt), "-DMINVAL=" + ((dt == DataType::F16) ? std::string("-HALF_MAX") : std::string("-FLT_MAX")));
- build_opts.add_options_if(is_data_type_quantized_asymmetric(dt), prepare_quantized_softmax_build_options(qinfo.scale, beta).options());
-
- cl::NDRange lws_hint(cl::NullRange);
- std::string kernel_name = std::string("softmax_layer_max_shift_exp_sum_") + (is_data_type_quantized_asymmetric(dt) ? "quantized_" : "");
-
- // Configure parallel kernel if needed
- if(std::get<0>(parallel_reduction_info))
- {
- kernel_name += "parallel";
- bool is_grid_size_pow2 = (_grid_size != 0) && ((_grid_size & (_grid_size - 1)) == 0);
- build_opts.add_option_if(is_grid_size_pow2 && _grid_size <= 256, "-DGRID_SIZE=" + support::cpp11::to_string(_grid_size));
-
- // Handle boundary conditions.
- const unsigned int multiple_grid_size = (reduction_dim_size / vector_size) % _grid_size;
- build_opts.add_option_if((multiple_grid_size != 0) || ((reduction_dim_size % vector_size) != 0), "-DNON_MULTIPLE_OF_GRID_SIZE");
- // Setting _lws_hint in this way can also communicate grid_size to CLLogits1DMaxShiftExpSumKernel::run().
- // A single workgroup performs reduction in dimension 0 in the parallel case, hence lws[0]==gws[0].
- lws_hint = cl::NDRange(_grid_size);
- }
- else
- {
- kernel_name += "serial";
- }
-
- // Create kernel.
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Configure window
- Window win = calculate_max_window(*(input->info()), Steps(reduction_dim_size));
- ICLKernel::configure_internal(win, lws_hint);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status CLLogits1DMaxShiftExpSumKernel::validate(const ITensorInfo *input, const ITensorInfo *max, const ITensorInfo *output, const ITensorInfo *sum)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_1DMaxShiftExpSum(input, max, output, sum));
- return Status{};
-}
-
-CLLogits1DMaxShiftExpSumKernel::ParallelReductionInfo CLLogits1DMaxShiftExpSumKernel::is_parallel_reduction(size_t size)
-{
- bool is_parallel_reduction = (size >= (_grid_size * _serial_vector_size)) && (_grid_size > 1);
- unsigned int vector_size = is_parallel_reduction ? _parallel_vector_size : _serial_vector_size;
- return std::make_tuple(is_parallel_reduction, vector_size);
-}
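
For reference, the threshold encoded in is_parallel_reduction() above: with the auto-tuned constants (grid size 64, serial vector size 8), rows of at least 512 elements take the parallel path with a vector size of 4, while shorter rows stay serial with a vector size of 8. A small standalone sketch of that decision (not part of the patch; the sample widths are arbitrary):

// Standalone sketch only; mirrors the serial/parallel choice with hard-coded constants.
#include <cstddef>
#include <cstdio>
#include <tuple>

std::tuple<bool, unsigned int> pick_reduction(std::size_t size)
{
    const unsigned int grid_size            = 64; // _grid_size (auto-tuned)
    const unsigned int serial_vector_size   = 8;  // _serial_vector_size
    const unsigned int parallel_vector_size = 4;  // _parallel_vector_size

    // Parallel reduction only pays off once a row spans at least grid_size * serial_vector_size elements.
    const bool parallel = (size >= static_cast<std::size_t>(grid_size) * serial_vector_size) && (grid_size > 1);
    return std::make_tuple(parallel, parallel ? parallel_vector_size : serial_vector_size);
}

int main()
{
    const std::size_t widths[] = {128, 511, 512, 4096};
    for (std::size_t width : widths)
    {
        bool         parallel = false;
        unsigned int vec      = 0;
        std::tie(parallel, vec) = pick_reduction(width);
        std::printf("width=%zu -> %s reduction, vector_size=%u\n", width,
                    parallel ? "parallel" : "serial", vec);
    }
    return 0;
}
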
-
-void CLLogits1DMaxShiftExpSumKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
-
- // Collapse window in Z dimension
- Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
-
- // Reconfigure window in case of parallel reduction
- ParallelReductionInfo parallel_reduction_info = is_parallel_reduction(_input->info()->dimension(0));
- if(std::get<0>(parallel_reduction_info))
- {
- // Launch grid_size parallel work items
- window_collapsed.set(Window::DimX, Window::Dimension(0, _grid_size, 1));
- }
-
- // Get slices
- Window slice = window_collapsed.first_slice_window_3D();
- do
- {
- unsigned int idx = 0;
- // Set inputs
- add_3D_tensor_argument(idx, _input, slice);
- add_3D_tensor_argument(idx, _max, slice);
- add_3D_tensor_argument(idx, _output, slice);
- add_3D_tensor_argument(idx, _sum, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window_collapsed.slide_window_slice_3D(slice));
-}
-
-CLLogits1DNormKernel::CLLogits1DNormKernel()
- : _input(nullptr), _sum(nullptr), _output(nullptr)
-{
-}
-
-void CLLogits1DNormKernel::configure(const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, const SoftmaxKernelInfo &info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, sum, output, info);
-}
-
-void CLLogits1DNormKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, const SoftmaxKernelInfo &info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, sum, output);
-
- auto padding_info = get_padding_info({ input, output, sum });
-
- // Note: output should always have a scale of 1/256 and offset 0
- const bool is_quantized_asymmetric = is_data_type_quantized_asymmetric(info.input_data_type);
- const DataType output_data_type = info.input_data_type;
- const QuantizationInfo allowed_quantization_info = get_softmax_output_quantization_info(info.input_data_type, info.is_log);
- const UniformQuantizationInfo qinfo = input->info()->quantization_info().uniform();
-
- // Output auto initialization if not yet initialized
- auto_init_if_empty(*output->info(),
- input->info()->clone()->set_data_type(output_data_type).set_quantization_info(allowed_quantization_info));
-
- // Perform validation step
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_1DNorm(input->info(), sum->info(), output->info(), info));
-
- _input = input;
- _sum = sum;
- _output = output;
-
- const auto is_signed_qasymm8 = is_data_type_quantized_asymmetric_signed(info.input_data_type);
- const int min_value = is_signed_qasymm8 ? CL_SCHAR_MIN : 0;
- const unsigned int vector_size = adjust_vec_size(16, input->info()->dimension(0));
-
- // Set build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(info.input_data_type));
- build_opts.add_option("-DMIN_VALUE=" + support::cpp11::to_string(min_value));
- build_opts.add_option("-DVECTOR_SIZE=" + support::cpp11::to_string(vector_size));
- build_opts.add_option("-DVECTOR_SIZE_LEFTOVER=" + support::cpp11::to_string(input->info()->dimension(0) % vector_size));
- build_opts.add_option_if(is_data_type_quantized_asymmetric_signed(info.input_data_type), "-DQASYMM8_SIGNED");
- build_opts.add_options_if(is_quantized_asymmetric,
- prepare_quantized_softmax_build_options(qinfo.scale, info.beta).options());
- build_opts.add_option_if(info.is_log, "-DLOG_SOFTMAX");
-
- // Create kernel
- std::string kernel_name = std::string("softmax_layer_norm") + (is_quantized_asymmetric ? "_quantized" : "");
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Configure window
- auto win = calculate_max_window(*(input->info()), Steps(vector_size));
- ICLKernel::configure_internal(win);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status CLLogits1DNormKernel::validate(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, const SoftmaxKernelInfo &info)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_1DNorm(input, sum, output, info));
-
- return Status{};
-}
-
-void CLLogits1DNormKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
-
- Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
- Window slice = window_collapsed.first_slice_window_3D();
-
- do
- {
- Window sum_slice = slice;
- sum_slice.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- unsigned int idx = 0;
- // Set inputs
- add_3D_tensor_argument(idx, _input, slice);
- add_3D_tensor_argument(idx, _sum, sum_slice);
- add_3D_tensor_argument(idx, _output, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window_collapsed.slide_window_slice_3D(slice));
-}
-} // namespace arm_compute
\ No newline at end of file
diff --git a/src/core/CL/kernels/CLSoftmaxLayerKernel.h b/src/core/CL/kernels/CLSoftmaxLayerKernel.h
deleted file mode 100644
index 29e0f63e46..0000000000
--- a/src/core/CL/kernels/CLSoftmaxLayerKernel.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLSOFTMAXLAYERKERNEL_H
-#define ARM_COMPUTE_CLSOFTMAXLAYERKERNEL_H
-
-#include "arm_compute/core/KernelDescriptors.h"
-#include "src/core/CL/ICLSimple3DKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for max, shifting, exponentiating and summing the logits */
-class CLLogits1DMaxShiftExpSumKernel : public ICLKernel
-{
-public:
- /** Info for whether a parallel reduction will be run and the vector size of the execution. */
- using ParallelReductionInfo = std::tuple<bool, unsigned int>;
-
-public:
- /** Default constructor */
- CLLogits1DMaxShiftExpSumKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLLogits1DMaxShiftExpSumKernel(const CLLogits1DMaxShiftExpSumKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLLogits1DMaxShiftExpSumKernel &operator=(const CLLogits1DMaxShiftExpSumKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLLogits1DMaxShiftExpSumKernel(CLLogits1DMaxShiftExpSumKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLLogits1DMaxShiftExpSumKernel &operator=(CLLogits1DMaxShiftExpSumKernel &&) = default;
- /** Set the input and output tensors.
- *
- * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
- * @param[in,out] max Max values tensor. Data types supported: same as @p input
- * @param[out] output Destination tensor. Data types supported: same as @p input
- * @param[out] sum Sum of 1D logits tensor. Data types supported: same as @p input
- * @param[in] info Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
- */
- void configure(const ICLTensor *input, ICLTensor *max, ICLTensor *output, ICLTensor *sum, const SoftmaxKernelInfo &info);
- /** Set the input and output tensors.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
- * @param[in,out] max Max values tensor. Data types supported: same as @p input
- * @param[out] output Destination tensor. Data types supported: same as @p input
- * @param[out] sum Sum of 1D logits tensor. Data types supported: same as @p input
- * @param[in] info Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *max, ICLTensor *output, ICLTensor *sum, const SoftmaxKernelInfo &info);
- /** Static function to check if given info will lead to a valid configuration of @ref CLLogits1DMaxShiftExpSumKernel
- *
- * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
- * @param[in] max Max values tensor. Data types supported: same as @p input
- * @param[in] output Destination tensor. Data types supported: same as @p input
- * @param[in] sum Sum of 1D logits tensor. Data types supported: same as @p input
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *max, const ITensorInfo *output, const ITensorInfo *sum);
- /** Checks if the given size is eligible for parallel reduction
- *
- * @note Serial reduction is launched for width < (_grid_size * _serial_vector_size).
- * @note Parallel reduction is launched for width >= (_grid_size * _serial_vector_size) and vector_size is forced to 4.
- *
- * @param[in] size Size to check
- *
- * @return A two-element tuple where the first element is a boolean specifying if a parallel reduction will be run,
- * while the second element is the vector size of the execution.
- */
- static ParallelReductionInfo is_parallel_reduction(size_t size);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_input;
- ICLTensor *_max;
- ICLTensor *_output;
- ICLTensor *_sum;
-
-private:
- static const unsigned int _grid_size;
- static const unsigned int _serial_vector_size;
- static const unsigned int _parallel_vector_size;
-};
-/** Interface for calculating the final step of the Softmax Layer where each logit value is multiplied by the inverse of the sum of the logits. */
-class CLLogits1DNormKernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLLogits1DNormKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLLogits1DNormKernel(const CLLogits1DNormKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLLogits1DNormKernel &operator=(const CLLogits1DNormKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLLogits1DNormKernel(CLLogits1DNormKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLLogits1DNormKernel &operator=(CLLogits1DNormKernel &&) = default;
- /** Set the input and output tensors.
- *
- * @param[in] input Source tensor. Data types supported: S32/F16/F32. If this kernel is used for log softmax, only F32/F16 is supported.
- * @param[in] sum Sum tensor. Dimensions should be dim(input)-1. Data types supported: same as @p input
- * @param[out] output Destination tensor. Data types supported: QASYMM8/QASYMM8_SIGNED for S32 @p input, or same as @p input
- * @param[in] info Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
- */
- void configure(const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, const SoftmaxKernelInfo &info);
- /** Set the input and output tensors.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Source tensor. Data types supported: S32/F16/F32. If this kernel is used for log softmax, only F32/F16 is supported.
- * @param[in] sum Sum tensor. Dimensions should be dim(input)-1. Data types supported: same as @p input
- * @param[out] output Destination tensor. Data types supported: QASYMM8/QASYMM8_SIGNED for S32 @p input, or same as @p input
- * @param[in] info Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, const SoftmaxKernelInfo &info);
- /** Static function to check if given info will lead to a valid configuration of @ref CLLogits1DNormKernel
- *
- * @param[in] input Source tensor. Data types supported: S32/F16/F32. If this kernel is used for log softmax, only F32/F16 is supported.
- * @param[in] sum Sum tensor. Dimensions should be dim(input)-1. Data types supported: same as @p input
- * @param[in] output Destination tensor. Data types supported: QASYMM8 for S32 @p input, or same as @p input
- * @param[in] info Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, const SoftmaxKernelInfo &info);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_input;
- const ICLTensor *_sum;
- ICLTensor *_output;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLSOFTMAXLAYERKERNEL_H */
diff --git a/src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp b/src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp
index 57f7af488b..f4c0839ad2 100644
--- a/src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp
+++ b/src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,6 +26,8 @@
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
@@ -37,19 +39,22 @@ namespace arm_compute
{
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *block_info, const ITensorInfo *paddings, const ITensorInfo *output)
+Status validate_arguments(const ITensorInfo *input,
+ const ITensorInfo *block_info,
+ const ITensorInfo *paddings,
+ const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, block_info, paddings, output);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(block_info, 1, DataType::S32);
ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4);
ARM_COMPUTE_RETURN_ERROR_ON(block_info->num_dimensions() > 1);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(block_info->tensor_shape(), TensorShape{ 2 });
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(block_info->tensor_shape(), TensorShape{2});
ARM_COMPUTE_RETURN_ERROR_ON(paddings->num_dimensions() > 2);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(paddings->tensor_shape(), TensorShape{ 2, 2 });
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(paddings->tensor_shape(), TensorShape{2, 2});
// Validate output if initialized
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
const DataLayout data_layout = input->data_layout();
const int idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
@@ -60,7 +65,11 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *block_inf
return Status{};
}
-Status validate_arguments_static(const ITensorInfo *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left, const Size2D &padding_right,
+Status validate_arguments_static(const ITensorInfo *input,
+ const int block_shape_x,
+ const int block_shape_y,
+ const Size2D &padding_left,
+ const Size2D &padding_right,
const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
@@ -69,9 +78,10 @@ Status validate_arguments_static(const ITensorInfo *input, const int block_shape
ARM_COMPUTE_RETURN_ERROR_ON(block_shape_x < 1 || block_shape_y < 1);
// Validate output if initialized
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
- TensorShape expected_output_shape = misc::shape_calculator::compute_space_to_batch_shape(input, block_shape_x, block_shape_y, padding_left, padding_right);
+ TensorShape expected_output_shape = misc::shape_calculator::compute_space_to_batch_shape(
+ input, block_shape_x, block_shape_y, padding_left, padding_right);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), expected_output_shape);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
@@ -84,18 +94,27 @@ Status validate_arguments_static(const ITensorInfo *input, const int block_shape
CLSpaceToBatchLayerKernel::CLSpaceToBatchLayerKernel()
: _input(nullptr), _block_shape(nullptr), _paddings(nullptr), _output(nullptr)
{
+ _type = CLKernelType::ELEMENTWISE;
}
-void CLSpaceToBatchLayerKernel::configure(const ICLTensor *input, const ICLTensor *block_shape, const ICLTensor *paddings, ICLTensor *output)
+void CLSpaceToBatchLayerKernel::configure(const ICLTensor *input,
+ const ICLTensor *block_shape,
+ const ICLTensor *paddings,
+ ICLTensor *output)
{
configure(CLKernelLibrary::get().get_compile_context(), input, block_shape, paddings, output);
}
-void CLSpaceToBatchLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *block_shape, const ICLTensor *paddings, ICLTensor *output)
+void CLSpaceToBatchLayerKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ const ICLTensor *block_shape,
+ const ICLTensor *paddings,
+ ICLTensor *output)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, block_shape, paddings, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), block_shape->info(), paddings->info(), output->info()));
- auto padding_info = get_padding_info({ input, block_shape, paddings, output });
+ ARM_COMPUTE_ERROR_THROW_ON(
+ validate_arguments(input->info(), block_shape->info(), paddings->info(), output->info()));
+ auto padding_info = get_padding_info({input, block_shape, paddings, output});
_input = input;
_block_shape = block_shape;
@@ -109,14 +128,17 @@ void CLSpaceToBatchLayerKernel::configure(const CLCompileContext &compile_contex
// Create kernel
CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(data_size_from_type(input->info()->data_type())));
+ build_opts.add_option("-DDATA_TYPE=" +
+ get_cl_unsigned_type_from_element_size(data_size_from_type(input->info()->data_type())));
build_opts.add_option("-DWIDTH_OUT=" + support::cpp11::to_string(output->info()->dimension(idx_width)));
build_opts.add_option("-DHEIGHT_OUT=" + support::cpp11::to_string(output->info()->dimension(idx_height)));
build_opts.add_option("-DBATCH_SIZE=" + support::cpp11::to_string(output->info()->dimension(idx_batch)));
build_opts.add_option("-DWIDTH_IN=" + support::cpp11::to_string(input->info()->dimension(idx_width)));
build_opts.add_option("-DHEIGHT_IN=" + support::cpp11::to_string(input->info()->dimension(idx_height)));
build_opts.add_option("-DBATCH_IN=" + support::cpp11::to_string(input->info()->dimension(idx_batch)));
- _kernel = create_kernel(compile_context, "space_to_batch_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options());
+ _kernel = create_kernel(compile_context,
+ "space_to_batch_" + lower_string(string_from_data_layout(input->info()->data_layout())),
+ build_opts.options());
// Configure kernel window
Window win = calculate_max_window(*output->info(), Steps());
@@ -124,22 +146,34 @@ void CLSpaceToBatchLayerKernel::configure(const CLCompileContext &compile_contex
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-void CLSpaceToBatchLayerKernel::configure(const ICLTensor *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left, const Size2D &padding_right,
- ICLTensor *output)
+void CLSpaceToBatchLayerKernel::configure(const ICLTensor *input,
+ const int block_shape_x,
+ const int block_shape_y,
+ const Size2D &padding_left,
+ const Size2D &padding_right,
+ ICLTensor *output)
{
- configure(CLKernelLibrary::get().get_compile_context(), input, block_shape_x, block_shape_y, padding_left, padding_right, output);
+ configure(CLKernelLibrary::get().get_compile_context(), input, block_shape_x, block_shape_y, padding_left,
+ padding_right, output);
}
-void CLSpaceToBatchLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left,
- const Size2D &padding_right,
- ICLTensor *output)
+void CLSpaceToBatchLayerKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ const int block_shape_x,
+ const int block_shape_y,
+ const Size2D &padding_left,
+ const Size2D &padding_right,
+ ICLTensor *output)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- TensorShape output_shape = misc::shape_calculator::compute_space_to_batch_shape(input->info(), block_shape_x, block_shape_y, padding_left, padding_right);
- auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type(), input->info()->quantization_info());
+ TensorShape output_shape = misc::shape_calculator::compute_space_to_batch_shape(
+ input->info(), block_shape_x, block_shape_y, padding_left, padding_right);
+ auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type(),
+ input->info()->quantization_info());
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_static(input->info(), block_shape_x, block_shape_y, padding_left, padding_right, output->info()));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_static(input->info(), block_shape_x, block_shape_y, padding_left,
+ padding_right, output->info()));
_input = input;
_output = output;
@@ -151,7 +185,8 @@ void CLSpaceToBatchLayerKernel::configure(const CLCompileContext &compile_contex
// Create kernel
CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(data_size_from_type(input->info()->data_type())));
+ build_opts.add_option("-DDATA_TYPE=" +
+ get_cl_unsigned_type_from_element_size(data_size_from_type(input->info()->data_type())));
build_opts.add_option("-DWIDTH_OUT=" + support::cpp11::to_string(output->info()->dimension(idx_width)));
build_opts.add_option("-DHEIGHT_OUT=" + support::cpp11::to_string(output->info()->dimension(idx_height)));
build_opts.add_option("-DBATCH_SIZE=" + support::cpp11::to_string(output->info()->dimension(idx_batch)));
@@ -164,22 +199,32 @@ void CLSpaceToBatchLayerKernel::configure(const CLCompileContext &compile_contex
build_opts.add_option("-DPAD_RIGHT_X=" + support::cpp11::to_string(padding_right.x()));
build_opts.add_option("-DPAD_LEFT_Y=" + support::cpp11::to_string(padding_left.y()));
build_opts.add_option("-DPAD_RIGHT_Y=" + support::cpp11::to_string(padding_right.y()));
- _kernel = create_kernel(compile_context, "space_to_batch_static_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options());
+ _kernel = create_kernel(
+ compile_context, "space_to_batch_static_" + lower_string(string_from_data_layout(input->info()->data_layout())),
+ build_opts.options());
// Configure kernel window
Window win = calculate_max_window(*output->info(), Steps());
ICLKernel::configure_internal(win);
}
-Status CLSpaceToBatchLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *block_shape, const ITensorInfo *paddings, const ITensorInfo *output)
+Status CLSpaceToBatchLayerKernel::validate(const ITensorInfo *input,
+ const ITensorInfo *block_shape,
+ const ITensorInfo *paddings,
+ const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, block_shape, paddings, output));
return Status{};
}
-Status CLSpaceToBatchLayerKernel::validate(const ITensorInfo *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left, const Size2D &padding_right,
+Status CLSpaceToBatchLayerKernel::validate(const ITensorInfo *input,
+ const int block_shape_x,
+ const int block_shape_y,
+ const Size2D &padding_left,
+ const Size2D &padding_right,
const ITensorInfo *output)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_static(input, block_shape_x, block_shape_y, padding_left, padding_right, output));
+ ARM_COMPUTE_RETURN_ON_ERROR(
+ validate_arguments_static(input, block_shape_x, block_shape_y, padding_left, padding_right, output));
return Status{};
}
@@ -216,7 +261,6 @@ void CLSpaceToBatchLayerKernel::run(const Window &window, cl::CommandQueue &queu
add_3D_tensor_argument(idx, _output, slice_out);
enqueue(queue, *this, slice_out, lws_hint());
++batch_id;
- }
- while(window.slide_window_slice_3D(slice_out));
+ } while (window.slide_window_slice_3D(slice_out));
}
} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLSpaceToBatchLayerKernel.h b/src/core/CL/kernels/CLSpaceToBatchLayerKernel.h
index 4817cfeef2..f9dce9db47 100644
--- a/src/core/CL/kernels/CLSpaceToBatchLayerKernel.h
+++ b/src/core/CL/kernels/CLSpaceToBatchLayerKernel.h
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_CLSPACETOBATCHLAYERKERNEL_H
#include "arm_compute/core/Types.h"
+
#include "src/core/CL/ICLKernel.h"
namespace arm_compute
@@ -63,7 +64,11 @@ public:
* @param[in] paddings 2-D tensor with shape [2, M] (First dimension is the fastest-changing dimension). Supported M: 2. Data types supported: S32
* @param[out] output Tensor output. Data types supported: same as @p input
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *block_shape, const ICLTensor *paddings, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ const ICLTensor *block_shape,
+ const ICLTensor *paddings,
+ ICLTensor *output);
/** Initialise the kernel's input and output. (Static block shape and paddings)
*
* @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: All.
@@ -73,7 +78,12 @@ public:
* @param[in] padding_right The padding at the end of every dimension of the output tensor.
* @param[out] output Tensor output. Data types supported: same as @p input
*/
- void configure(const ICLTensor *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left, const Size2D &padding_right, ICLTensor *output);
+ void configure(const ICLTensor *input,
+ const int block_shape_x,
+ const int block_shape_y,
+ const Size2D &padding_left,
+ const Size2D &padding_right,
+ ICLTensor *output);
/** Initialise the kernel's input and output. (Static block shape and paddings)
*
* @param[in] compile_context The compile context to be used.
@@ -84,8 +94,13 @@ public:
* @param[in] padding_right The padding at the end of every dimension of the output tensor.
* @param[out] output Tensor output. Data types supported: same as @p input
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left, const Size2D &padding_right,
- ICLTensor *output);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ const int block_shape_x,
+ const int block_shape_y,
+ const Size2D &padding_left,
+ const Size2D &padding_right,
+ ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLSpaceToBatchLayerKernel
*
* @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: All.
@@ -95,7 +110,10 @@ public:
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *block_shape, const ITensorInfo *paddings, const ITensorInfo *output);
+ static Status validate(const ITensorInfo *input,
+ const ITensorInfo *block_shape,
+ const ITensorInfo *paddings,
+ const ITensorInfo *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLSpaceToBatchLayerKernel (Static block shape and paddings)
*
* @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: All.
@@ -107,7 +125,12 @@ public:
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left, const Size2D &padding_right, const ITensorInfo *output);
+ static Status validate(const ITensorInfo *input,
+ const int block_shape_x,
+ const int block_shape_y,
+ const Size2D &padding_left,
+ const Size2D &padding_right,
+ const ITensorInfo *output);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/src/core/CL/kernels/CLSpaceToDepthLayerKernel.cpp b/src/core/CL/kernels/CLSpaceToDepthLayerKernel.cpp
index 4e5b417ec6..25662b5c62 100644
--- a/src/core/CL/kernels/CLSpaceToDepthLayerKernel.cpp
+++ b/src/core/CL/kernels/CLSpaceToDepthLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,6 +26,8 @@
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
@@ -44,7 +46,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, i
ARM_COMPUTE_RETURN_ERROR_ON(block_shape < 1);
// Validate output if initialized
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
const DataLayout data_layout = input->data_layout();
const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
@@ -63,9 +65,9 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, i
}
} // namespace
-CLSpaceToDepthLayerKernel::CLSpaceToDepthLayerKernel()
- : _input(nullptr), _output(nullptr), _block_shape()
+CLSpaceToDepthLayerKernel::CLSpaceToDepthLayerKernel() : _input(nullptr), _output(nullptr), _block_shape()
{
+ _type = CLKernelType::ELEMENTWISE;
}
void CLSpaceToDepthLayerKernel::configure(const ICLTensor *input, ICLTensor *output, int32_t block_shape)
@@ -73,10 +75,13 @@ void CLSpaceToDepthLayerKernel::configure(const ICLTensor *input, ICLTensor *out
configure(CLKernelLibrary::get().get_compile_context(), input, output, block_shape);
}
-void CLSpaceToDepthLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t block_shape)
+void CLSpaceToDepthLayerKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ int32_t block_shape)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- auto padding_info = get_padding_info({ input, output });
+ auto padding_info = get_padding_info({input, output});
TensorShape output_shape = compute_space_to_depth_shape(input->info(), block_shape);
auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type());
@@ -92,11 +97,14 @@ void CLSpaceToDepthLayerKernel::configure(const CLCompileContext &compile_contex
// Create kernel
CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(data_size_from_type(output->info()->data_type())));
+ build_opts.add_option("-DDATA_TYPE=" +
+ get_cl_unsigned_type_from_element_size(data_size_from_type(output->info()->data_type())));
build_opts.add_option("-DCHANNEL_SIZE=" + support::cpp11::to_string(output->info()->dimension(idx_channel)));
build_opts.add_option("-DBLOCK_SHAPE=" + support::cpp11::to_string(block_shape));
build_opts.add_option("-DWIDTH_IN=" + support::cpp11::to_string(output->info()->dimension(idx_width)));
- _kernel = create_kernel(compile_context, "space_to_depth_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options());
+ _kernel = create_kernel(compile_context,
+ "space_to_depth_" + lower_string(string_from_data_layout(input->info()->data_layout())),
+ build_opts.options());
// Configure kernel window
Window win = calculate_max_window(*output->info(), Steps());
@@ -134,7 +142,6 @@ void CLSpaceToDepthLayerKernel::run(const Window &window, cl::CommandQueue &queu
enqueue(queue, *this, slice_out, lws_hint());
++batch_id;
- }
- while(window.slide_window_slice_3D(slice_out));
+ } while (window.slide_window_slice_3D(slice_out));
}
} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLSpaceToDepthLayerKernel.h b/src/core/CL/kernels/CLSpaceToDepthLayerKernel.h
index bb1ac5f9a6..d0932919e0 100644
--- a/src/core/CL/kernels/CLSpaceToDepthLayerKernel.h
+++ b/src/core/CL/kernels/CLSpaceToDepthLayerKernel.h
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_CLSPACETODEPTHLAYERKERNEL_H
#include "arm_compute/core/Types.h"
+
#include "src/core/CL/ICLKernel.h"
namespace arm_compute
@@ -61,7 +62,8 @@ public:
* @param[out] output Tensor output. Data types supported: same as @p input
* @param[in] block_shape Block shape value.
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t block_shape);
+ void
+ configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t block_shape);
/** Static function to check if given info will lead to a valid configuration of @ref CLSpaceToDepthLayerKernel.
*
* @param[in] input Tensor input info. Supported tensor rank: 4. Data types supported: All.
diff --git a/src/core/CL/kernels/CLStackLayerKernel.cpp b/src/core/CL/kernels/CLStackLayerKernel.cpp
index 9bdcc8dc3f..23e26716e7 100644
--- a/src/core/CL/kernels/CLStackLayerKernel.cpp
+++ b/src/core/CL/kernels/CLStackLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,10 +30,10 @@
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
-
#include "support/StringSupport.h"
using namespace arm_compute::misc::shape_calculator;
@@ -42,7 +42,11 @@ namespace arm_compute
{
namespace
{
-Status validate_arguments(const ITensorInfo *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, const ITensorInfo *output)
+Status validate_arguments(const ITensorInfo *input,
+ unsigned int axis,
+ unsigned int idx_input,
+ unsigned int num_tensors,
+ const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
@@ -51,9 +55,10 @@ Status validate_arguments(const ITensorInfo *input, unsigned int axis, unsigned
ARM_COMPUTE_RETURN_ERROR_ON(axis > input->num_dimensions());
ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4);
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), compute_stack_shape(*input, axis, num_tensors));
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(),
+ compute_stack_shape(*input, axis, num_tensors));
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
}
@@ -61,7 +66,8 @@ Status validate_arguments(const ITensorInfo *input, unsigned int axis, unsigned
return Status{};
}
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, unsigned int axis, unsigned int num_tensors, ITensorInfo *output)
+std::pair<Status, Window>
+validate_and_configure_window(ITensorInfo *input, unsigned int axis, unsigned int num_tensors, ITensorInfo *output)
{
    // Output auto initialization if not yet initialized
auto_init_if_empty(*output, input->clone()->set_tensor_shape(compute_stack_shape(*input, axis, num_tensors)));
@@ -73,17 +79,23 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, unsi
}
} // namespace
-CLStackLayerKernel::CLStackLayerKernel()
- : _input(nullptr), _output(nullptr)
+CLStackLayerKernel::CLStackLayerKernel() : _input(nullptr), _output(nullptr)
{
+ _type = CLKernelType::ELEMENTWISE;
}
-void CLStackLayerKernel::configure(const ICLTensor *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, ICLTensor *output)
+void CLStackLayerKernel::configure(
+ const ICLTensor *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, ICLTensor *output)
{
configure(CLKernelLibrary::get().get_compile_context(), input, axis, idx_input, num_tensors, output);
}
-void CLStackLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, ICLTensor *output)
+void CLStackLayerKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ unsigned int axis,
+ unsigned int idx_input,
+ unsigned int num_tensors,
+ ICLTensor *output)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), axis, idx_input, num_tensors, output->info()));
@@ -111,10 +123,15 @@ void CLStackLayerKernel::configure(const CLCompileContext &compile_context, cons
_kernel.setArg<cl_uint>(idx, idx_input);
}
-Status CLStackLayerKernel::validate(const ITensorInfo *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, const ITensorInfo *output)
+Status CLStackLayerKernel::validate(const ITensorInfo *input,
+ unsigned int axis,
+ unsigned int idx_input,
+ unsigned int num_tensors,
+ const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, axis, idx_input, num_tensors, output));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), axis, num_tensors, output->clone().get()).first);
+ ARM_COMPUTE_RETURN_ON_ERROR(
+ validate_and_configure_window(input->clone().get(), axis, num_tensors, output->clone().get()).first);
return Status{};
}
diff --git a/src/core/CL/kernels/CLStackLayerKernel.h b/src/core/CL/kernels/CLStackLayerKernel.h
index 2865127a90..d3c17f529c 100644
--- a/src/core/CL/kernels/CLStackLayerKernel.h
+++ b/src/core/CL/kernels/CLStackLayerKernel.h
@@ -26,6 +26,7 @@
#define ARM_COMPUTE_CLSTACKLAYERKERNEL_H
#include "arm_compute/core/Types.h"
+
#include "src/core/CL/ICLKernel.h"
namespace arm_compute
@@ -60,7 +61,8 @@ public:
* @param[out] output Output tensor. Data types supported: Same as @p input.
*
*/
- void configure(const ICLTensor *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, ICLTensor *output);
+ void configure(
+ const ICLTensor *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, ICLTensor *output);
/** Initialise the kernel's inputs and output
*
* @note Supported input tensor rank: up to 4
@@ -74,7 +76,12 @@ public:
* @param[out] output Output tensor. Data types supported: Same as @p input.
*
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ unsigned int axis,
+ unsigned int idx_input,
+ unsigned int num_tensors,
+ ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLStackLayerKernel
*
* @note Supported input tensor rank: up to 4
@@ -88,7 +95,11 @@ public:
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, const ITensorInfo *output);
+ static Status validate(const ITensorInfo *input,
+ unsigned int axis,
+ unsigned int idx_input,
+ unsigned int num_tensors,
+ const ITensorInfo *output);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/src/core/CL/kernels/CLStridedSliceKernel.cpp b/src/core/CL/kernels/CLStridedSliceKernel.cpp
index 75fd01df14..20cd835069 100644
--- a/src/core/CL/kernels/CLStridedSliceKernel.cpp
+++ b/src/core/CL/kernels/CLStridedSliceKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,10 +22,13 @@
* SOFTWARE.
*/
#include "src/core/CL/kernels/CLStridedSliceKernel.h"
+
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/utils/helpers/tensor_transform.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "src/core/utils/helpers/bit_ops.h"
@@ -36,9 +39,14 @@ namespace arm_compute
{
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output,
- const Coordinates &starts, const Coordinates &ends, const BiStrides &strides,
- int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
+Status validate_arguments(const ITensorInfo *input,
+ const ITensorInfo *output,
+ const Coordinates &starts,
+ const Coordinates &ends,
+ const BiStrides &strides,
+ int32_t begin_mask,
+ int32_t end_mask,
+ int32_t shrink_axis_mask)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
@@ -47,19 +55,16 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output,
ARM_COMPUTE_RETURN_ERROR_ON(starts.num_dimensions() > input->num_dimensions());
ARM_COMPUTE_RETURN_ERROR_ON(ends.num_dimensions() > input->num_dimensions());
ARM_COMPUTE_RETURN_ERROR_ON(strides.num_dimensions() > input->num_dimensions());
- ARM_COMPUTE_RETURN_ERROR_ON(std::any_of(strides.cbegin(), strides.cbegin() + strides.num_dimensions(), [](int i)
- {
- return i == 0;
- }));
+ ARM_COMPUTE_RETURN_ERROR_ON(
+ std::any_of(strides.cbegin(), strides.cbegin() + strides.num_dimensions(), [](int i) { return i == 0; }));
// Get expected output shape
- const TensorShape exp_output_shape = arm_compute::misc::shape_calculator::compute_strided_slice_shape(*input,
- starts, ends, strides,
- begin_mask, end_mask, shrink_axis_mask);
+ const TensorShape exp_output_shape = arm_compute::misc::shape_calculator::compute_strided_slice_shape(
+ *input, starts, ends, strides, begin_mask, end_mask, shrink_axis_mask);
ARM_COMPUTE_RETURN_ERROR_ON(exp_output_shape.total_size() == 0);
// Checks output if configured
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
const TensorInfo exp_output_info = output->clone()->set_tensor_shape(exp_output_shape);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &exp_output_info);
@@ -68,46 +73,42 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output,
return Status{};
}
+} // namespace
-std::pair<Status, Window> validate_and_configure_window(const ITensorInfo *input, ITensorInfo *output,
- const Coordinates &starts, const Coordinates &ends, const BiStrides &strides,
- int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
+CLStridedSliceKernel::CLStridedSliceKernel()
{
- // Output tensor auto initialization if not yet initialized
- const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_strided_slice_shape(*input,
- starts, ends, strides,
- begin_mask, end_mask, shrink_axis_mask);
- auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape));
-
- // Create window
- Window win = calculate_max_window(*output, Steps());
- output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
-
- return std::make_pair(Status{}, win);
+ _type = CLKernelType::ELEMENTWISE;
}
-} // namespace
-void CLStridedSliceKernel::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output,
- const Coordinates &starts, const Coordinates &ends, const BiStrides &strides,
- int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
+void CLStridedSliceKernel::configure(const CLCompileContext &compile_context,
+ const ITensorInfo *input,
+ ITensorInfo *output,
+ const Coordinates &starts,
+ const Coordinates &ends,
+ const BiStrides &strides,
+ int32_t begin_mask,
+ int32_t end_mask,
+ int32_t shrink_axis_mask)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- auto padding_info = get_padding_info({ input, output });
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input, output, starts, ends, strides, begin_mask, end_mask, shrink_axis_mask));
+ auto padding_info = get_padding_info({input, output});
+ ARM_COMPUTE_ERROR_THROW_ON(
+ validate_arguments(input, output, starts, ends, strides, begin_mask, end_mask, shrink_axis_mask));
const TensorShape &input_shape = input->tensor_shape();
Coordinates starts_abs;
Coordinates ends_abs;
Coordinates final_strides;
- std::tie(starts_abs, ends_abs, final_strides) = arm_compute::helpers::tensor_transform::calculate_strided_slice_coords(
- input_shape,
- starts, ends, strides,
- begin_mask, end_mask, shrink_axis_mask);
+ std::tie(starts_abs, ends_abs, final_strides) =
+ arm_compute::helpers::tensor_transform::calculate_strided_slice_coords(input_shape, starts, ends, strides,
+ begin_mask, end_mask, shrink_axis_mask);
// Configure kernel window
- auto win_config = validate_and_configure_window(input, output, starts, ends, strides, begin_mask, end_mask, shrink_axis_mask);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+ const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_strided_slice_shape(
+ *input, starts, ends, strides, begin_mask, end_mask, shrink_axis_mask);
+ auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape));
+ Window win = calculate_max_window(*output, Steps());
// Enable multiple elements processing along x if stride_x is 1 and output width greater than the access vector size
const int vec_size_x = 16 / input->element_size();
@@ -116,29 +117,31 @@ void CLStridedSliceKernel::configure(const CLCompileContext &compile_context, co
const bool multi_access_x = !is_shrink_on_x && (final_strides.x() == 1) && (output_width_x / vec_size_x > 0);
// Update window if needed
- if(multi_access_x)
+ if (multi_access_x)
{
- Window &updated_window = std::get<1>(win_config);
+ Window &updated_window = win;
updated_window.set(Window::DimX,
- Window::Dimension(updated_window.x().start(), ceil_to_multiple(updated_window.x().end(), vec_size_x), vec_size_x));
+ Window::Dimension(updated_window.x().start(),
+ ceil_to_multiple(updated_window.x().end(), vec_size_x), vec_size_x));
}
- ICLKernel::configure_internal(win_config.second);
+ ICLKernel::configure_internal(win);
// Create build options
CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(data_size_from_type(input->data_type())));
- for(unsigned int i = 0; i < input_shape.num_dimensions(); ++i)
+ build_opts.add_option("-DDATA_TYPE=" +
+ get_cl_unsigned_type_from_element_size(data_size_from_type(input->data_type())));
+ for (unsigned int i = 0; i < input_shape.num_dimensions(); ++i)
{
const bool is_shrink = arm_compute::helpers::bit_ops::is_bit_set(shrink_axis_mask, i);
- build_opts.add_option("-DSTART_" + support::cpp11::to_string(i) + "=" + support::cpp11::to_string(starts_abs[i]));
- build_opts.add_option("-DSTRIDE_" + support::cpp11::to_string(i) + "=" + support::cpp11::to_string(final_strides[i]));
+ build_opts.add_option("-DSTART_" + support::cpp11::to_string(i) + "=" +
+ support::cpp11::to_string(starts_abs[i]));
+ build_opts.add_option("-DSTRIDE_" + support::cpp11::to_string(i) + "=" +
+ support::cpp11::to_string(final_strides[i]));
build_opts.add_option_if(is_shrink, "-DSHRINK_" + support::cpp11::to_string(i));
}
- build_opts.add_option_if(multi_access_x, "-DLAST_ACCESSED_X=" + support::cpp11::to_string(std::max<int>(output_width_x - vec_size_x, 0)));
+ build_opts.add_option_if(multi_access_x, "-DLAST_ACCESSED_X=" + support::cpp11::to_string(
+ std::max<int>(output_width_x - vec_size_x, 0)));
build_opts.add_option_if(multi_access_x, "-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
- build_opts.add_option_if_else(input_shape.num_dimensions() > 2,
- "-DSRC_DEPTH=" + support::cpp11::to_string(input_shape.z()),
- "-DSRC_DEPTH=1");
build_opts.add_option_if_else(output->num_dimensions() > 2,
"-DDST_DEPTH=" + support::cpp11::to_string(output->tensor_shape().z()),
"-DDST_DEPTH=1");
@@ -150,7 +153,7 @@ void CLStridedSliceKernel::configure(const CLCompileContext &compile_context, co
_config_id = "strided_slice";
_config_id += "_";
_config_id += lower_string(string_from_data_type(input->data_type()));
- for(unsigned int i = 0; i < input_shape.num_dimensions(); ++i)
+ for (unsigned int i = 0; i < input_shape.num_dimensions(); ++i)
{
_config_id += "_";
_config_id += support::cpp11::to_string(input->dimension(i));
@@ -164,14 +167,17 @@ void CLStridedSliceKernel::configure(const CLCompileContext &compile_context, co
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-Status CLStridedSliceKernel::validate(const ITensorInfo *input, const ITensorInfo *output,
- const Coordinates &starts, const Coordinates &ends, const BiStrides &strides,
- int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
+Status CLStridedSliceKernel::validate(const ITensorInfo *input,
+ const ITensorInfo *output,
+ const Coordinates &starts,
+ const Coordinates &ends,
+ const BiStrides &strides,
+ int32_t begin_mask,
+ int32_t end_mask,
+ int32_t shrink_axis_mask)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, starts, ends, strides, begin_mask, end_mask, shrink_axis_mask));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(),
- starts, ends, strides, begin_mask, end_mask, shrink_axis_mask)
- .first);
+ ARM_COMPUTE_RETURN_ON_ERROR(
+ validate_arguments(input, output, starts, ends, strides, begin_mask, end_mask, shrink_axis_mask));
return Status{};
}
@@ -181,8 +187,9 @@ void CLStridedSliceKernel::run_op(ITensorPack &tensors, const Window &window, cl
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
- const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
+ const auto src =
+ utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
+ auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
Window slice = window_collapsed.first_slice_window_4D();
@@ -193,7 +200,6 @@ void CLStridedSliceKernel::run_op(ITensorPack &tensors, const Window &window, cl
add_4D_tensor_argument(idx, src, slice);
add_4D_tensor_argument(idx, dst, slice);
enqueue(queue, *this, slice, lws_hint());
- }
- while(window_collapsed.slide_window_slice_4D(slice));
+ } while (window_collapsed.slide_window_slice_4D(slice));
}
} // namespace arm_compute
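[Editor's note, not part of the patch] The multi-access logic introduced above derives the access vector from a fixed 16-byte budget. A minimal sketch of that arithmetic, with a hypothetical FP32 input (the kernel reads the element size from the tensor info):

    // 16 bytes per access; FP16 (2-byte elements) gives 8 elements per vector,
    // FP32 (4-byte elements) gives 4.
    const int element_size = 4;                 // hypothetical FP32 input
    const int vec_size_x   = 16 / element_size; // -> 4
    // Multi-access along x is enabled only when the final stride on x is 1,
    // the x axis is not shrunk, and the output width holds at least one full
    // vector (output_width_x / vec_size_x > 0).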
diff --git a/src/core/CL/kernels/CLStridedSliceKernel.h b/src/core/CL/kernels/CLStridedSliceKernel.h
index 599cf34c39..1cf5bcacec 100644
--- a/src/core/CL/kernels/CLStridedSliceKernel.h
+++ b/src/core/CL/kernels/CLStridedSliceKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_CL_STRIDED_SLICE_KERNEL_H
#include "arm_compute/core/Types.h"
+
#include "src/core/CL/ICLKernel.h"
#include <cstdint>
@@ -35,6 +36,9 @@ namespace arm_compute
class CLStridedSliceKernel : public ICLKernel
{
public:
+ /** Default constructor */
+ CLStridedSliceKernel();
+
/** Configure kernel
*
* @note Supported tensor rank: up to 4
@@ -50,9 +54,15 @@ public:
* @param[in] shrink_axis_mask If the ith bit of shrink_axis_mask is set, it implies that the ith specification shrinks the dimensionality by 1.
* A slice of size 1 starting from starts[i] in the dimension must be preserved.
*/
- void configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output,
- const Coordinates &starts, const Coordinates &ends, const BiStrides &strides,
- int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask);
+ void configure(const CLCompileContext &compile_context,
+ const ITensorInfo *input,
+ ITensorInfo *output,
+ const Coordinates &starts,
+ const Coordinates &ends,
+ const BiStrides &strides,
+ int32_t begin_mask,
+ int32_t end_mask,
+ int32_t shrink_axis_mask);
/** Static function to check if given info will lead to a valid configuration of @ref CLStridedSliceKernel
*
@@ -68,9 +78,14 @@ public:
* @param[in] shrink_axis_mask If the ith bit of shrink_axis_mask is set, it implies that the ith specification shrinks the dimensionality by 1.
* A slice of size 1 starting from starts[i] in the dimension must be preserved.
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *output,
- const Coordinates &starts, const Coordinates &ends, const BiStrides &strides,
- int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask);
+ static Status validate(const ITensorInfo *input,
+ const ITensorInfo *output,
+ const Coordinates &starts,
+ const Coordinates &ends,
+ const BiStrides &strides,
+ int32_t begin_mask,
+ int32_t end_mask,
+ int32_t shrink_axis_mask);
// Inherited methods overridden:
void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
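[Editor's note, not part of the patch] A minimal usage sketch of the reformatted validate() signature above, assuming the include paths used elsewhere in this patch; the tensor shape, slice coordinates and masks are illustrative only:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "src/core/CL/kernels/CLStridedSliceKernel.h"

    using namespace arm_compute;

    // Slice a [8, 4] F32 tensor along x: elements 1..6 with stride 2.
    TensorInfo  src(TensorShape(8U, 4U), 1, DataType::F32);
    TensorInfo  dst;                 // left empty; configure() auto-initialises it
    Coordinates starts(1, 0);
    Coordinates ends(7, 4);
    BiStrides   strides(2, 1);
    // Bit i of begin_mask/end_mask tells the kernel to ignore starts[i]/ends[i];
    // bit i of shrink_axis_mask drops dimension i from the output.
    const Status s = CLStridedSliceKernel::validate(&src, &dst, starts, ends, strides,
                                                    /*begin_mask*/ 0, /*end_mask*/ 0,
                                                    /*shrink_axis_mask*/ 0);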
diff --git a/src/core/CL/kernels/CLTileKernel.cpp b/src/core/CL/kernels/CLTileKernel.cpp
index c0c3d2e2ee..fa996c4008 100644
--- a/src/core/CL/kernels/CLTileKernel.cpp
+++ b/src/core/CL/kernels/CLTileKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,8 +22,11 @@
* SOFTWARE.
*/
#include "src/core/CL/kernels/CLTileKernel.h"
+
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/core/utils/StringUtils.h"
+
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/StringSupport.h"
@@ -38,15 +41,13 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
ARM_COMPUTE_RETURN_ERROR_ON(multiples.size() > 4);
ARM_COMPUTE_RETURN_ERROR_ON(multiples.empty());
- ARM_COMPUTE_RETURN_ERROR_ON(std::any_of(multiples.begin(), multiples.end(), [](uint32_t e)
- {
- return e == 0;
- }));
+ ARM_COMPUTE_RETURN_ERROR_ON(std::any_of(multiples.begin(), multiples.end(), [](uint32_t e) { return e == 0; }));
// Validate output if initialized
- if(output->total_size() != 0)
+ if (output->total_size() != 0)
{
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(misc::shape_calculator::compute_tiled_shape(input->tensor_shape(), multiples), output->tensor_shape());
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(
+ misc::shape_calculator::compute_tiled_shape(input->tensor_shape(), multiples), output->tensor_shape());
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
}
@@ -54,9 +55,9 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
}
} // namespace
-CLTileKernel::CLTileKernel()
- : _input(nullptr), _output(nullptr)
+CLTileKernel::CLTileKernel() : _input(nullptr), _output(nullptr)
{
+ _type = CLKernelType::ELEMENTWISE;
}
void CLTileKernel::configure(const ICLTensor *input, ICLTensor *output, const Multiples &multiples)
@@ -64,7 +65,10 @@ void CLTileKernel::configure(const ICLTensor *input, ICLTensor *output, const Mu
configure(CLKernelLibrary::get().get_compile_context(), input, output, multiples);
}
-void CLTileKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Multiples &multiples)
+void CLTileKernel::configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ const Multiples &multiples)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
@@ -78,11 +82,13 @@ void CLTileKernel::configure(const CLCompileContext &compile_context, const ICLT
_input = input;
_output = output;
- const DataType data_type = input->info()->data_type();
- const int vec_size_x = 16 / input->info()->element_size();
- const int input_width_x = input->info()->tensor_shape().x();
- const unsigned int offset = ceil_to_multiple(input_width_x, vec_size_x) - input_width_x;
- const bool multi_access_x = (input_width_x / vec_size_x > 0);
+ const DataType data_type = input->info()->data_type();
+ const int vec_size_x = 16 / input->info()->element_size();
+ const int input_width_x = input->info()->tensor_shape().x();
+ const unsigned int input_width_ceil = ceil_to_multiple(input_width_x, vec_size_x);
+ const unsigned int input_width_tiles = input_width_ceil / vec_size_x;
+ const unsigned int offset = input_width_ceil - input_width_x;
+ const bool multi_access_x = (input_width_x / vec_size_x > 0);
// Create kernel
CLBuildOptions build_opts;
@@ -94,20 +100,20 @@ void CLTileKernel::configure(const CLCompileContext &compile_context, const ICLT
build_opts.add_option("-DDST_DEPTH=" + support::cpp11::to_string(output->info()->dimension(2)));
build_opts.add_option_if(multi_access_x, "-DOFFSET=" + support::cpp11::to_string(offset));
build_opts.add_option_if(multi_access_x, "-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
+ build_opts.add_option_if(multi_access_x, "-DSRC_WIDTH_TILES=" + support::cpp11::to_string(input_width_tiles));
_kernel = create_kernel(compile_context, "tile", build_opts.options());
// Configure window without padding
Window win = calculate_max_window(*output->info());
- if(multi_access_x)
+ if (multi_access_x)
{
// If multi-access is enabled, no thread should cross the tile boundaries. This means we need
// as many threads as those to cover a single tile times multiples[0]. Note that if threads
// do not cross the boundaries of the tiles, they won't cross the boundaries of the last tile, and
// we don't need to pad the output
const unsigned int size_win_x = ceil_to_multiple(input->info()->dimension(0), vec_size_x) * multiples[0];
- win.set(Window::DimX,
- Window::Dimension(win.x().start(), size_win_x, vec_size_x));
+ win.set(Window::DimX, Window::Dimension(win.x().start(), size_win_x, vec_size_x));
}
ICLKernel::configure_internal(win);
@@ -116,7 +122,7 @@ void CLTileKernel::configure(const CLCompileContext &compile_context, const ICLT
_config_id = "tile";
_config_id += "_";
_config_id += lower_string(string_from_data_type(input->info()->data_type()));
- for(unsigned int i = 0; i < multiples.size(); ++i)
+ for (unsigned int i = 0; i < multiples.size(); ++i)
{
_config_id += "_";
_config_id += support::cpp11::to_string(input->info()->dimension(i));
@@ -145,7 +151,6 @@ void CLTileKernel::run(const Window &window, cl::CommandQueue &queue)
add_4D_tensor_argument(idx, _input, slice);
add_4D_tensor_argument(idx, _output, slice);
enqueue(queue, *this, slice, lws_hint());
- }
- while(collapsed.slide_window_slice_4D(slice));
+ } while (collapsed.slide_window_slice_4D(slice));
}
} // namespace arm_compute
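[Editor's note, not part of the patch] A small worked example of the tile-window arithmetic above, with hypothetical sizes; the point of SRC_WIDTH_TILES/OFFSET is that no thread crosses a tile boundary, so the output needs no padding:

    // Hypothetical FP32 input of width 10, tiled 3 times along x (multiples[0] == 3):
    // vec_size_x       = 16 / 4                 = 4
    // input_width_ceil = ceil_to_multiple(10,4) = 12
    // SRC_WIDTH_TILES  = 12 / 4                 = 3
    // OFFSET           = 12 - 10                = 2
    // size_win_x       = 12 * multiples[0]      = 36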
diff --git a/src/core/CL/kernels/CLTileKernel.h b/src/core/CL/kernels/CLTileKernel.h
index 41752ca90b..c3486aecef 100644
--- a/src/core/CL/kernels/CLTileKernel.h
+++ b/src/core/CL/kernels/CLTileKernel.h
@@ -64,7 +64,10 @@ public:
* @param[out] output Destination tensor. Same as @p input
*
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Multiples &multiples);
+ void configure(const CLCompileContext &compile_context,
+ const ICLTensor *input,
+ ICLTensor *output,
+ const Multiples &multiples);
/** Static function to check if given info will lead to a valid configuration of @ref CLTileKernel
*
* @param[in] input Source tensor info. Data type supported: All.
diff --git a/src/core/CL/kernels/CLTransposeKernel.cpp b/src/core/CL/kernels/CLTransposeKernel.cpp
deleted file mode 100644
index 56ff48be1f..0000000000
--- a/src/core/CL/kernels/CLTransposeKernel.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLTransposeKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace
-{
-TensorShape transposed_tensor_shape(const TensorShape &in)
-{
- TensorShape output_shape{ in };
- const size_t w_out = in[1];
- const size_t h_out = in[0];
- output_shape.set(0, w_out);
- output_shape.set(1, h_out);
-
- return output_shape;
-}
-
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
-
- if(output->total_size() != 0)
- {
- const TensorInfo tensor_info = input->clone()->set_tensor_shape(transposed_tensor_shape(input->tensor_shape()));
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
- }
-
- return Status{};
-}
-} // namespace
-
-Status CLTransposeKernel::validate(const ITensorInfo *input, const ITensorInfo *output)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output));
- return Status{};
-}
-
-void CLTransposeKernel::configure(const ICLTensor *input, ICLTensor *output)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, output);
-}
-
-void CLTransposeKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- // Output tensor auto inizialitation if not yet initialized
- auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(transposed_tensor_shape(input->info()->tensor_shape())));
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
- auto padding_info = get_padding_info({ input, output });
-
- _input = input;
- _output = output;
-
- const unsigned int vec_size_x = adjust_vec_size(max_cl_vector_width / input->info()->element_size(), input->info()->dimension(0));
- const int vec_size_x_leftovers = input->info()->dimension(0) % vec_size_x;
- const unsigned int vec_size_y = adjust_vec_size(max_cl_vector_width / input->info()->element_size(), input->info()->dimension(1));
- const int vec_size_y_leftovers = input->info()->dimension(1) % vec_size_y;
-
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE_IN_BYTES=" + support::cpp11::to_string(input->info()->element_size()));
- build_opts.add_option("-DVEC_SIZE_X=" + support::cpp11::to_string(vec_size_x));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER_X=" + support::cpp11::to_string(vec_size_x_leftovers));
- build_opts.add_option("-DVEC_SIZE_Y=" + support::cpp11::to_string(vec_size_y));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER_Y=" + support::cpp11::to_string(vec_size_y_leftovers));
-
- _kernel = create_kernel(compile_context, "transpose", build_opts.options());
-
- // Configure kernel window
- Window win = calculate_max_window(*input->info(), Steps(vec_size_x, vec_size_y));
- ICLKernel::configure_internal(win, cl::NDRange(2, 8));
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-} // namespace arm_compute \ No newline at end of file
diff --git a/src/core/CL/kernels/CLTransposeKernel.h b/src/core/CL/kernels/CLTransposeKernel.h
deleted file mode 100644
index 0c4b7b4aff..0000000000
--- a/src/core/CL/kernels/CLTransposeKernel.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLTRANSPOSEKERNEL_H
-#define ARM_COMPUTE_CLTRANSPOSEKERNEL_H
-
-#include "src/core/CL/ICLSimple2DKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** OpenCL kernel which transposes the elements of a matrix.
- *
- * [width, height, batch] -> [height, width, batch]
- *
- */
-class CLTransposeKernel : public ICLSimple2DKernel
-{
-public:
- /** Initialise the kernel's input and output.
- *
- * @param[in] input Input tensor. Data types supported: All.
- * @param[out] output Output tensor. Data type supported: Same as @p input
- */
- void configure(const ICLTensor *input, ICLTensor *output);
- /** Initialise the kernel's input and output.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Input tensor. Data types supported: All.
- * @param[out] output Output tensor. Data type supported: Same as @p input
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
- /** Static function to check if given info will lead to a valid configuration of @ref CLTransposeKernel
- *
- * @param[in] input Input tensor. Data types supported: All.
- * @param[in] output Output tensor. Data type supported: Same as @p input
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *output);
-};
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CLTRANSPOSEKERNEL_H */
diff --git a/src/core/CL/kernels/CLWeightsReshapeKernel.cpp b/src/core/CL/kernels/CLWeightsReshapeKernel.cpp
deleted file mode 100644
index 559f47ce26..0000000000
--- a/src/core/CL/kernels/CLWeightsReshapeKernel.cpp
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-using namespace arm_compute::misc::shape_calculator;
-
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *biases, const ITensorInfo *output, unsigned int num_groups)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
- ARM_COMPUTE_RETURN_ERROR_ON(num_groups == 0);
- ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() == DataLayout::NHWC && num_groups > 1);
- ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4 && num_groups > 1);
- ARM_COMPUTE_RETURN_ERROR_ON((input->dimension(3) % num_groups) != 0);
-
- if(biases != nullptr)
- {
- ARM_COMPUTE_RETURN_ERROR_ON(!is_data_type_float(input->data_type()));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
- ARM_COMPUTE_RETURN_ERROR_ON((input->num_dimensions() == 4) && (biases->num_dimensions() != 1));
- ARM_COMPUTE_RETURN_ERROR_ON((input->num_dimensions() == 5) && (biases->num_dimensions() != 2));
- ARM_COMPUTE_RETURN_ERROR_ON((input->num_dimensions() == 4) && (biases->dimension(0) != input->tensor_shape()[3]));
- ARM_COMPUTE_RETURN_ERROR_ON((input->num_dimensions() == 5) && (biases->dimension(0) != input->tensor_shape()[3] || biases->dimension(1) != input->tensor_shape()[4]));
- }
-
- // Checks performed when output is configured
- if(output->total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), compute_weights_reshaped_shape(*input, biases != nullptr, num_groups));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
- }
-
- return Status{};
-}
-} // namespace
-
-CLWeightsReshapeKernel::CLWeightsReshapeKernel()
- : _input(nullptr), _biases(nullptr), _output(nullptr)
-{
-}
-
-void CLWeightsReshapeKernel::configure(const ICLTensor *input, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, biases, output, num_groups);
-}
-
-void CLWeightsReshapeKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- // Output tensor auto inizialitation if not yet initialized
- auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(compute_weights_reshaped_shape(*input->info(), (biases != nullptr), num_groups)));
-
- // Perform validation step
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(),
- (biases != nullptr) ? biases->info() : nullptr,
- output->info(), num_groups));
-
- auto padding_info = get_padding_info({ input, biases, output });
-
- const DataType data_type = input->info()->data_type();
-
- _biases = biases;
- _output = output;
- _input = input;
-
- // Create build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(data_size_from_type(data_type)));
- build_opts.add_option("-DNUM_GROUPS=" + support::cpp11::to_string(num_groups));
- build_opts.add_option_if(biases != nullptr, "-DHAS_BIAS");
-
- // Create kernel
- _kernel = create_kernel(compile_context, "reshape_to_columns", build_opts.options());
-
- // Configure window
- Window win = calculate_max_window(*input->info(), Steps());
- // The CLWeightsReshapeKernel doesn't need padding so update_window_and_padding() can be skipped
- output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape()));
- ICLKernel::configure_internal(win);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status CLWeightsReshapeKernel::validate(const ITensorInfo *input, const ITensorInfo *biases, const ITensorInfo *output, unsigned int num_groups)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, biases, output, num_groups));
- return Status{};
-}
-
-void CLWeightsReshapeKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(ICLKernel::window(), window);
-
- Window out_window;
- out_window.use_tensor_dimensions(_output->info()->tensor_shape());
-
- Window in_slice = window.first_slice_window_3D();
- Window out_slice = out_window.first_slice_window_2D();
-
- Window biases_window;
- Window biases_slice;
-
- unsigned int idx = num_arguments_per_3D_tensor() + num_arguments_per_2D_tensor();
- idx += (_biases != nullptr) ? num_arguments_per_1D_tensor() : 0;
- _kernel.setArg<cl_uint>(idx++, _input->info()->dimension(0));
- _kernel.setArg<cl_uint>(idx++, _input->info()->dimension(1));
- _kernel.setArg<cl_uint>(idx++, _input->info()->dimension(2));
- _kernel.setArg<cl_uint>(idx++, _input->info()->dimension(3));
- _kernel.setArg<cl_uint>(idx++, _output->info()->strides_in_bytes().z());
-
- if(_biases != nullptr)
- {
- biases_window.use_tensor_dimensions(_biases->info()->tensor_shape());
- biases_slice = biases_window.first_slice_window_1D();
- }
-
- do
- {
- // Set arguments
- unsigned idx = 0;
- add_3D_tensor_argument(idx, _input, in_slice);
- add_2D_tensor_argument(idx, _output, out_slice);
- if(_biases != nullptr)
- {
- add_1D_tensor_argument(idx, _biases, biases_slice);
- ARM_COMPUTE_UNUSED(biases_window.slide_window_slice_1D(biases_slice));
- }
-
- // Run kernel
- enqueue(queue, *this, in_slice, lws_hint());
- }
- while(window.slide_window_slice_4D(in_slice) && out_window.slide_window_slice_2D(out_slice));
-}
-} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLWeightsReshapeKernel.h b/src/core/CL/kernels/CLWeightsReshapeKernel.h
deleted file mode 100644
index 402a60472b..0000000000
--- a/src/core/CL/kernels/CLWeightsReshapeKernel.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLWEIGHTSRESHAPEKERNEL_H
-#define ARM_COMPUTE_CLWEIGHTSRESHAPEKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-/** OpenCL kernel to perform reshaping on the weights used by convolution and locally connected layer
- *
- * Rearranges each 3-dimensional kernel to a single row leading to a matrix with linearized kernels.
- * In combination with the @ref CLIm2ColKernel can transform a convolution to a matrix multiplication.
- *
- * For example assuming a 3D weight kernel of 3x3 dimensions and depth of 2 we have:
- * @f[
- * \left( \begin{array}{ccc}
- * a000 & a001 & a002 \\
- * a010 & a011 & a012 \\
- * a020 & a021 & a022 \\
- * \end{array} \right)
- * \left( \begin{array}{ccc}
- * a100 & a101 & a102 \\
- * a110 & a111 & a112 \\
- * a120 & a121 & a122 \\
- * \end{array} \right)
- * \rightarrow
- * \left( \begin{array}{ccccccccc}
- * a000 & a001 & a002 & a010 & a011 & a012 & a020 & a021 & a022 & a100 & a101 & a102 & a110 & a111 & a112 & a120 & a121 & a122 \\
- * \end{array} \right)
- * @f]
- */
-class CLWeightsReshapeKernel : public ICLKernel
-{
-public:
- /** Constructor.*/
- CLWeightsReshapeKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLWeightsReshapeKernel(const CLWeightsReshapeKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLWeightsReshapeKernel &operator=(const CLWeightsReshapeKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLWeightsReshapeKernel(CLWeightsReshapeKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLWeightsReshapeKernel &operator=(CLWeightsReshapeKernel &&) = default;
- /** Default destructor */
- ~CLWeightsReshapeKernel() = default;
- /** Set the input and output of the kernel.
- *
- * @param[in] input The input tensor to convert. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] if shared,
- * and 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared. Data types supported: All
- * @param[in] biases The shared biases tensor to append. Bias is 1D tensor with dimensions [OFM] if shared and 2D tensor with
- * dimensions [OFM, num_patches] if unshared. Data types supported: F16/F32, for quantized types this must be nullptr.
- * @warning Appending biases to weights reshaped matrix is not supported for quantized asymmetric types.
- * @param[out] output The output tensor. Should be a 2D Tensor if there are no groups and the weights are not shared; a 3D Tensor otherwise.
- * Data types supported: Same as @p input
- * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
- * Number of groups greater than one are only supported for NCHW data layout, and the number of weights must be a multiple of it.
- */
- void configure(const ICLTensor *input, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups = 1);
- /** Set the input and output of the kernel.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input The input tensor to convert. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] if shared,
- * and 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared. Data types supported: All
- * @param[in] biases The shared biases tensor to append. Bias is 1D tensor with dimensions [OFM] if shared and 2D tensor with
- * dimensions [OFM, num_patches] if unshared. Data types supported: F16/F32, for quantized types this must be nullptr.
- * @warning Appending biases to weights reshaped matrix is not supported for quantized asymmetric types.
- * @param[out] output The output tensor. Should be a 2D Tensor if there are no groups and the weights are not shared; a 3D Tensor otherwise.
- * Data types supported: Same as @p input
- * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
- * Number of groups greater than one are only supported for NCHW data layout, and the number of weights must be a multiple of it.
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups = 1);
- /** Static function to check if given info will lead to a valid configuration of @ref CLWeightsReshapeKernel
- *
- * @param[in] input The input tensor to convert. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] if shared,
- * and 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared. Data types supported: All
- * @param[in] biases The shared biases tensor to append. Bias is 1D tensor with dimensions [OFM] if shared and 2D tensor with
- * dimensions [OFM, num_patches] if unshared. Data types supported: F16/F32, for quantized types this must be nullptr.
- * @warning Appending biases to weights reshaped matrix is not supported for quantized asymmetric types.
- * @param[in] output The output tensor. Should be a 2D Tensor if there are no groups and the weights are not shared; a 3D Tensor otherwise.
- * Data types supported: Same as @p input
- * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
- * Number of groups greater than one are only supported for NCHW data layout, and the number of weights must be a multiple of it.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *biases, const ITensorInfo *output, unsigned int num_groups = 1);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_input;
- const ICLTensor *_biases;
- ICLTensor *_output;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLWEIGHTSRESHAPEKERNEL_H */ \ No newline at end of file
diff --git a/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp b/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp
deleted file mode 100644
index bd45ddb65f..0000000000
--- a/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLWinogradFilterTransformKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/IAccessWindow.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/Window.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-
-#include "support/StringSupport.h"
-
-using namespace arm_compute::misc::shape_calculator;
-
-namespace arm_compute
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
-
- const Size2D kernel_size = winograd_info.kernel_size;
- const Size2D output_tile_size = winograd_info.output_tile_size;
-
- const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
- const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(!cl_winograd_convolution_layer_supported(output_tile_size, kernel_size, input->data_layout()), "Winograd filter transform not supported");
- ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(idx_w) != kernel_size.width || input->dimension(idx_h) != kernel_size.height);
- ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4);
-
- // Checks performed when output is configured
- if(output->total_size() != 0)
- {
- const TensorInfo tensor_info_output = input->clone()->set_tensor_shape(compute_winograd_filter_transform_shape(*input, winograd_info));
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_UNUSED(output);
-
- const unsigned int num_elems_processed_per_iteration_x = input->data_layout() == DataLayout::NCHW ? input->dimension(0) : 1;
- const unsigned int num_elems_processed_per_iteration_y = input->dimension(1);
- const unsigned int num_elems_read_per_iteration_z = input->data_layout() == DataLayout::NCHW ? 1 : input->dimension(2);
-
- Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y, num_elems_read_per_iteration_z));
- Window win_collapsed = win.collapse(win, Window::DimZ);
- return std::make_pair(Status{}, win_collapsed);
-}
-} // namespace
-
-CLWinogradFilterTransformKernel::CLWinogradFilterTransformKernel()
- : _input(nullptr), _output(nullptr)
-{
-}
-
-void CLWinogradFilterTransformKernel::configure(const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, output, winograd_info);
-}
-
-void CLWinogradFilterTransformKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- // Output auto initialization if not yet initialized
- auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(compute_winograd_filter_transform_shape(*input->info(), winograd_info)));
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), winograd_info));
- auto padding_info = get_padding_info({ input, output });
-
- // Set build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DSRC_DIM_Z=" + support::cpp11::to_string(input->info()->dimension(2)));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
- build_opts.add_option_if(winograd_info.kernel_size.height == 1, "-DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL");
- build_opts.add_option_if(winograd_info.kernel_size.width == 1, "-DWINOGRAD_FILTER_TRANSFORM_VERTICAL");
- const Size2D kernel_size = winograd_info.kernel_size;
- const Size2D output_tile_size = winograd_info.output_tile_size;
-
- // Create kernel
- std::string kernel_name = "winograd_filter_transform_" + output_tile_size.to_string() + "_" + kernel_size.to_string() + "_" + lower_string(string_from_data_layout(input->info()->data_layout()));
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- _input = input;
- _output = output;
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), output->info());
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status CLWinogradFilterTransformKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, winograd_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first);
-
- return Status{};
-}
-
-void CLWinogradFilterTransformKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- // Setup output window
- Window window_out;
- window_out.use_tensor_dimensions(_output->info()->tensor_shape(), 0);
-
- unsigned int idx = 0;
- add_4D_tensor_argument(idx, _input, window);
- add_3D_tensor_argument(idx, _output, window_out);
- enqueue(queue, *this, window, lws_hint());
-}
-} // namespace arm_compute \ No newline at end of file
diff --git a/src/core/CL/kernels/CLWinogradFilterTransformKernel.h b/src/core/CL/kernels/CLWinogradFilterTransformKernel.h
deleted file mode 100644
index d22fedebcd..0000000000
--- a/src/core/CL/kernels/CLWinogradFilterTransformKernel.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLWINOGRADFILTERTRANSFORMKERNEL_H
-#define ARM_COMPUTE_CLWINOGRADFILTERTRANSFORMKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the Winograd filter transform kernel. */
-class CLWinogradFilterTransformKernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLWinogradFilterTransformKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLWinogradFilterTransformKernel(const CLWinogradFilterTransformKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLWinogradFilterTransformKernel &operator=(const CLWinogradFilterTransformKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLWinogradFilterTransformKernel(CLWinogradFilterTransformKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLWinogradFilterTransformKernel &operator=(CLWinogradFilterTransformKernel &&) = default;
- /** Default destructor */
- ~CLWinogradFilterTransformKernel() = default;
- /** Set the input and output tensor.
- *
- * @note Winograd filter transform supports the following configurations for NCWH data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd filter transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] input Source tensor. The input is a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] (NCHW data layout) or [IFM, kernel_x, kernel_y, OFM] (NHWC data layout). Data types supported: F16/F32.
- * @param[out] output The output tensor. The shape for this tensor can be calculated using the utility function @p compute_winograd_filter_transform_shape. Data types supported: Same as @p input
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo
- */
- void configure(const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info);
- /** Set the input and output tensor.
- *
- * @note Winograd filter transform supports the following configurations for NCWH data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd filter transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Source tensor. The input is a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] (NCHW data layout) or [IFM, kernel_x, kernel_y, OFM] (NHWC data layout). Data types supported: F16/F32.
- * @param[out] output The output tensor. The shape for this tensor can be calculated using the utility function @p compute_winograd_filter_transform_shape. Data types supported: Same as @p input
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info);
- /** Static function to check if given info will lead to a valid configuration of @ref CLWinogradFilterTransformKernel
- *
- * @note Winograd filter transform supports the following configurations for NCWH data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd filter transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] input Source tensor. The input is a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] (NCHW data layout) or [IFM, kernel_x, kernel_y, OFM] (NHWC data layout). Data types supported: F16/F32.
- * @param[out] output The output tensor. The shape for this tensor can be calculated using the utility function @p compute_winograd_filter_transform_shape. Data types supported: Same as @p input
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_input;
- ICLTensor *_output;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLWINOGRADFILTERTRANSFORMKERNEL_H */
diff --git a/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp b/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp
deleted file mode 100644
index 392edda615..0000000000
--- a/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLWinogradInputTransformKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-using namespace arm_compute;
-
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
-
- const PadStrideInfo conv_info = winograd_info.convolution_info;
- const Size2D output_tile_size = winograd_info.output_tile_size;
- const Size2D kernel_size = winograd_info.kernel_size;
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.stride().first != 1 || conv_info.stride().second != 1, "Winograd input transform only supports unit strides");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(!cl_winograd_convolution_layer_supported(output_tile_size, kernel_size, input->data_layout()), "Winograd input transform not supported");
-
- ARM_COMPUTE_UNUSED(conv_info);
- ARM_COMPUTE_UNUSED(output_tile_size);
- ARM_COMPUTE_UNUSED(kernel_size);
-
- // Validate configured output
- if(output->total_size() != 0)
- {
- const TensorShape output_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*input, winograd_info);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const WinogradInfo &winograd_info)
-{
- ARM_COMPUTE_UNUSED(output);
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- bool window_changed = false;
- Window win = calculate_max_window(*input, Steps(1, 1));
-
- if(input->data_layout() == DataLayout::NCHW)
- {
- const PadStrideInfo conv_info = winograd_info.convolution_info;
- const Size2D output_tile_size = winograd_info.output_tile_size;
- const Size2D kernel_size = winograd_info.kernel_size;
-
- unsigned int num_elems_read_per_iteration_x = output_tile_size.width + kernel_size.width - 1;
- unsigned int num_elems_read_per_iteration_y = output_tile_size.height + kernel_size.height - 1;
-
- AccessWindowRectangle input_access(input, -conv_info.pad_left(), -conv_info.pad_top(), num_elems_read_per_iteration_x, num_elems_read_per_iteration_y);
- window_changed = update_window_and_padding(win, input_access);
- }
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
-}
-} // namespace
-
-CLWinogradInputTransformKernel::CLWinogradInputTransformKernel()
- : _border_size(0), _input(nullptr), _output(nullptr), _data_layout(DataLayout::UNKNOWN), _num_tiles_x(0), _num_tiles_y(0), _step_z(1)
-{
-}
-
-BorderSize CLWinogradInputTransformKernel::border_size() const
-{
- return _border_size;
-}
-
-void CLWinogradInputTransformKernel::configure(const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, output, winograd_info);
-}
-
-void CLWinogradInputTransformKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), winograd_info));
-
- auto padding_info = get_padding_info({ input, output });
-
- const PadStrideInfo conv_info = winograd_info.convolution_info;
- const Size2D output_tile_size = winograd_info.output_tile_size;
- const Size2D kernel_size = winograd_info.kernel_size;
-
- _data_layout = input->info()->data_layout();
-
- const size_t idx_w = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
- const size_t idx_h = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
-
- // Compute the number of output tiles along the x and y direction of size "output_tile_size"
- const Size2D num_tiles = compute_winograd_convolution_tiles(Size2D(input->info()->dimension(idx_w), input->info()->dimension(idx_h)),
- kernel_size,
- output_tile_size,
- conv_info);
-
- _input = input;
- _output = output;
- _num_tiles_x = num_tiles.width;
- _num_tiles_y = num_tiles.height;
-
- const TensorShape output_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*input->info(), winograd_info);
-
- // Output auto initialization if not yet initialized
- auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape));
-
- ARM_COMPUTE_ERROR_ON(_num_tiles_x * _num_tiles_y != static_cast<int>(output->info()->dimension(1)));
- const size_t total_batches = input->info()->tensor_shape().total_size_upper(3);
-
- CLBuildOptions build_opts;
- build_opts.add_option("-DNUM_TILES_X=" + support::cpp11::to_string(_num_tiles_x));
- build_opts.add_option("-DPAD_LEFT=" + support::cpp11::to_string(conv_info.pad_left()));
- build_opts.add_option("-DPAD_TOP=" + support::cpp11::to_string(conv_info.pad_top()));
- build_opts.add_option("-DOUTPUT_TILE_W=" + support::cpp11::to_string(output_tile_size.width));
- build_opts.add_option("-DOUTPUT_TILE_H=" + support::cpp11::to_string(output_tile_size.height));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
- build_opts.add_option_if(winograd_info.kernel_size.height == 1, "-DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL");
- build_opts.add_option_if(winograd_info.kernel_size.width == 1, "-DWINOGRAD_INPUT_TRANSFORM_VERTICAL");
- if(_data_layout == DataLayout::NHWC)
- {
- build_opts.add_option_if(total_batches > 1, "-DNUM_TILES_Y=" + support::cpp11::to_string(_num_tiles_y));
- build_opts.add_option("-DSRC_DIM_1=" + support::cpp11::to_string(_input->info()->dimension(1)));
- build_opts.add_option("-DSRC_DIM_2=" + support::cpp11::to_string(_input->info()->dimension(2)));
- }
- else
- {
- build_opts.add_option_if(total_batches > 1, "-DSRC_DEPTH=" + support::cpp11::to_string(_input->info()->dimension(2)));
- }
-
- // Create kernel
- std::string kernel_name = "winograd_input_transform_" + output_tile_size.to_string() + "_" + kernel_size.to_string();
-
- // Get the maximum dimension from the tile size
- const unsigned int tile_max_dim = std::max(output_tile_size.width, output_tile_size.height);
-
- // Check optimized kernel if output_dims == 2x2
- if((tile_max_dim == 2) && (_data_layout == DataLayout::NCHW))
- {
- _step_z = (_input->info()->dimension(2) % 2) != 0 ? 1 : 2;
- }
-
- // Append stepz and data layout
- kernel_name += "_stepz";
- kernel_name += support::cpp11::to_string(_step_z);
- kernel_name += "_" + lower_string(string_from_data_layout(_data_layout));
-
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Create window and update padding
- auto win_config = validate_and_configure_window(input->info(), output->info(), winograd_info);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second, cl::NDRange(1, 1, 8));
-
- _border_size = BorderSize(_input->info()->padding());
-
- ARM_COMPUTE_ERROR_ON((input->info()->data_layout() == DataLayout::NHWC) && has_padding_changed(padding_info));
-
- _config_id = kernel_name;
- _config_id += support::cpp11::to_string(input->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(conv_info.pad_left());
- _config_id += "_";
- _config_id += support::cpp11::to_string(conv_info.pad_top());
- _config_id += "_";
- _config_id += lower_string(string_from_data_layout(_data_layout));
-}
-
-Status CLWinogradInputTransformKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, winograd_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), winograd_info).first);
-
- return Status{};
-}
-
-void CLWinogradInputTransformKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
-
- const size_t idx_w = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
- const size_t idx_h = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
- const size_t idx_c = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::CHANNEL);
- const size_t total_batches = window.shape().total_size_upper(3);
-
- // Collapse window
- Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
-
- Window slice = window_collapsed.first_slice_window_3D();
- slice.set(idx_w, Window::Dimension(0, _num_tiles_x, 1));
- slice.set(idx_h, Window::Dimension(0, _num_tiles_y, 1));
- if(_data_layout == DataLayout::NHWC)
- {
- slice.set(idx_h, Window::Dimension(0, _num_tiles_y * total_batches, 1));
- }
-
- ARM_COMPUTE_ERROR_ON(((slice[idx_c].end() - slice[idx_c].start()) % _step_z) != 0);
- slice.set(idx_c, Window::Dimension(slice[idx_c].start(), slice[idx_c].end(), _step_z));
-
- unsigned int idx = 2 * num_arguments_per_3D_tensor();
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input->info()->strides_in_bytes()[3]));
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_output->info()->strides_in_bytes()[3]));
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, slice);
- add_3D_tensor_argument(idx, _output, slice);
-
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window_collapsed.slide_window_slice_3D(slice));
-}
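
A minimal standalone sketch of how the removed configure() above assembles the OpenCL program name from the Winograd configuration. The literals are assumptions chosen for the F(4x4, 3x3), NCHW, step_z == 1 case; plain std::string values stand in for Size2D::to_string() and the data-layout helpers, so this is illustrative rather than the library's code.

// Illustrative only: mirrors the string building in the removed configure().
#include <iostream>
#include <string>

int main()
{
    const std::string output_tile = "4x4";  // output_tile_size.to_string()
    const std::string kernel      = "3x3";  // kernel_size.to_string()
    const unsigned int step_z     = 1;      // 2 only for the 2x2 NCHW case with an even channel count
    const std::string layout      = "nchw"; // lower_string(string_from_data_layout(_data_layout))

    const std::string kernel_name = "winograd_input_transform_" + output_tile + "_" + kernel
                                    + "_stepz" + std::to_string(step_z) + "_" + layout;

    std::cout << kernel_name << '\n'; // prints: winograd_input_transform_4x4_3x3_stepz1_nchw
    return 0;
}
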
diff --git a/src/core/CL/kernels/CLWinogradInputTransformKernel.h b/src/core/CL/kernels/CLWinogradInputTransformKernel.h
deleted file mode 100644
index 25301877e6..0000000000
--- a/src/core/CL/kernels/CLWinogradInputTransformKernel.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLWINOGRADINPUTTRANSFORMKERNEL_H
-#define ARM_COMPUTE_CLWINOGRADINPUTTRANSFORMKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** OpenCL kernel to perform Winograd input transform.*/
-class CLWinogradInputTransformKernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLWinogradInputTransformKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLWinogradInputTransformKernel(const CLWinogradInputTransformKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLWinogradInputTransformKernel &operator=(const CLWinogradInputTransformKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLWinogradInputTransformKernel(CLWinogradInputTransformKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLWinogradInputTransformKernel &operator=(CLWinogradInputTransformKernel &&) = default;
- /** Set the input and output of the kernel.
- *
- * @note Winograd input transform supports the following configurations for NCHW data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd input transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] input The input tensor to transform. Data types supported: F16/F32
- * @param[out] output The output tensor. The shape for this tensor can be calculated using the utility function @p compute_winograd_input_transform_shape. Data types supported: Same as @p input
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo.
- */
- void configure(const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info);
- /** Set the input and output of the kernel.
- *
- * @note Winograd input transform supports the following configurations for NCHW data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd input transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input The input tensor to transform. Data types supported: F16/F32
- * @param[out] output The output tensor. The shape for this tensor can be calculated using the utility function @p compute_winograd_input_transform_shape. Data types supported: Same as @p input
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo.
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info);
- /** Static function to check if given info will lead to a valid configuration of @ref CLWinogradInputTransformKernel
- *
- * @note Winograd input transform supports the following configurations for NCHW data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd input transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] input The input tensor to transform. Data types supported: F16/F32
- * @param[out] output The output tensor. The shape for this tensor can be calculated using the utility function @p compute_winograd_input_transform_shape. Data types supported: Same as @p input
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
- BorderSize border_size() const override;
-
-private:
- using WinogradKey = std::pair<std::pair<int, int>, std::pair<int, int>>;
-
- BorderSize _border_size;
- const ICLTensor *_input;
- ICLTensor *_output;
- DataLayout _data_layout;
- int _num_tiles_x;
- int _num_tiles_y;
- unsigned int _step_z;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLWINOGRADINPUTTRANSFORMKERNEL_H */
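
The F(output tile, kernel size) tables documented in the removed header can be encoded as a small lookup. The sketch below uses a hypothetical helper name and plain string pairs; it is not the library's cl_winograd_convolution_layer_supported(), only a restatement of the lists above.

// Illustrative sketch of the supported-configuration lists documented in the removed header.
#include <iostream>
#include <set>
#include <string>
#include <utility>

enum class Layout { NCHW, NHWC };

// Hypothetical helper; pairs are (output tile, kernel size) copied from the @note lists.
static bool is_winograd_input_transform_supported(const std::string &tile, const std::string &kernel, Layout layout)
{
    static const std::set<std::pair<std::string, std::string>> nchw =
    {
        { "2x2", "3x3" }, { "2x1", "3x1" }, { "1x2", "1x3" },
        { "4x4", "3x3" }, { "4x1", "3x1" }, { "1x4", "1x3" },
        { "4x4", "5x5" }, { "4x1", "5x1" }, { "1x4", "1x5" }
    };
    static const std::set<std::pair<std::string, std::string>> nhwc =
    {
        { "4x4", "3x3" }, { "4x1", "3x1" }, { "1x4", "1x3" },
        { "4x4", "5x5" }, { "4x1", "5x1" }, { "1x4", "1x5" }
    };
    const auto &table = (layout == Layout::NCHW) ? nchw : nhwc;
    return table.count({ tile, kernel }) != 0;
}

int main()
{
    std::cout << is_winograd_input_transform_supported("2x2", "3x3", Layout::NCHW) << '\n'; // 1
    std::cout << is_winograd_input_transform_supported("2x2", "3x3", Layout::NHWC) << '\n'; // 0
    return 0;
}
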
diff --git a/src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp b/src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp
deleted file mode 100644
index 2018559f60..0000000000
--- a/src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLWinogradOutputTransformKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/IAccessWindow.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/Window.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-
-#include "support/StringSupport.h"
-
-#include <cmath>
-
-namespace arm_compute
-{
-using namespace arm_compute::misc::shape_calculator;
-
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_UNUSED(act_info);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
-
- ARM_COMPUTE_RETURN_ERROR_ON(output->data_layout() != winograd_info.output_data_layout);
-
- const PadStrideInfo conv_info = winograd_info.convolution_info;
- const Size2D output_tile_size = winograd_info.output_tile_size;
- const Size2D kernel_size = winograd_info.kernel_size;
- const Size2D input_dimensions = winograd_info.input_dimensions;
- const unsigned int num_channels = (winograd_info.kernel_size.width + winograd_info.output_tile_size.width - 1) * (winograd_info.kernel_size.height + winograd_info.output_tile_size.height - 1);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(!cl_winograd_convolution_layer_supported(output_tile_size, kernel_size, winograd_info.output_data_layout), "Winograd output transform not supported");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->dimension(2) != num_channels, "Wrong number of channels");
-
- // Compute number of elements to process in the X and Y direction
- // Compute the number of output tiles along the x and y direction of size "output_tile_size"
- const Size2D num_tiles = compute_winograd_convolution_tiles(input_dimensions,
- kernel_size,
- output_tile_size,
- conv_info);
-
- ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(1) != static_cast<unsigned int>((num_tiles.area())));
-
- if(bias != nullptr)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, bias);
- ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != bias->dimension(0));
- }
-
- // Checks performed when output is configured
- if(output->total_size() != 0)
- {
- const TensorInfo tensor_info_output = input->clone()->set_tensor_shape(compute_winograd_output_transform_shape(*input, winograd_info));
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *bias, ITensorInfo *output, const Size2D &output_tile_size)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_UNUSED(bias);
-
- constexpr unsigned int num_elems_processed_per_iteration = 1;
-
- Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
- bool window_changed = false;
-
- if(output->data_layout() == DataLayout::NCHW)
- {
- const int output_static_window_end_x = ceil_to_multiple(output->dimension(0), output_tile_size.width);
- const int output_static_window_end_y = ceil_to_multiple(output->dimension(1), output_tile_size.height);
-
- AccessWindowRectangle input_access(input, 0, 0, num_elems_processed_per_iteration, num_elems_processed_per_iteration);
- AccessWindowStatic output_access(output, 0, 0, output_static_window_end_x, output_static_window_end_y);
- window_changed = update_window_and_padding(win, input_access, output_access);
- output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
- }
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
-}
-} // namespace
-
-CLWinogradOutputTransformKernel::CLWinogradOutputTransformKernel()
- : _input(nullptr), _bias(nullptr), _output(nullptr), _is_nhwc(false)
-{
-}
-
-void CLWinogradOutputTransformKernel::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, bias, output, winograd_info, act_info);
-}
-
-void CLWinogradOutputTransformKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const WinogradInfo &winograd_info,
- const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- // Output tensor auto initialization if not yet initialized
- auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(compute_winograd_output_transform_shape(*input->info(), winograd_info)));
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr ? bias->info() : nullptr), output->info(), winograd_info, act_info));
-
- auto padding_info = get_padding_info({ input, bias, output });
-
- _input = input;
- _bias = bias;
- _output = output;
- _is_nhwc = winograd_info.output_data_layout == DataLayout::NHWC;
-
- // Compute num_tiles_x
- const Size2D input_dimensions = winograd_info.input_dimensions;
- const Size2D kernel_size = winograd_info.kernel_size;
- const Size2D output_tile_size = winograd_info.output_tile_size;
- const PadStrideInfo conv_info = winograd_info.convolution_info;
- const int idx_width = get_data_layout_dimension_index(winograd_info.output_data_layout, DataLayoutDimension::WIDTH);
- const int idx_height = get_data_layout_dimension_index(winograd_info.output_data_layout, DataLayoutDimension::HEIGHT);
-
- // Compute the number of output tiles along the x and y direction of size "output_tile_size"
- const Size2D num_tiles = compute_winograd_convolution_tiles(input_dimensions,
- kernel_size,
- output_tile_size,
- conv_info);
- const size_t total_batches = output->info()->tensor_shape().total_size_upper(3);
-
- // Set build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation())));
- build_opts.add_option_if(act_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(act_info.a()));
- build_opts.add_option_if(act_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(act_info.b()));
-
- if((output_tile_size.x() == 2) || (output_tile_size.x() == 1 && output_tile_size.y() == 2))
- {
- build_opts.add_option("-DVEC_SIZE=2");
- }
- else if((output_tile_size.x() == 4) || (output_tile_size.x() == 1 && output_tile_size.y() == 4))
- {
- build_opts.add_option("-DVEC_SIZE=4");
- }
-
- build_opts.add_option_if(_bias != nullptr, std::string("-DHAS_BIAS"));
- build_opts.add_option("-DNUM_TILES_X=" + support::cpp11::to_string(num_tiles.width));
- build_opts.add_option("-DOUTPUT_TILE_W=" + support::cpp11::to_string(output_tile_size.width));
- build_opts.add_option("-DOUTPUT_TILE_H=" + support::cpp11::to_string(output_tile_size.height));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
- build_opts.add_option("-DDST_WIDTH=" + support::cpp11::to_string(_output->info()->dimension(idx_width)));
- build_opts.add_option("-DDST_HEIGHT=" + support::cpp11::to_string(_output->info()->dimension(idx_height)));
- build_opts.add_option_if(total_batches > 1, "-DSRC_DEPTH=" + support::cpp11::to_string(_input->info()->dimension(2)));
- build_opts.add_option_if(winograd_info.kernel_size.height == 1, "-DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL");
- build_opts.add_option_if(winograd_info.kernel_size.width == 1, "-DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL");
-
- // Create kernel
- std::string kernel_name = "winograd_output_transform_" + output_tile_size.to_string() + "_" + kernel_size.to_string() + "_" + lower_string(string_from_data_layout(winograd_info.output_data_layout));
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), (bias != nullptr ? bias->info() : nullptr), output->info(), winograd_info.output_tile_size);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
-
- // Set config_id for enabling LWS tuning
- _config_id = kernel_name;
- _config_id += "_";
- _config_id += lower_string(string_from_data_type(input->info()->data_type()));
- _config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(1));
- _config_id += "_";
- _config_id += lower_string(string_from_data_layout(winograd_info.output_data_layout));
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info) && _is_nhwc);
-}
-
-Status CLWinogradOutputTransformKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, (bias != nullptr ? bias->clone().get() : nullptr), output, winograd_info, act_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), (bias != nullptr ? bias->clone().get() : nullptr), output->clone().get(), winograd_info.output_tile_size).first);
-
- return Status{};
-}
-
-void CLWinogradOutputTransformKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- // Collapse window
- Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
-
- // Get initial windows
- Window slice = window_collapsed.first_slice_window_4D();
- slice.set(Window::DimZ, Window::Dimension(0, 1, 1));
-
- // Setup output slice
- Window slice_out(slice);
- slice_out.set(Window::DimX, Window::Dimension(0, 0, 0));
- slice_out.set(Window::DimY, Window::Dimension(0, 0, 0));
-
- if(_bias != nullptr)
- {
- unsigned int idx1 = 2 * num_arguments_per_4D_tensor();
- Window slice_biases;
- slice_biases.use_tensor_dimensions(_bias->info()->tensor_shape());
- add_1D_tensor_argument(idx1, _bias, slice_biases);
- }
-
- if(_is_nhwc)
- {
- unsigned int idx2 = 2 * num_arguments_per_4D_tensor() + ((_bias != nullptr) ? num_arguments_per_1D_tensor() : 0);
- _kernel.setArg(idx2, static_cast<int>(_output->info()->total_size() - _output->info()->strides_in_bytes().y()));
- }
-
- do
- {
- unsigned int idx = 0;
- add_4D_tensor_argument(idx, _input, slice);
- add_4D_tensor_argument(idx, _output, slice_out);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_3D(slice) && window.slide_window_slice_3D(slice_out));
-}
-} // namespace arm_compute
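
A self-contained sketch of the two shape relations that the removed validate_arguments() above enforces on the Winograd-domain tensor: the channel count (Kw + OTw - 1) * (Kh + OTh - 1), and, assuming the unit strides this kernel requires, a simplified tile count. winograd_num_channels() and winograd_num_tiles() are assumed names, not ACL functions, and the tile formula is a conceptual stand-in for compute_winograd_convolution_tiles().

// Illustrative arithmetic only; Size2D here is a local stand-in, not arm_compute::Size2D.
#include <cmath>
#include <iostream>

struct Size2D { unsigned int width; unsigned int height; };

// Channels of the transformed tensor: (Kw + OTw - 1) * (Kh + OTh - 1), e.g. 36 for F(4x4, 3x3).
static unsigned int winograd_num_channels(Size2D kernel, Size2D tile)
{
    return (kernel.width + tile.width - 1) * (kernel.height + tile.height - 1);
}

// Tiles per spatial dimension with unit strides: ceil(conv_output_dim / tile_dim).
static unsigned int winograd_num_tiles(unsigned int in_dim, unsigned int kernel_dim, unsigned int tile_dim,
                                       unsigned int pad_before, unsigned int pad_after)
{
    const unsigned int conv_out = in_dim + pad_before + pad_after - kernel_dim + 1;
    return static_cast<unsigned int>(std::ceil(conv_out / static_cast<float>(tile_dim)));
}

int main()
{
    const Size2D kernel{ 3, 3 };
    const Size2D tile{ 4, 4 };
    std::cout << winograd_num_channels(kernel, tile) << '\n'; // 36
    std::cout << winograd_num_tiles(56, 3, 4, 1, 1) << '\n';  // 14 tiles along one 56-wide axis with pad 1
    return 0;
}
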
diff --git a/src/core/CL/kernels/CLWinogradOutputTransformKernel.h b/src/core/CL/kernels/CLWinogradOutputTransformKernel.h
deleted file mode 100644
index 632a5629d9..0000000000
--- a/src/core/CL/kernels/CLWinogradOutputTransformKernel.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLWINOGRADOUTPUTTRANSFORMKERNEL_H
-#define ARM_COMPUTE_CLWINOGRADOUTPUTTRANSFORMKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the Winograd output transform kernel. */
-class CLWinogradOutputTransformKernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLWinogradOutputTransformKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLWinogradOutputTransformKernel(const CLWinogradOutputTransformKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLWinogradOutputTransformKernel &operator=(const CLWinogradOutputTransformKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLWinogradOutputTransformKernel(CLWinogradOutputTransformKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLWinogradOutputTransformKernel &operator=(CLWinogradOutputTransformKernel &&) = default;
- /** Default destructor */
- ~CLWinogradOutputTransformKernel() = default;
- /** Set the input and output tensor.
- *
- * @note Winograd output transform supports the following configurations for NCHW data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd output transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] input Source tensor with shape [C, N, K, batches]. Data types supported: F16/F32.
- * @param[in] bias Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. It can be a nullptr. Data type supported: as @p input
- * @param[out] output The output tensor. The shape for this tensor can be calculated using the utility function @p compute_winograd_output_transform_shape. Data types supported: Same as @p input
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
- */
- void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info = ActivationLayerInfo());
- /** Set the input and output tensor.
- *
- * @note Winograd output transform supports the following configurations for NCHW data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd output transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Source tensor with shape [C, N, K, batches]. Data types supported: F16/F32.
- * @param[in] bias Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. It can be a nullptr. Data type supported: as @p input
- * @param[out] output The output tensor. The shape for this tensor can be calculated using the utility function @p compute_winograd_output_transform_shape. Data types supported: Same as @p input
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const WinogradInfo &winograd_info,
- const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
- /** Static function to check if given info will lead to a valid configuration of @ref CLWinogradOutputTransformKernel
- *
- * @note Winograd output transform supports the following configurations for NCHW data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd output transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] input Source tensor with shape [C, N, K, batches]. Data types supported: F16/F32.
- * @param[in] bias Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. It can be a nullptr. Data type supported: as @p input
- * @param[out] output The output tensor. The shape for this tensor can be calculated using the utility function @p compute_winograd_output_transform_shape. Data types supported: Same as @p input
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation @ref ActivationLayerInfo. Only RELU, BOUNDED_RELU, LU_BOUNDED_RELU, LEAKY_RELU and SOFT_RELU supported.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- using WinogradKey = std::pair<std::pair<int, int>, std::pair<int, int>>;
-
- const ICLTensor *_input;
- const ICLTensor *_bias;
- ICLTensor *_output;
- bool _is_nhwc;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLWINOGRADOUTPUTTRANSFORMKERNEL_H */
diff --git a/src/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h b/src/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h
deleted file mode 100644
index 4c92ae417f..0000000000
--- a/src/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_ICLDEPTHWISECONVOLUTIONKERNEL3x3_H
-#define ARM_COMPUTE_ICLDEPTHWISECONVOLUTIONKERNEL3x3_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the kernel to run a 3x3 depthwise convolution on a tensor.
- */
-class ICLDepthwiseConvolutionLayer3x3Kernel : public ICLKernel
-{
-public:
- /** Default constructor */
- ICLDepthwiseConvolutionLayer3x3Kernel()
- : _border_size(0), _input(), _output(), _weights(), _biases(), _conv_stride_y(1), _output_multipliers(), _output_shifts(), _is_quantized(false)
- {
- }
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- ICLDepthwiseConvolutionLayer3x3Kernel(const ICLDepthwiseConvolutionLayer3x3Kernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- ICLDepthwiseConvolutionLayer3x3Kernel &operator=(const ICLDepthwiseConvolutionLayer3x3Kernel &) = delete;
- /** Default Move Constructor. */
- ICLDepthwiseConvolutionLayer3x3Kernel(ICLDepthwiseConvolutionLayer3x3Kernel &&) = default;
- /** Default move assignment operator */
- ICLDepthwiseConvolutionLayer3x3Kernel &operator=(ICLDepthwiseConvolutionLayer3x3Kernel &&) = default;
- /** Initialize the function's source, destination, conv and border_size.
- *
- * @param[in] input Source tensor. DataType supported: QASYMM8/F16/F32.
- * @param[in] weights Weights tensor. A 3D tensor with dimensions [3, 3, IFM].
- * Data type supported: Same as @p input, QASYMM8/QSYMM8_PER_CHANNEL when input is QASYMM8.
- * @param[in] biases Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input, S32 when input is QASYMM8.
- * @param[out] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU are supported for QASYMM8.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- * @param[in] output_multipliers (Optional) Output multipliers tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- * @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- */
- virtual void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U),
- const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr) = 0;
- /** Initialize the function's source, destination, conv and border_size.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Source tensor. DataType supported: QASYMM8/F16/F32.
- * @param[in] weights Weights tensor. A 3D tensor with dimensions [3, 3, IFM].
- * Data type supported: Same as @p input, QASYMM8/QSYMM8_PER_CHANNEL when input is QASYMM8.
- * @param[in] biases Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input, S32 when input is QASYMM8.
- * @param[out] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU are supported for QASYMM8.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- * @param[in] output_multipliers (Optional) Output multipliers tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- * @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- */
- virtual void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U),
- const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr) = 0;
-
-protected:
- BorderSize _border_size;
- const ICLTensor *_input;
- ICLTensor *_output;
- const ICLTensor *_weights;
- const ICLTensor *_biases;
- unsigned int _conv_stride_y;
- const ICLTensor *_output_multipliers;
- const ICLTensor *_output_shifts;
- bool _is_quantized;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_ICLDEPTHWISECONVOLUTIONKERNEL3x3_H */
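
For reference, a simplified, self-contained sketch of the pattern the removed interface expressed: an abstract base holding the shared tensor state and declaring a pure virtual configure() that layout-specific 3x3 depthwise kernels override. The types and class names below are stand-ins, not the ACL classes.

// Simplified illustration of the interface shape: shared protected state plus a
// pure virtual configure() that concrete 3x3 depthwise kernels implement.
#include <iostream>

struct Tensor {};
struct PadStrideInfo { unsigned int stride_x{ 1 }, stride_y{ 1 }; };

class IDepthwiseConv3x3Kernel
{
public:
    virtual ~IDepthwiseConv3x3Kernel() = default;
    virtual void configure(const Tensor *input, const Tensor *weights, const Tensor *biases,
                           Tensor *output, const PadStrideInfo &conv_info) = 0;

protected:
    const Tensor *_input{ nullptr };
    const Tensor *_weights{ nullptr };
    const Tensor *_biases{ nullptr };
    Tensor       *_output{ nullptr };
    unsigned int  _conv_stride_y{ 1 };
};

class DepthwiseConv3x3KernelNCHW final : public IDepthwiseConv3x3Kernel
{
public:
    void configure(const Tensor *input, const Tensor *weights, const Tensor *biases,
                   Tensor *output, const PadStrideInfo &conv_info) override
    {
        _input         = input;
        _weights       = weights;
        _biases        = biases; // may be nullptr, as in the removed interface
        _output        = output;
        _conv_stride_y = conv_info.stride_y;
        std::cout << "configured NCHW 3x3 depthwise kernel, stride_y=" << _conv_stride_y << '\n';
    }
};

int main()
{
    Tensor in, w, out;
    DepthwiseConv3x3KernelNCHW kernel;
    kernel.configure(&in, &w, nullptr, &out, PadStrideInfo{});
    return 0;
}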