author     Gian Marco Iodice <gianmarco.iodice@arm.com>    2021-04-16 15:08:59 +0100
committer  Gian Marco Iodice <gianmarco.iodice@arm.com>    2021-07-02 15:56:45 +0000
commit     8155c0253c00aa9e26651361460c66feb39829a6 (patch)
tree       41dacc432d4d1f1daa32d20d15e5120c11b9fa56
parent     2eb5d16b839cbc28c6cb7f0de7a0bf15290b425a (diff)
download   ComputeLibrary-8155c0253c00aa9e26651361460c66feb39829a6.tar.gz
Rework OpenCL Depthwise Convolution
- Remove dedicated kernels for NCHW. Now we only use NHWC with permute
- Remove specialized kernels for 3x3 NHWC
- Simplify CLDepthwiseConvolutionLayer.cpp to call just the native implementation for both floating-point and quantized data types
- Develop two parametric OpenCL kernels for the depthwise convolution layer in NHWC (floating-point and quantized)
- Add support to export the weights to cl_image
- Extend tests for depthwise convolution on OpenCL

Resolves COMPMID-4417

Change-Id: Ibe533f79c2860f9cac8e921895d5a8f947753a5c
Signed-off-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5893
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
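From the caller's side the public API is unchanged; only the internal dispatch between the 3x3 and generic paths is gone. A minimal usage sketch (assuming standard ACL tensor setup; shapes, strides and padding are illustrative, not from this patch):

#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // NCHW tensors: after this rework the function permutes them to NHWC
    // internally and always runs the single native kernel.
    CLTensor input, weights, biases, output;
    input.allocator()->init(TensorInfo(TensorShape(32U, 32U, 16U), 1, DataType::F32, DataLayout::NCHW));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 16U), 1, DataType::F32, DataLayout::NCHW));
    biases.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));

    CLDepthwiseConvolutionLayer dwc;
    dwc.configure(&input, &weights, &biases, &output, PadStrideInfo(1, 1, 1, 1));

    input.allocator()->allocate();
    weights.allocator()->allocate();
    biases.allocator()->allocate();
    output.allocator()->allocate();

    dwc.run();
    return 0;
}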
-rw-r--r--  Android.bp  2
-rw-r--r--  arm_compute/core/CL/CLHelpers.h  8
-rw-r--r--  arm_compute/core/KernelDescriptors.h  14
-rw-r--r--  arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h  256
-rw-r--r--  docs/user_guide/release_version_and_change_log.dox  12
-rw-r--r--  filelist.json  14
-rw-r--r--  src/core/CL/CLHelpers.cpp  39
-rw-r--r--  src/core/CL/CLKernelLibrary.cpp  37
-rw-r--r--  src/core/CL/CLKernels.h  2
-rw-r--r--  src/core/CL/cl_kernels/activation_quant_helpers.h  9
-rw-r--r--  src/core/CL/cl_kernels/depthwise_convolution.cl  1781
-rw-r--r--  src/core/CL/cl_kernels/depthwise_convolution_quantized.cl  961
-rw-r--r--  src/core/CL/cl_kernels/direct_convolution.cl  3
-rw-r--r--  src/core/CL/cl_kernels/dwc_native_fp_nhwc.cl  212
-rw-r--r--  src/core/CL/cl_kernels/dwc_native_quantized_nhwc.cl  277
-rw-r--r--  src/core/CL/cl_kernels/tile_helpers.h  464
-rw-r--r--  src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp  432
-rw-r--r--  src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h  131
-rw-r--r--  src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp  238
-rw-r--r--  src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h  110
-rw-r--r--  src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp  230
-rw-r--r--  src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h  46
-rw-r--r--  src/core/gpu/cl/ClKernelLibrary.cpp  30
-rw-r--r--  src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp  474
-rw-r--r--  tests/datasets/DepthwiseConvolutionLayerDataset.h  2
-rw-r--r--  tests/validation/CL/DepthwiseConvolutionLayer.cpp  7
-rw-r--r--  tests/validation/CL/DepthwiseConvolutionLayerNative.cpp  179
-rw-r--r--  tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h  63
28 files changed, 1446 insertions, 4587 deletions
diff --git a/Android.bp b/Android.bp
index 5943b56450..670f0697d7 100644
--- a/Android.bp
+++ b/Android.bp
@@ -90,8 +90,6 @@ cc_library_static {
"src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.cpp",
"src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp",
"src/core/CL/kernels/CLDepthToSpaceLayerKernel.cpp",
- "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp",
- "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp",
"src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp",
"src/core/CL/kernels/CLFFTDigitReverseKernel.cpp",
"src/core/CL/kernels/CLFFTRadixStageKernel.cpp",
diff --git a/arm_compute/core/CL/CLHelpers.h b/arm_compute/core/CL/CLHelpers.h
index a9ac6a5933..180211c558 100644
--- a/arm_compute/core/CL/CLHelpers.h
+++ b/arm_compute/core/CL/CLHelpers.h
@@ -236,5 +236,13 @@ bool get_wbsm_support_info(const cl::Device &device);
*/
void set_wbsm(cl::Kernel &kernel, cl_int wbsm_hint);
+/** Helper function to check if we can export the weights to cl_image
+ *
+ * @param[in] tensor Weights tensor
+ *
+ * @return true if we can export the weights to cl_image
+ */
+bool export_weights_to_cl_image(const ITensorInfo *tensor);
+
} // namespace arm_compute
#endif /* ARM_COMPUTE_CLHELPERS_H */
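The conditions the new helper checks are spelled out in CLHelpers.cpp further down (channel count a multiple of 4, floating-point data, cl_khr_image2d_from_buffer support, non-zero pitch alignment, image size limits). A hedged call-site sketch:

// Sketch only: gate the texture path on the helper (the real call sites
// live inside the library's kernel-configuration logic).
if(export_weights_to_cl_image(weights.info()))
{
    // The weights can be read through a 2D image (texture) instead of a
    // plain buffer, which can improve read performance on some GPUs.
}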
diff --git a/arm_compute/core/KernelDescriptors.h b/arm_compute/core/KernelDescriptors.h
index 6c1fc74b1e..a6e5c3372e 100644
--- a/arm_compute/core/KernelDescriptors.h
+++ b/arm_compute/core/KernelDescriptors.h
@@ -96,16 +96,12 @@ struct GEMMKernelInfo
GEMMLowpOutputStageInfo output_stage{}; /**< GEMMLowp output stage information */
};
-/** Descriptor used by the depthwise convolution kernels */
-struct DWCKernelInfo
+/** Compute descriptor used by the depthwise convolution native kernel */
+struct DWCComputeKernelInfo
{
- ActivationLayerInfo activation_info{}; /**< Activation function to perform after the depthwise convolution */
-};
-
-/** Descriptor used by the depthwise convolution kernels to retrieve the number of output elements processed by each thread */
-struct DWCWeightsKernelInfo
-{
- unsigned int n0{ 0 }; /**< Number of columns processed by each thread */
+ unsigned int n0{ 0 }; /**< Number of columns processed by each thread */
+ unsigned int m0{ 0 }; /**< Number of rows processed by each thread */
+ bool export_weights_to_cl_image{ false }; /**< Export the weights to cl_image */
};
/** Descriptor used by the softmax kernels */
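A hedged example of filling the new descriptor (the n0/m0 values are illustrative, not tuned for any particular GPU):

// n0/m0 control the per-work-item tile; export_weights_to_cl_image enables
// the texture path when the device and tensor shape allow it.
DWCComputeKernelInfo dwc_info{};
dwc_info.n0                         = 4; // output channels per work-item
dwc_info.m0                         = 2; // output rows per work-item
dwc_info.export_weights_to_cl_image = false;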
diff --git a/arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h
index bbb00a1ebc..01ddae12bb 100644
--- a/arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h
@@ -33,13 +33,14 @@
namespace arm_compute
{
class CLCompileContext;
-class CLFillBorderKernel;
class CLDepthwiseConvolutionLayerNativeKernel;
-class CLDepthwiseConvolutionLayer3x3NCHWKernel;
-class CLDepthwiseConvolutionLayer3x3NHWCKernel;
class ICLTensor;
/** Function to execute a depthwise convolution
+ *
+ * -# @ref CLDepthwiseConvolutionLayerNativeKernel
+ * -# @ref CLPermute (if the data layout is NCHW)
+ *
*/
class CLDepthwiseConvolutionLayer : public IFunction
{
@@ -102,239 +103,38 @@ public:
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1,
- ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U));
+ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
+ unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U));
// Inherited methods overridden:
void run() override;
void prepare() override;
-private:
- /** Static function to choose the best depthwise convolution function for @ref CLDepthwiseConvolutionLayer
- *
- * @param[in] input Source tensor info. Data type supported: QASYMM8/FP16/FP32. Data layout supported: NHWC, NCHW
- * @param[in] weights Weights tensor info. These are 3D tensors with shape [kernel_x, kernel_y, IFM].
- * Data type supported: Same as @p input or QASYMM8/QSYMM8_PER_CHANNEL when @p input is QASYMM8.
- * @param[in] biases Biases tensor info. A 1D tensor with shape [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input, S32 when input is QASYMM8.
- * @param[in] output Destination tensor. Data type supported: same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU for 3x3 QASYMM8 supported.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- *
- * @return a Depthwise Convolution Function
- */
- static DepthwiseConvolutionFunction get_depthwiseconvolution_function(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1,
- ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U));
-
- /** Basic function to execute a depthwise convolution for kernel size 3x3xC (when data layout NCHW) or Cx3x3 (when data layout NHWC). This function calls the following OpenCL kernels:
- *
- * -# @ref CLDepthwiseConvolutionLayer3x3NCHWKernel (if data_layout == NCHW)
- * -# @ref CLDepthwiseConvolutionLayer3x3NHWCKernel (if data_layout == NHWC)
- * -# @ref CLFillBorderKernel (if pad_x or pad_y > 0)
- *
- */
- class CLDepthwiseConvolutionLayerInternal3x3 : public IFunction
- {
- public:
- /** Default constructor */
- CLDepthwiseConvolutionLayerInternal3x3(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLDepthwiseConvolutionLayerInternal3x3(const CLDepthwiseConvolutionLayerInternal3x3 &) = delete;
- /** Default move constructor */
- CLDepthwiseConvolutionLayerInternal3x3(CLDepthwiseConvolutionLayerInternal3x3 &&) = default;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLDepthwiseConvolutionLayerInternal3x3 &operator=(const CLDepthwiseConvolutionLayerInternal3x3 &) = delete;
- /** Default move assignment operator */
- CLDepthwiseConvolutionLayerInternal3x3 &operator=(CLDepthwiseConvolutionLayerInternal3x3 &&) = default;
- /** Initialize the function's source, destination, conv and border_size.
- *
- * @param[in, out] input Source tensor. Data type supported: QASYMM8/F16/F32. (Written to only for border filling).
- * @param[in] weights Weights tensor. A 3D tensor with shape [3, 3, IFM].
- * Data type supported: Same as @p input or QASYMM8/QSYMM8_PER_CHANNEL when @p input is QASYMM8.
- * @param[in] biases Biases tensor. A 1D tensor with shape [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input.
- * @param[out] output Destination tensor. Data type supported: same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU for 3x3 QASYMM8 supported.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- */
- void configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1,
- ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U));
- /** Initialize the function's source, destination, conv and border_size.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in, out] input Source tensor. Data type supported: QASYMM8/F16/F32. (Written to only for border filling).
- * @param[in] weights Weights tensor. A 3D tensor with shape [3, 3, IFM].
- * Data type supported: Same as @p input or QASYMM8/QSYMM8_PER_CHANNEL when @p input is QASYMM8.
- * @param[in] biases Biases tensor. A 1D tensor with shape [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input.
- * @param[out] output Destination tensor. Data type supported: same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU for 3x3 QASYMM8 supported.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- */
- void configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U));
-
- /** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayer3x3
- *
- * @param[in] input Source tensor info. Data type supported: QASYMM8 for all layouts, F16/F32 for NCHW.
- * @param[in] weights Weights tensor info. A 3D tensor with shape [3, 3, IFM].
- * Data type supported: Same as @p input or QASYMM8/QSYMM8_PER_CHANNEL when @p input is QASYMM8.
- * @param[in] biases Biases tensor info. A 1D tensor with shape [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input, S32 when input is QASYMM8.
- * @param[in] output Destination tensor. Data type supported: same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU for 3x3 QASYMM8 supported.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1,
- ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U));
-
- // Inherited methods overridden:
- void run() override;
- void prepare() override;
-
- void set_memory_group(std::shared_ptr<IMemoryManager> memory_manager)
- {
- _memory_group = MemoryGroup(std::move(memory_manager));
- };
-
- private:
- MemoryGroup _memory_group;
- std::unique_ptr<CLDepthwiseConvolutionLayer3x3NCHWKernel> _kernel_nchw;
- std::unique_ptr<CLDepthwiseConvolutionLayer3x3NHWCKernel> _kernel_nhwc;
- std::unique_ptr<CLFillBorderKernel> _border_handler;
- CLPermute _permute_input_to_nchw;
- CLPermute _permute_weights_to_nchw;
- CLPermute _permute_output_to_nhwc;
- CLTensor _permuted_input;
- CLTensor _permuted_weights;
- CLTensor _permuted_output;
- CLTensor _output_multipliers;
- CLTensor _output_shifts;
- const ITensor *_original_weights;
- const ITensor *_input;
- const ITensor *_output;
- bool _needs_permute;
- bool _is_prepared;
- bool _is_quantized;
- bool _is_nhwc;
- };
-
- /** Basic function to execute a generic depthwise convolution. This function calls the following OpenCL kernels:
- *
- * -# @ref CLDepthwiseConvolutionLayerNativeKernel
- * -# @ref CLPermute (x 3) if the data layout is NCHW
- *
- */
- class CLDepthwiseConvolutionLayerGeneric : public IFunction
+ void set_memory_group(std::shared_ptr<IMemoryManager> memory_manager)
{
- public:
- /** Default constructor */
- CLDepthwiseConvolutionLayerGeneric(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLDepthwiseConvolutionLayerGeneric(const CLDepthwiseConvolutionLayerGeneric &) = delete;
- /** Default move constructor */
- CLDepthwiseConvolutionLayerGeneric(CLDepthwiseConvolutionLayerGeneric &&) = default;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLDepthwiseConvolutionLayerGeneric &operator=(const CLDepthwiseConvolutionLayerGeneric &) = delete;
- /** Default move assignment operator */
- CLDepthwiseConvolutionLayerGeneric &operator=(CLDepthwiseConvolutionLayerGeneric &&) = default;
- /** Initialize the function's source, destination, weights and convolution information.
- *
- * @param[in, out] input Source tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/F32. (Written to only for border filling).
- * @param[in] weights Weights tensor. These are 3D tensors with shape [kernel_x, kernel_y, IFM].
- * Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8.
- * @param[in] biases Biases tensor. A 1D tensor with shape [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
- * @param[out] output Destination tensor. Data type supported: same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- */
- void configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier = 1, const ActivationLayerInfo &act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U));
- /** Initialize the function's source, destination, weights and convolution information.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in, out] input Source tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/F32. (Written to only for border filling).
- * @param[in] weights Weights tensor. These are 3D tensors with shape [kernel_x, kernel_y, IFM].
- * Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8.
- * @param[in] biases Biases tensor. A 1D tensor with shape [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
- * @param[out] output Destination tensor. Data type supported: same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- */
- void configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier = 1, const ActivationLayerInfo &act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U));
-
- /** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayerGeneric
- *
- * @param[in] input Source tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/F32.
- * @param[in] weights Weights tensor info. These are 3D tensors with shape [kernel_x, kernel_y, IFM].
- * Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8.
- * @param[in] biases Biases tensor info. A 1D tensor with shape [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
- * @param[in] output Destination tensor. Data type supported: same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier = 1, const ActivationLayerInfo &act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U));
-
- // Inherited methods overridden:
- void run() override;
- void prepare() override;
-
- void set_memory_group(std::shared_ptr<IMemoryManager> memory_manager)
- {
- _memory_group = MemoryGroup(std::move(memory_manager));
- };
-
- private:
- MemoryGroup _memory_group;
-
- std::unique_ptr<CLDepthwiseConvolutionLayerNativeKernel> _dwc_native_kernel;
- CLPermute _permute_input_to_nhwc;
- CLPermute _permute_weights_to_nhwc;
- CLPermute _permute_output_to_nchw;
-
- CLTensor _permuted_input;
- CLTensor _permuted_weights;
- CLTensor _permuted_output;
- CLTensor _output_multipliers;
- CLTensor _output_shifts;
- const ITensor *_original_weights;
- const ITensor *_input;
- const ITensor *_output;
-
- bool _needs_permute;
- bool _is_prepared;
- bool _is_quantized;
+ _memory_group = MemoryGroup(std::move(memory_manager));
};
- std::shared_ptr<IMemoryManager> _memory_manager;
-
- DepthwiseConvolutionFunction _depth_conv_func;
- CLDepthwiseConvolutionLayerInternal3x3 _func_3x3;
- CLDepthwiseConvolutionLayerGeneric _func_generic;
+private:
+ MemoryGroup _memory_group;
+
+ std::unique_ptr<CLDepthwiseConvolutionLayerNativeKernel> _dwc_native_kernel;
+ CLPermute _permute_input_to_nhwc;
+ CLPermute _permute_weights_to_nhwc;
+ CLPermute _permute_output_to_nchw;
+
+ CLTensor _permuted_input;
+ CLTensor _permuted_weights;
+ CLTensor _permuted_output;
+ CLTensor _output_multipliers;
+ CLTensor _output_shifts;
+ const ITensor *_original_weights;
+ const ITensor *_input;
+ const ITensor *_output;
+
+ bool _needs_permute;
+ bool _is_prepared;
+ bool _is_quantized;
};
} // namespace arm_compute
#endif /*ARM_COMPUTE_CLDEPTHWISECONVOLUTION_H */
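The remaining members make the single execution path easy to reconstruct; a sketch (not the verbatim implementation) for an NCHW input:

// configure(): _needs_permute = (data layout == NCHW); if so, set up
//   _permute_input_to_nhwc   with PermutationVector(2U, 0U, 1U)  // NCHW -> NHWC
//   _permute_weights_to_nhwc with PermutationVector(2U, 0U, 1U)
//   _permute_output_to_nchw  with PermutationVector(1U, 2U, 0U)  // NHWC -> NCHW
// prepare(): permute the weights once and mark _is_prepared.
// run():     permute input -> enqueue *_dwc_native_kernel -> permute output.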
diff --git a/docs/user_guide/release_version_and_change_log.dox b/docs/user_guide/release_version_and_change_log.dox
index 2d4d0358f2..0c8b57ff9f 100644
--- a/docs/user_guide/release_version_and_change_log.dox
+++ b/docs/user_guide/release_version_and_change_log.dox
@@ -62,7 +62,7 @@ v21.05 Public major release
- @ref NEDeconvolutionLayer
- Remove padding from OpenCL kernels:
- @ref CLL2NormalizeLayerKernel
- - @ref CLDepthwiseConvolutionLayer3x3NHWCKernel
+ - CLDepthwiseConvolutionLayer3x3NHWCKernel
- @ref CLNormalizationLayerKernel
- @ref CLNormalizePlanarYUVLayerKernel
- @ref opencl::kernels::ClMulKernel
@@ -271,7 +271,7 @@ v20.11 Public major release
- @ref CLDepthwiseConvolutionLayerNativeKernel
- CLDepthConvertLayerKernel
- CLCopyKernel
- - @ref CLDepthwiseConvolutionLayer3x3NHWCKernel
+ - CLDepthwiseConvolutionLayer3x3NHWCKernel
- CLActivationLayerKernel
- CLWinogradFilterTransformKernel
- CLWidthConcatenateLayerKernel
@@ -1032,10 +1032,10 @@ v18.11 Public major release
- CLWidthConcatenateLayer
- CLFlattenLayer
- @ref CLSoftmaxLayer
- - Add dot product support for @ref CLDepthwiseConvolutionLayer3x3NHWCKernel non-unit stride
+ - Add dot product support for CLDepthwiseConvolutionLayer3x3NHWCKernel non-unit stride
- Add SVE support
- Fused batch normalization into convolution layer weights in @ref CLFuseBatchNormalization
- - Fuses activation in @ref CLDepthwiseConvolutionLayer3x3NCHWKernel, @ref CLDepthwiseConvolutionLayer3x3NHWCKernel and @ref NEGEMMConvolutionLayer
+ - Fuses activation in CLDepthwiseConvolutionLayer3x3NCHWKernel, CLDepthwiseConvolutionLayer3x3NHWCKernel and @ref NEGEMMConvolutionLayer
- Added NHWC data layout support to:
- @ref CLChannelShuffleLayer
- @ref CLDeconvolutionLayer
@@ -1045,7 +1045,7 @@ v18.11 Public major release
- NEDepthwiseConvolutionLayer3x3Kernel
- CLPixelWiseMultiplicationKernel
- Added FP16 support to the following kernels:
- - @ref CLDepthwiseConvolutionLayer3x3NHWCKernel
+ - CLDepthwiseConvolutionLayer3x3NHWCKernel
- NEDepthwiseConvolutionLayer3x3Kernel
- @ref CLNormalizePlanarYUVLayerKernel
- @ref CLWinogradConvolutionLayer (5x5 kernel)
@@ -1286,7 +1286,7 @@ v17.09 Public major release
- NEReshapeLayerKernel / @ref NEReshapeLayer
- New OpenCL kernels / functions:
- - @ref CLDepthwiseConvolutionLayer3x3NCHWKernel @ref CLDepthwiseConvolutionLayer3x3NHWCKernel CLDepthwiseIm2ColKernel CLDepthwiseVectorToTensorKernel CLDepthwiseWeightsReshapeKernel / CLDepthwiseConvolutionLayer3x3 @ref CLDepthwiseConvolutionLayer CLDepthwiseSeparableConvolutionLayer
+ - CLDepthwiseConvolutionLayer3x3NCHWKernel CLDepthwiseConvolutionLayer3x3NHWCKernel CLDepthwiseIm2ColKernel CLDepthwiseVectorToTensorKernel CLDepthwiseWeightsReshapeKernel / CLDepthwiseConvolutionLayer3x3 @ref CLDepthwiseConvolutionLayer CLDepthwiseSeparableConvolutionLayer
- CLDequantizationLayerKernel / CLDequantizationLayer
- CLDirectConvolutionLayerKernel / @ref CLDirectConvolutionLayer
- CLFlattenLayer
diff --git a/filelist.json b/filelist.json
index 7129efbf21..7512ac12bd 100644
--- a/filelist.json
+++ b/filelist.json
@@ -389,20 +389,6 @@
]
}
},
- "DepthwiseConvolutionLayer3x3NCHW": {
- "files": {
- "kernel": [
- "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp"
- ]
- }
- },
- "DepthwiseConvolutionLayer3x3NHWC": {
- "files": {
- "kernel": [
- "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp"
- ]
- }
- },
"DepthwiseConvolutionLayerNative": {
"files": {
"kernel": [
diff --git a/src/core/CL/CLHelpers.cpp b/src/core/CL/CLHelpers.cpp
index 6af378c7ab..3323929742 100644
--- a/src/core/CL/CLHelpers.cpp
+++ b/src/core/CL/CLHelpers.cpp
@@ -22,6 +22,7 @@
* SOFTWARE.
*/
#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/CLTypes.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Log.h"
@@ -427,4 +428,42 @@ void set_wbsm(cl::Kernel &kernel, cl_int wbsm_hint)
ARM_COMPUTE_ERROR_ON(err != CL_SUCCESS);
}
+bool export_weights_to_cl_image(const ITensorInfo *tensor)
+{
+ if(tensor->tensor_shape()[0] % 4)
+ {
+ return false;
+ }
+
+ // If not floating point
+ if(!is_data_type_float(tensor->data_type()))
+ {
+ return false;
+ }
+
+ // Check if the cl_khr_image2d_from_buffer extension is supported on the target platform
+ if(!image2d_from_buffer_supported(CLKernelLibrary::get().get_device()))
+ {
+ return false;
+ }
+
+ // Check cl image pitch alignment
+ if(get_cl_image_pitch_alignment(CLKernelLibrary::get().get_device()) == 0)
+ {
+ return false;
+ }
+
+ const size_t image_w = tensor->tensor_shape()[0] / 4;
+ const size_t image_h = tensor->tensor_shape()[1] * tensor->tensor_shape()[2] * tensor->tensor_shape()[3];
+ const size_t max_image_w = CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_IMAGE2D_MAX_WIDTH>();
+ const size_t max_image_h = CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_IMAGE2D_MAX_HEIGHT>();
+
+ if(image_w > max_image_w || image_h > max_image_h)
+ {
+ return false;
+ }
+
+ return true;
+}
+
} // namespace arm_compute
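A worked example of the size computation above, assuming F32 NHWC weights of shape [IFM = 64, W = 3, H = 3, N = 1]:

// 64 % 4 == 0                -> passes the channel check
// image_w = 64 / 4 = 16      // 4 floats packed per RGBA texel
// image_h = 3 * 3 * 1 = 9
// The export is possible if 16 <= CL_DEVICE_IMAGE2D_MAX_WIDTH and
// 9 <= CL_DEVICE_IMAGE2D_MAX_HEIGHT, the device exposes
// cl_khr_image2d_from_buffer, and the image pitch alignment is non-zero.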
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index bbd4009389..d8983fcae9 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -22,16 +22,13 @@
* SOFTWARE.
*/
#include "arm_compute/core/CL/CLKernelLibrary.h"
-
#include "arm_compute/core/Error.h"
#include "src/core/gpu/cl/ClKernelLibrary.h"
-
#include <algorithm>
#include <array>
#include <fstream>
#include <utility>
#include <vector>
-
namespace arm_compute
{
CLKernelLibrary::CLKernelLibrary()
@@ -39,123 +36,99 @@ CLKernelLibrary::CLKernelLibrary()
{
opencl_is_available(); // Make sure the OpenCL symbols are initialised *before* the CLKernelLibrary is built
}
-
CLKernelLibrary &CLKernelLibrary::get()
{
static CLKernelLibrary _kernel_library;
return _kernel_library;
}
-
Kernel CLKernelLibrary::create_kernel(const std::string &kernel_name, const std::set<std::string> &build_options_set) const
{
- const opencl::ClKernelLibrary &klib = opencl::ClKernelLibrary::get();
-
- const std::string program_name = klib.program_name(kernel_name);
- auto program = klib.program(program_name);
- const std::string &kernel_path = CLKernelLibrary::get().get_kernel_path();
-
+ const opencl::ClKernelLibrary &klib = opencl::ClKernelLibrary::get();
+ const std::string program_name = klib.program_name(kernel_name);
+ auto program = klib.program(program_name);
+ const std::string &kernel_path = CLKernelLibrary::get().get_kernel_path();
return _compile_context.create_kernel(kernel_name, program_name, program.program, kernel_path, build_options_set, program.is_binary);
}
-
std::string CLKernelLibrary::get_program_name(const std::string &kernel_name) const
{
return opencl::ClKernelLibrary::get().program_name(kernel_name);
}
-
void CLKernelLibrary::init(std::string kernel_path, cl::Context context, cl::Device device)
{
_compile_context = CLCompileContext(context, device);
opencl::ClKernelLibrary::get().set_kernel_path(kernel_path);
}
-
void CLKernelLibrary::set_kernel_path(const std::string &kernel_path)
{
opencl::ClKernelLibrary::get().set_kernel_path(kernel_path);
}
-
cl::Context &CLKernelLibrary::context()
{
return _compile_context.context();
}
-
const cl::Device &CLKernelLibrary::get_device()
{
return _compile_context.get_device();
}
-
void CLKernelLibrary::set_device(cl::Device device)
{
_compile_context.set_device(device);
}
-
void CLKernelLibrary::set_context(cl::Context context)
{
_compile_context.set_context(context);
}
-
std::string CLKernelLibrary::get_kernel_path()
{
return opencl::ClKernelLibrary::get().kernel_path();
}
-
void CLKernelLibrary::clear_programs_cache()
{
_compile_context.clear_programs_cache();
}
-
const std::map<std::string, cl::Program> &CLKernelLibrary::get_built_programs() const
{
return _compile_context.get_built_programs();
}
-
void CLKernelLibrary::add_built_program(const std::string &built_program_name, const cl::Program &program)
{
_compile_context.add_built_program(built_program_name, program);
}
-
bool CLKernelLibrary::fp16_supported() const
{
return _compile_context.fp16_supported();
}
-
bool CLKernelLibrary::int64_base_atomics_supported() const
{
return _compile_context.int64_base_atomics_supported();
}
-
bool CLKernelLibrary::is_wbsm_supported()
{
return _compile_context.is_wbsm_supported();
}
-
std::pair<std::string, bool> CLKernelLibrary::get_program(const std::string &program_name) const
{
auto program_info = opencl::ClKernelLibrary::get().program(program_name);
return std::make_pair(std::move(program_info.program), program_info.is_binary);
}
-
size_t CLKernelLibrary::max_local_workgroup_size(const cl::Kernel &kernel) const
{
return _compile_context.max_local_workgroup_size(kernel);
}
-
cl::NDRange CLKernelLibrary::default_ndrange() const
{
return _compile_context.default_ndrange();
}
-
std::string CLKernelLibrary::get_device_version()
{
return _compile_context.get_device_version();
}
-
cl_uint CLKernelLibrary::get_num_compute_units()
{
return _compile_context.get_num_compute_units();
}
-
CLCompileContext &CLKernelLibrary::get_compile_context()
{
return _compile_context;
}
-} // namespace arm_compute
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/core/CL/CLKernels.h b/src/core/CL/CLKernels.h
index ea40239c6f..09f8109445 100644
--- a/src/core/CL/CLKernels.h
+++ b/src/core/CL/CLKernels.h
@@ -36,8 +36,6 @@
#include "src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h"
#include "src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h"
#include "src/core/CL/kernels/CLDepthToSpaceLayerKernel.h"
-#include "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h"
-#include "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h"
#include "src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h"
#include "src/core/CL/kernels/CLFFTDigitReverseKernel.h"
#include "src/core/CL/kernels/CLFFTRadixStageKernel.h"
diff --git a/src/core/CL/cl_kernels/activation_quant_helpers.h b/src/core/CL/cl_kernels/activation_quant_helpers.h
index a32e4e94a3..c420578546 100644
--- a/src/core/CL/cl_kernels/activation_quant_helpers.h
+++ b/src/core/CL/cl_kernels/activation_quant_helpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -51,7 +51,12 @@ inline TYPE lu_brelu_op(TYPE x)
// Hard Swish Activation
inline TYPE hard_swish_op(TYPE x)
{
- return (x * ((min(max((TYPE)(x + (TYPE)3.f), (TYPE)0.f), (TYPE)6.f)) * (TYPE)0.166666667f));
+ return (x * ((min(max((TYPE)(x + (TYPE)3.f), (TYPE)0.f), (TYPE)6.f)) * (TYPE)0.166666667f));
+}
+
+inline TYPE identiy_op(TYPE x)
+{
+ return x;
}
#define ACTIVATION_OP2(op, x) op##_op(x)
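The new pass-through slots into the existing token-pasting dispatch; for example (names exactly as defined in this header):

// ACTIVATION_OP2(relu, x)    expands to relu_op(x)
// ACTIVATION_OP2(identiy, x) expands to identiy_op(x), i.e. x unchanged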
diff --git a/src/core/CL/cl_kernels/depthwise_convolution.cl b/src/core/CL/cl_kernels/depthwise_convolution.cl
deleted file mode 100644
index 22a38e7094..0000000000
--- a/src/core/CL/cl_kernels/depthwise_convolution.cl
+++ /dev/null
@@ -1,1781 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers.h"
-
-#include "activation_float_helpers.h"
-
-/** Get the pointer position at a certain offset in x and y direction.
- *
- * @param[in] ptr Pointer to the starting position of the buffer
- * @param[in] x Relative X position
- * @param[in] y Relative Y position
- * @param[in] stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] stride_y Stride of the source tensor in Y dimension (in bytes)
- *
- * @return a pointer to the byte at the requested x/y offset
- */
-inline __global uchar *ptr_offset(__global uchar *ptr, const int x, const int y, const int stride_x, const int stride_y)
-{
- return ptr + x * stride_x + y * stride_y;
-}
-
-#if(DILATION_X == 1 && DILATION_Y == 1)
-
-#define CONVOLUTION1x3_2X1_STRIDE1(acc, src0, weights_row0) \
- ({ \
- acc.s0 = fma(src0.s0, weights_row0.s0, acc.s0); \
- acc.s0 = fma(src0.s1, weights_row0.s1, acc.s0); \
- acc.s0 = fma(src0.s2, weights_row0.s2, acc.s0); \
- acc.s1 = fma(src0.s1, weights_row0.s0, acc.s1); \
- acc.s1 = fma(src0.s2, weights_row0.s1, acc.s1); \
- acc.s1 = fma(src0.s3, weights_row0.s2, acc.s1); \
- })
-
-#define CONVOLUTION1x3_4X1_STRIDE1(acc, src0, weights_row0) \
- ({ \
- acc.s0 = fma(src0.s0, weights_row0.s0, acc.s0); \
- acc.s0 = fma(src0.s1, weights_row0.s1, acc.s0); \
- acc.s0 = fma(src0.s2, weights_row0.s2, acc.s0); \
- acc.s1 = fma(src0.s1, weights_row0.s0, acc.s1); \
- acc.s1 = fma(src0.s2, weights_row0.s1, acc.s1); \
- acc.s1 = fma(src0.s3, weights_row0.s2, acc.s1); \
- acc.s2 = fma(src0.s2, weights_row0.s0, acc.s2); \
- acc.s2 = fma(src0.s3, weights_row0.s1, acc.s2); \
- acc.s2 = fma(src0.s4, weights_row0.s2, acc.s2); \
- acc.s3 = fma(src0.s3, weights_row0.s0, acc.s3); \
- acc.s3 = fma(src0.s4, weights_row0.s1, acc.s3); \
- acc.s3 = fma(src0.s5, weights_row0.s2, acc.s3); \
- })
-
-#define CONVOLUTION1x3_2X1_STRIDE2(acc, src0, src1, weights_row0) \
- ({ \
- acc.s0 = fma(src0.s0, weights_row0.s0, acc.s0); \
- acc.s0 = fma(src0.s1, weights_row0.s1, acc.s0); \
- acc.s0 = fma(src0.s2, weights_row0.s2, acc.s0); \
- acc.s1 = fma(src0.s2, weights_row0.s0, acc.s1); \
- acc.s1 = fma(src0.s3, weights_row0.s1, acc.s1); \
- acc.s1 = fma(src1.s0, weights_row0.s2, acc.s1); \
- })
-
-#define CONVOLUTION1x3_4X1_STRIDE2(acc, src0, src1, weights_row0) \
- ({ \
- acc.s0 = fma(src0.s0, weights_row0.s0, acc.s0); \
- acc.s0 = fma(src0.s1, weights_row0.s1, acc.s0); \
- acc.s0 = fma(src0.s2, weights_row0.s2, acc.s0); \
- acc.s1 = fma(src0.s2, weights_row0.s0, acc.s1); \
- acc.s1 = fma(src0.s3, weights_row0.s1, acc.s1); \
- acc.s1 = fma(src0.s4, weights_row0.s2, acc.s1); \
- acc.s2 = fma(src0.s4, weights_row0.s0, acc.s2); \
- acc.s2 = fma(src0.s5, weights_row0.s1, acc.s2); \
- acc.s2 = fma(src0.s6, weights_row0.s2, acc.s2); \
- acc.s3 = fma(src0.s6, weights_row0.s0, acc.s3); \
- acc.s3 = fma(src0.s7, weights_row0.s1, acc.s3); \
- acc.s3 = fma(src1.s0, weights_row0.s2, acc.s3); \
- })
-
-#else /* DILATION_X==1 && DILATION_Y==1 */
-
-#define CONVOLUTION1x3_2X1_STRIDE1(acc, src0_left, src0_mid, src0_right, weights_row0) \
- ({ \
- acc.s0 = fma(src0_left.s0, weights_row0.s0, acc.s0); \
- acc.s0 = fma(src0_mid.s0, weights_row0.s1, acc.s0); \
- acc.s0 = fma(src0_right.s0, weights_row0.s2, acc.s0); \
- acc.s1 = fma(src0_left.s1, weights_row0.s0, acc.s1); \
- acc.s1 = fma(src0_mid.s1, weights_row0.s1, acc.s1); \
- acc.s1 = fma(src0_right.s1, weights_row0.s2, acc.s1); \
- })
-
-#define CONVOLUTION1x3_2X1_STRIDE2(acc, src0_left, src0_mid, src0_right, weights_row0) \
- ({ \
- acc.s0 = fma(src0_left.s0, weights_row0.s0, acc.s0); \
- acc.s0 = fma(src0_mid.s0, weights_row0.s1, acc.s0); \
- acc.s0 = fma(src0_right.s0, weights_row0.s2, acc.s0); \
- acc.s1 = fma(src0_left.s2, weights_row0.s0, acc.s1); \
- acc.s1 = fma(src0_mid.s2, weights_row0.s1, acc.s1); \
- acc.s1 = fma(src0_right.s2, weights_row0.s2, acc.s1); \
- })
-
-#define CONVOLUTION1x3_4X1_STRIDE1(acc, src0_left, src0_mid, src0_right, weights_row0) \
- ({ \
- acc.s0 = fma(src0_left.s0, weights_row0.s0, acc.s0); \
- acc.s0 = fma(src0_mid.s0, weights_row0.s1, acc.s0); \
- acc.s0 = fma(src0_right.s0, weights_row0.s2, acc.s0); \
- acc.s1 = fma(src0_left.s1, weights_row0.s0, acc.s1); \
- acc.s1 = fma(src0_mid.s1, weights_row0.s1, acc.s1); \
- acc.s1 = fma(src0_right.s1, weights_row0.s2, acc.s1); \
- acc.s2 = fma(src0_left.s2, weights_row0.s0, acc.s2); \
- acc.s2 = fma(src0_mid.s2, weights_row0.s1, acc.s2); \
- acc.s2 = fma(src0_right.s2, weights_row0.s2, acc.s2); \
- acc.s3 = fma(src0_left.s3, weights_row0.s0, acc.s3); \
- acc.s3 = fma(src0_mid.s3, weights_row0.s1, acc.s3); \
- acc.s3 = fma(src0_right.s3, weights_row0.s2, acc.s3); \
- })
-
-#define CONVOLUTION1x3_4X1_STRIDE2(acc, src0_left, src0_mid, src0_right, weights_row0) \
- ({ \
- acc.s0 = fma(src0_left.s0, weights_row0.s0, acc.s0); \
- acc.s0 = fma(src0_mid.s0, weights_row0.s1, acc.s0); \
- acc.s0 = fma(src0_right.s0, weights_row0.s2, acc.s0); \
- acc.s1 = fma(src0_left.s2, weights_row0.s0, acc.s1); \
- acc.s1 = fma(src0_mid.s2, weights_row0.s1, acc.s1); \
- acc.s1 = fma(src0_right.s2, weights_row0.s2, acc.s1); \
- acc.s2 = fma(src0_left.s4, weights_row0.s0, acc.s2); \
- acc.s2 = fma(src0_mid.s4, weights_row0.s1, acc.s2); \
- acc.s2 = fma(src0_right.s4, weights_row0.s2, acc.s2); \
- acc.s3 = fma(src0_left.s6, weights_row0.s0, acc.s3); \
- acc.s3 = fma(src0_mid.s6, weights_row0.s1, acc.s3); \
- acc.s3 = fma(src0_right.s6, weights_row0.s2, acc.s3); \
- })
-
-#endif /* DILATION_X==1 && DILATION_Y==1 */
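For reference, what one of the (removed) macros computes in the non-dilated case:

// CONVOLUTION1x3_2X1_STRIDE2(acc, src0, src1, w) produces two horizontally
// adjacent outputs from input taps two elements apart:
//   acc.s0 += src0.s0 * w.s0 + src0.s1 * w.s1 + src0.s2 * w.s2   // taps 0,1,2
//   acc.s1 += src0.s2 * w.s0 + src0.s3 * w.s1 + src1.s0 * w.s2   // taps 2,3,4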
-
-#if defined(DEPTH_MULTIPLIER) && defined(DST_CHANNELS) && defined(IS_F32)
-#if defined(CONV_STRIDE_X)
-
-#if CONV_STRIDE_X == 1
-#define convolution1x3 convolution1x3_stride_1
-#elif CONV_STRIDE_X == 2
-#define convolution1x3 convolution1x3_stride_2
-#elif CONV_STRIDE_X == 3
-#define convolution1x3 convolution1x3_stride_3
-#else /* CONV_STRIDE_X */
-#error "Stride not supported"
-#endif /* CONV_STRIDE_X */
-
-/** Compute a 1D horizontal convolution of size 3 and stride 1 for floating point type.
- *
- * @param[in] left_pixel Pointer to the left pixel.
- * @param[in] left_coeff Weight of the left pixel
- * @param[in] middle_coeff Weight of the middle pixel
- * @param[in] right_coeff Weight of the right pixel
- *
- * @return a float2 containing 2 convolved values.
- */
-inline float2 convolution1x3_stride_1(__global const uchar *left_pixel,
- const float left_coeff,
- const float middle_coeff,
- const float right_coeff)
-{
-#if(DILATION_X == 1 && DILATION_Y == 1)
- float4 temp = vload4(0, (__global float *)left_pixel);
-
- float2 left = CONVERT(temp.s01, float2);
- float2 middle = CONVERT(temp.s12, float2);
- float2 right = CONVERT(temp.s23, float2);
- return left * (float2)left_coeff + middle * (float2)middle_coeff + right * (float2)right_coeff;
-#else /* DILATION_X==1 && DILATION_Y==1 */
- return vload2(0, (__global float *)left_pixel) * (float2)left_coeff
- + vload2(0, (__global float *)(left_pixel) + DILATION_X) * (float2)middle_coeff
- + vload2(0, (__global float *)(left_pixel) + 2 * DILATION_X) * (float2)right_coeff;
-#endif /* DILATION_X==1 && DILATION_Y==1 */
-}
-
-/** Compute a 1D horizontal convolution of size 3 and stride 2 for floating point type.
- *
- * @param[in] left_pixel Pointer to the left pixel.
- * @param[in] left_coeff Weight of the left pixel
- * @param[in] middle_coeff Weight of the middle pixel
- * @param[in] right_coeff Weight of the right pixel
- *
- * @return a float2 containing 2 convolved values.
- */
-inline float2 convolution1x3_stride_2(__global const uchar *left_pixel,
- const float left_coeff,
- const float middle_coeff,
- const float right_coeff)
-{
-#if(DILATION_X == 1 && DILATION_Y == 1)
- float4 temp0 = vload4(0, (__global float *)left_pixel);
- float temp1 = *((__global float *)(left_pixel + 4 * sizeof(float)));
-
- float2 left = CONVERT(temp0.s02, float2);
- float2 middle = CONVERT(temp0.s13, float2);
- float2 right = CONVERT((float2)(temp0.s2, temp1), float2);
-
- return left * (float2)left_coeff + middle * (float2)middle_coeff + right * (float2)right_coeff;
-#else /* DILATION_X==1 && DILATION_Y==1 */
- __global float *left_pixel_float = (__global float *)left_pixel;
-
- return vload4(0, left_pixel_float).s02 * (float2)left_coeff
- + vload4(0, left_pixel_float + DILATION_X).s02 * (float2)middle_coeff
- + vload4(0, left_pixel_float + DILATION_X * 2).s02 * (float2)right_coeff;
-
-#endif /* DILATION_X==1 && DILATION_Y==1 */
-}
-
-/** Compute a 1D horizontal convolution of size 3 and stride 3 for floating point type.
- *
- * @param[in] left_pixel Pointer to the left pixel.
- * @param[in] left_coeff Weight of the left pixel
- * @param[in] middle_coeff Weight of the middle pixel
- * @param[in] right_coeff Weight of the right pixel
- *
- * @return a float2 containing 2 convolved values.
- */
-inline float2 convolution1x3_stride_3(__global const uchar *left_pixel,
- const float left_coeff,
- const float middle_coeff,
- const float right_coeff)
-{
-#if(DILATION_X == 1 && DILATION_Y == 1)
- float4 temp0 = vload4(0, (__global float *)left_pixel);
- float2 temp1 = vload2(0, (__global float *)(left_pixel + 4 * sizeof(float)));
-
- float2 left = CONVERT(temp0.s03, float2);
- float2 middle = CONVERT((float2)(temp0.s1, temp1.s0), float2);
- float2 right = CONVERT((float2)(temp0.s2, temp1.s1), float2);
-
- return left * (float2)left_coeff + middle * (float2)middle_coeff + right * (float2)right_coeff;
-#else /* DILATION_X==1 && DILATION_Y==1 */
- __global float *left_pixel_float = (__global float *)left_pixel;
-
- return (float2)(*left_pixel_float, *(left_pixel_float + 3)) * (float2)left_coeff
- + (float2)(*(left_pixel_float + DILATION_X), *(left_pixel_float + DILATION_X + 3)) * (float2)middle_coeff
- + (float2)(*(left_pixel_float + DILATION_X * 2), *(left_pixel_float + DILATION_X * 2 + 3)) * (float2)right_coeff;
-#endif /* DILATION_X==1 && DILATION_Y==1 */
-}
-
-/** Apply a 3x3 convolution matrix to a single channel F32 input image and return the result.
- *
- * Convolution matrix layout:
- *
- * [ mat0, mat1, mat2 ]\n
- * [ mat3, mat4, mat5 ]\n
- * [ mat6, mat7, mat8 ]\n
- *
- * @param[in] src A pointer to the source image data
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] mat0 Coefficient from the convolution matrix
- * @param[in] mat1 Coefficient from the convolution matrix
- * @param[in] mat2 Coefficient from the convolution matrix
- * @param[in] mat3 Coefficient from the convolution matrix
- * @param[in] mat4 Coefficient from the convolution matrix
- * @param[in] mat5 Coefficient from the convolution matrix
- * @param[in] mat6 Coefficient from the convolution matrix
- * @param[in] mat7 Coefficient from the convolution matrix
- * @param[in] mat8 Coefficient from the convolution matrix
- *
- * @return a float2 containing 2 convolved values.
- */
-inline float2 convolution3x3(
- __global const uchar *src,
- unsigned int src_stride_y,
- const float mat0, const float mat1, const float mat2,
- const float mat3, const float mat4, const float mat5,
- const float mat6, const float mat7, const float mat8)
-{
- float2 pixels;
-
- pixels = convolution1x3((src + 0 * DILATION_Y * src_stride_y), mat0, mat1, mat2);
- pixels += convolution1x3((src + 1 * DILATION_Y * src_stride_y), mat3, mat4, mat5);
- pixels += convolution1x3((src + 2 * DILATION_Y * src_stride_y), mat6, mat7, mat8);
-
- return pixels;
-}
-
-/** This OpenCL kernel computes the depthwise convolution 3x3
- *
- * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: F32
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: F32
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: F16/F32
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-__kernel void depthwise_convolution_3x3(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif //defined(HAS_BIAS)
-)
-{
- Image dst = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
-
- float2 pixels = 0.0f;
-
- // Extract channel and linearized batch indices
- const int channel = get_global_id(2) % DST_CHANNELS;
- const int batch = get_global_id(2) / DST_CHANNELS;
- // Load relevant input and weights data (accounts for the depth multiplier when indexing the input; OFM = IFM * DEPTH_MULTIPLIER)
-
- __global uchar *weights_addr = weights.ptr + get_global_id(0) * weights_step_x + get_global_id(1) * weights_step_y + channel * weights_step_z;
-
- __global uchar *src_addr = src_ptr + get_global_id(0) * src_step_x + get_global_id(1) * src_step_y + get_global_id(2) * src_step_z - batch * (DST_CHANNELS / DEPTH_MULTIPLIER) *
- (DEPTH_MULTIPLIER - 1) * src_step_z - (channel - (channel / DEPTH_MULTIPLIER)) * src_step_z;
-
- // Load the weights
- float3 weights_values0 = vload3(0, (__global float *)(weights_addr + 0 * weights_stride_y));
- float3 weights_values1 = vload3(0, (__global float *)(weights_addr + 1 * weights_stride_y));
- float3 weights_values2 = vload3(0, (__global float *)(weights_addr + 2 * weights_stride_y));
-
- pixels = convolution3x3(src_addr, src_stride_y,
- weights_values0.s0, weights_values0.s1, weights_values0.s2,
- weights_values1.s0, weights_values1.s1, weights_values1.s2,
- weights_values2.s0, weights_values2.s1, weights_values2.s2);
-#if defined(HAS_BIAS)
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-
- float bias = *((__global float *)(vector_offset(&biases, channel)));
-
- pixels += (float2)bias;
-#endif //defined(HAS_BIAS)
-
- vstore2(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels, A_VAL, B_VAL), 0, (__global float *)dst.ptr);
-}
-#endif //defined(CONV_STRIDE_X)
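A worked example of the channel/batch index math used by the kernel above, with hypothetical values DST_CHANNELS = 8 and DEPTH_MULTIPLIER = 2 (so IFM = 4):

// get_global_id(2) = 13
// channel = 13 % 8 = 5                        // output channel
// batch   = 13 / 8 = 1                        // linearized batch index
// input channel = channel / DEPTH_MULTIPLIER  // = 2; output channels 4 and 5
//                                             //   both read input channel 2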
-
-#if(DILATION_X > 1 || DILATION_Y > 1)
-
-/** Perform 3x3 convolution for stride_x=1 and stride_y=1 when DILATION_X>1 or DILATION_Y>1 for F32
- *
- * @param[in] src_addr Pointer to the starting position of where to perform the convolution
- * @param[in] stride_x_bytes Stride of the source tensor in X dimension (in bytes)
- * @param[in] stride_y_bytes Stride of the source tensor in Y dimension (in bytes)
- * @param[in] y_offset Offset from the source tensor from which to start the convolution
- * @param[in] weights_addr Pointer from where to get the weights
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension
- */
-inline float2 convolution_3x3_dilation_stridex1_stridey1_f32(__global uchar *src_addr, const int stride_x_bytes, const int stride_y_bytes,
- const int y_offset, __global uchar *weights_addr, const int weights_stride_y)
-{
- // Load the weights
- float3 weights_row0 = vload3(0, (__global float *)(weights_addr + 0 * weights_stride_y));
- float3 weights_row1 = vload3(0, (__global float *)(weights_addr + 1 * weights_stride_y));
- float3 weights_row2 = vload3(0, (__global float *)(weights_addr + 2 * weights_stride_y));
-
- float2 pixels0 = 0.0f;
-
- float2 src00_left = vload2(0, (__global float *)ptr_offset(src_addr, 0, y_offset, stride_x_bytes, stride_y_bytes)); // Row0
- float2 src00_mid = vload2(0, (__global float *)ptr_offset(src_addr, DILATION_X, y_offset, stride_x_bytes, stride_y_bytes));
- float2 src00_right = vload2(0, (__global float *)ptr_offset(src_addr, 2 * DILATION_X, y_offset, stride_x_bytes, stride_y_bytes));
-
- float2 src10_left = vload2(0, (__global float *)ptr_offset(src_addr, 0, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes)); // Row1
- float2 src10_mid = vload2(0, (__global float *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes));
- float2 src10_right = vload2(0, (__global float *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes));
-
- float2 src20_left = vload2(0, (__global float *)ptr_offset(src_addr, 0, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); // Row2
- float2 src20_mid = vload2(0, (__global float *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes));
- float2 src20_right = vload2(0, (__global float *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes));
-
- CONVOLUTION1x3_2X1_STRIDE1(pixels0, src00_left, src00_mid, src00_right, weights_row0);
- CONVOLUTION1x3_2X1_STRIDE1(pixels0, src10_left, src10_mid, src10_right, weights_row1);
- CONVOLUTION1x3_2X1_STRIDE1(pixels0, src20_left, src20_mid, src20_right, weights_row2);
-
- return pixels0;
-}
-
-/** Perform 3x3 convolution for stride_x=2 and stride_y=2 when DILATION_X>1 or DILATION_Y>1 for F32
- *
- * @param[in] src_addr Pointer to the starting position of where to perform the convolution
- * @param[in] stride_x_bytes Stride of the source tensor in X dimension (in bytes)
- * @param[in] stride_y_bytes Stride of the source tensor in Y dimension (in bytes)
- * @param[in] y_offset Offset from the source tensor from which to start the convolution
- * @param[in] weights_addr Pointer from where to get the weights
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension
- */
-inline float2 convolution_3x3_dilation_stridex2_stridey2_f32(__global uchar *src_addr, const int stride_x_bytes, const int stride_y_bytes,
- const int y_offset, __global uchar *weights_addr, const int weights_stride_y)
-{
- // Load the weights
- float3 weights_row0 = vload3(0, (__global float *)(weights_addr + 0 * weights_stride_y));
- float3 weights_row1 = vload3(0, (__global float *)(weights_addr + 1 * weights_stride_y));
- float3 weights_row2 = vload3(0, (__global float *)(weights_addr + 2 * weights_stride_y));
-
- float2 pixels0 = 0.0f;
-
- float3 src00_left = vload3(0, (__global float *)ptr_offset(src_addr, 0, y_offset, stride_x_bytes, stride_y_bytes)); // Row0
- float3 src00_mid = vload3(0, (__global float *)ptr_offset(src_addr, DILATION_X, y_offset, stride_x_bytes, stride_y_bytes));
- float3 src00_right = vload3(0, (__global float *)ptr_offset(src_addr, 2 * DILATION_X, y_offset, stride_x_bytes, stride_y_bytes));
-
- float3 src10_left = vload3(0, (__global float *)ptr_offset(src_addr, 0, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes)); // Row1
- float3 src10_mid = vload3(0, (__global float *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes));
- float3 src10_right = vload3(0, (__global float *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes));
-
- float3 src20_left = vload3(0, (__global float *)ptr_offset(src_addr, 0, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); // Row2
- float3 src20_mid = vload3(0, (__global float *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes));
- float3 src20_right = vload3(0, (__global float *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes));
-
- CONVOLUTION1x3_2X1_STRIDE2(pixels0, src00_left, src00_mid, src00_right, weights_row0);
- CONVOLUTION1x3_2X1_STRIDE2(pixels0, src10_left, src10_mid, src10_right, weights_row1);
- CONVOLUTION1x3_2X1_STRIDE2(pixels0, src20_left, src20_mid, src20_right, weights_row2);
-
- return pixels0;
-}
-
-#endif /* (DILATION_X > 1 || DILATION_Y > 1) */
-
-/** This OpenCL kernel is optimized for Bifrost architectures and computes the depthwise convolution 3x3 when both
- * stride_x and stride_y are equal to 1
- *
- * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
- * @note If activation function is enabled, the data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float.
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z                          Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z                            src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes     The offset of the first element in the source tensor
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: F32
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z                            dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: F32
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z                        weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: F32
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-__kernel void depthwise_convolution_3x3_stridex1_stridey1_f32(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif //defined(HAS_BIAS)
-)
-{
- Image dst = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
-
- float2 pixels0 = 0.0f;
- float2 pixels1 = 0.0f;
- float2 pixels2 = 0.0f;
- float2 pixels3 = 0.0f;
-
- // Extract channel and linearized batch indices
- const int channel = get_global_id(2) % DST_CHANNELS;
- const int batch = get_global_id(2) / DST_CHANNELS;
- // Load relevant input and weights data (Accounts for the depth multiplier when indexing the input, OFM = IFM * DEPTH_MULTIPLIER)
- __global uchar *weights_addr = weights.ptr + get_global_id(0) * weights_step_x + get_global_id(1) * weights_step_y + channel * weights_step_z;
- __global uchar *src_addr = src_ptr + get_global_id(0) * src_step_x + get_global_id(1) * src_step_y + get_global_id(2) * src_step_z - batch * (DST_CHANNELS / DEPTH_MULTIPLIER) *
- (DEPTH_MULTIPLIER - 1) * src_step_z - (channel - (channel / DEPTH_MULTIPLIER)) * src_step_z;
-
-#if(DILATION_X == 1 && DILATION_Y == 1)
- // Load the weights
- float3 weights_row0 = vload3(0, (__global float *)(weights_addr + 0 * weights_stride_y));
- float3 weights_row1 = vload3(0, (__global float *)(weights_addr + 1 * weights_stride_y));
- float3 weights_row2 = vload3(0, (__global float *)(weights_addr + 2 * weights_stride_y));
-
- // Note: Since each work-item computes 4x2 elements, we need to load 6 rows from the input tensor
- float4 src00 = vload4(0, (__global float *)(src_addr + 0 * src_stride_y)); // Row0
- float4 src10 = vload4(0, (__global float *)(src_addr + 1 * src_stride_y)); // Row1
- float4 src20 = vload4(0, (__global float *)(src_addr + 2 * src_stride_y)); // Row2
- float4 src30 = vload4(0, (__global float *)(src_addr + 3 * src_stride_y)); // Row3
- float4 src40 = vload4(0, (__global float *)(src_addr + 4 * src_stride_y)); // Row4
- float4 src50 = vload4(0, (__global float *)(src_addr + 5 * src_stride_y)); // Row5
-
- CONVOLUTION1x3_2X1_STRIDE1(pixels0, src00, weights_row0);
- CONVOLUTION1x3_2X1_STRIDE1(pixels0, src10, weights_row1);
- CONVOLUTION1x3_2X1_STRIDE1(pixels0, src20, weights_row2);
- CONVOLUTION1x3_2X1_STRIDE1(pixels1, src10, weights_row0);
- CONVOLUTION1x3_2X1_STRIDE1(pixels1, src20, weights_row1);
- CONVOLUTION1x3_2X1_STRIDE1(pixels1, src30, weights_row2);
- CONVOLUTION1x3_2X1_STRIDE1(pixels2, src20, weights_row0);
- CONVOLUTION1x3_2X1_STRIDE1(pixels2, src30, weights_row1);
- CONVOLUTION1x3_2X1_STRIDE1(pixels2, src40, weights_row2);
- CONVOLUTION1x3_2X1_STRIDE1(pixels3, src30, weights_row0);
- CONVOLUTION1x3_2X1_STRIDE1(pixels3, src40, weights_row1);
- CONVOLUTION1x3_2X1_STRIDE1(pixels3, src50, weights_row2);
-
-#else /* DILATION_X==1 && DILATION_Y==1 */
-
- //3x3 Convolution of elements starting in 0th row
- pixels0 = convolution_3x3_dilation_stridex1_stridey1_f32(src_addr, src_stride_x, src_stride_y, 0, weights_addr, weights_stride_y);
- //3x3 Convolution of elements starting in 1st row
- pixels1 = convolution_3x3_dilation_stridex1_stridey1_f32(src_addr, src_stride_x, src_stride_y, 1, weights_addr, weights_stride_y);
- //3x3 Convolution of elements starting in 2nd row
- pixels2 = convolution_3x3_dilation_stridex1_stridey1_f32(src_addr, src_stride_x, src_stride_y, 2, weights_addr, weights_stride_y);
- //3x3 Convolution of elements starting in 3rd row
- pixels3 = convolution_3x3_dilation_stridex1_stridey1_f32(src_addr, src_stride_x, src_stride_y, 3, weights_addr, weights_stride_y);
-
-#endif /* DILATION_X==1 && DILATION_Y==1 */
-
-#ifdef HAS_BIAS
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-
- float bias = *((__global float *)(vector_offset(&biases, channel)));
-
- pixels0 += (float2)bias;
- pixels1 += (float2)bias;
- pixels2 += (float2)bias;
- pixels3 += (float2)bias;
-#endif /* defined(HAS_BIAS) */
-
- vstore2(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels0, A_VAL, B_VAL), 0, (__global float *)(dst.ptr + 0 * dst_stride_y));
- vstore2(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels1, A_VAL, B_VAL), 0, (__global float *)(dst.ptr + 1 * dst_stride_y));
- vstore2(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels2, A_VAL, B_VAL), 0, (__global float *)(dst.ptr + 2 * dst_stride_y));
- vstore2(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels3, A_VAL, B_VAL), 0, (__global float *)(dst.ptr + 3 * dst_stride_y));
-}
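A side note on the six row loads in the non-dilated path above: with a K-tap window, stride s and dilation d, producing R output rows needs

    rows_in = (R - 1) * s + (K - 1) * d + 1 = (4 - 1) * 1 + (3 - 1) * 1 + 1 = 6

which is exactly the Row0..Row5 vloads issued for the 4x2 output tile.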
-
-/** This OpenCL kernel is optimized for Bifrost architectures and computes the depthwise convolution 3x3 when both
- * stride_x and stride_y are equal to 2
- *
- * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
- * @note If activation function is enabled, the data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float.
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z                          Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z                            src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes     The offset of the first element in the source tensor
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: F32
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z                            dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: F32
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z                        weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: F32
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-__kernel void depthwise_convolution_3x3_stridex2_stridey2_f32(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif //defined(HAS_BIAS)
-)
-{
- Image dst = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
-
- float2 pixels0 = 0.0f;
- float2 pixels1 = 0.0f;
-
- // Extract channel and linearized batch indices
- const int channel = get_global_id(2) % DST_CHANNELS;
- const int batch = get_global_id(2) / DST_CHANNELS;
- // Load relevant input and weights data (Accounts for the depth multiplier when indexing the input, OFM = IFM * DEPTH_MULTIPLIER)
- __global uchar *weights_addr = weights.ptr + get_global_id(0) * weights_step_x + get_global_id(1) * weights_step_y + channel * weights_step_z;
- __global uchar *src_addr = src_ptr + get_global_id(0) * src_step_x + get_global_id(1) * src_step_y + get_global_id(2) * src_step_z - batch * (DST_CHANNELS / DEPTH_MULTIPLIER) *
- (DEPTH_MULTIPLIER - 1) * src_step_z - (channel - (channel / DEPTH_MULTIPLIER)) * src_step_z;
-
-#if(DILATION_X == 1 && DILATION_Y == 1)
-
- // Load the weights
- float3 weights_row0 = vload3(0, (__global float *)(weights_addr + 0 * weights_stride_y));
- float3 weights_row1 = vload3(0, (__global float *)(weights_addr + 1 * weights_stride_y));
- float3 weights_row2 = vload3(0, (__global float *)(weights_addr + 2 * weights_stride_y));
-
- // Note: Since each work-item computes 2x2 elements, we need to load 5 rows from the input tensor
- float4 src00 = vload4(0, (__global float *)(src_addr + 0 * src_stride_y)); // Row0
- float2 src01 = vload2(2, (__global float *)(src_addr + 0 * src_stride_y)); // Row0
- float4 src10 = vload4(0, (__global float *)(src_addr + 1 * src_stride_y)); // Row1
- float2 src11 = vload2(2, (__global float *)(src_addr + 1 * src_stride_y)); // Row1
- float4 src20 = vload4(0, (__global float *)(src_addr + 2 * src_stride_y)); // Row2
- float2 src21 = vload2(2, (__global float *)(src_addr + 2 * src_stride_y)); // Row2
- float4 src30 = vload4(0, (__global float *)(src_addr + 3 * src_stride_y)); // Row3
- float2 src31 = vload2(2, (__global float *)(src_addr + 3 * src_stride_y)); // Row3
- float4 src40 = vload4(0, (__global float *)(src_addr + 4 * src_stride_y)); // Row4
- float2 src41 = vload2(2, (__global float *)(src_addr + 4 * src_stride_y)); // Row4
-
- CONVOLUTION1x3_2X1_STRIDE2(pixels0, src00, src01, weights_row0);
- CONVOLUTION1x3_2X1_STRIDE2(pixels0, src10, src11, weights_row1);
- CONVOLUTION1x3_2X1_STRIDE2(pixels0, src20, src21, weights_row2);
- CONVOLUTION1x3_2X1_STRIDE2(pixels1, src20, src21, weights_row0);
- CONVOLUTION1x3_2X1_STRIDE2(pixels1, src30, src31, weights_row1);
- CONVOLUTION1x3_2X1_STRIDE2(pixels1, src40, src41, weights_row2);
-
-#else /* DILATION_X==1 && DILATION_Y==1 */
-
- //3x3 Convolution of elements starting in 0th row
- pixels0 = convolution_3x3_dilation_stridex2_stridey2_f32(src_addr, src_stride_x, src_stride_y, 0, weights_addr, weights_stride_y);
- //3x3 Convolution of elements starting in 2nd row
- pixels1 = convolution_3x3_dilation_stridex2_stridey2_f32(src_addr, src_stride_x, src_stride_y, 2, weights_addr, weights_stride_y);
-#endif /* DILATION_X==1 && DILATION_Y==1 */
-
-#ifdef HAS_BIAS
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-
- float bias = *((__global float *)(vector_offset(&biases, channel)));
-
- pixels0 += (float2)bias;
- pixels1 += (float2)bias;
-#endif /* defined(HAS_BIAS) */
-
- vstore2(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels0, A_VAL, B_VAL), 0, (__global float *)(dst.ptr + 0 * dst_stride_y));
- vstore2(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels1, A_VAL, B_VAL), 0, (__global float *)(dst.ptr + 1 * dst_stride_y));
-}
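The same arithmetic explains the stride-2 loads above: (2 - 1) * 2 + (3 - 1) * 1 + 1 = 5 input rows for the 2x2 output tile, and along x the two stride-2 outputs touch columns 0..4, hence each row is fetched as a vload4 plus a vload2 at offset 2 (elements 4 and 5, the last presumably slack).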
-
-#endif // defined(DEPTH_MULTIPLIER) && defined(DST_CHANNELS) && defined(IS_F32)
-
-#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(DEPTH_MULTIPLIER) && defined(DST_CHANNELS) && defined(IS_F16)
-#if defined(CONV_STRIDE_X)
-#if CONV_STRIDE_X == 1
-#define convolution1x3_f16 convolution1x3_stride_1_f16
-#elif CONV_STRIDE_X == 2
-#define convolution1x3_f16 convolution1x3_stride_2_f16
-#elif CONV_STRIDE_X == 3
-#define convolution1x3_f16 convolution1x3_stride_3_f16
-#else /* CONV_STRIDE_X */
-#error "Stride not supported"
-#endif /* CONV_STRIDE_X */
-
-#if(DILATION_X > 1 || DILATION_Y > 1)
-
-/** Perform 3x3 convolution for stride_x=1 and stride_y=1 when DILATION_X>1 or DILATION_Y>1 for f16
- *
- * @param[in] src_addr Pointer to the starting position of where to perform the convolution
- * @param[in] stride_x_bytes   Stride of the source tensor in X dimension (in bytes)
- * @param[in] stride_y_bytes   Stride of the source tensor in Y dimension (in bytes)
- * @param[in] y_offset         Offset from the source tensor from which to start convolution
- * @param[in] weights_addr     Pointer from where to get the weights
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- */
-inline half4 convolution_3x3_dilation_stridex1_stridey1_f16(__global uchar *src_addr, const int stride_x_bytes, const int stride_y_bytes,
- const int y_offset, __global uchar *weights_addr, const int weights_stride_y)
-{
- // Load the weights
- half3 weights_row0 = vload3(0, (__global half *)(weights_addr + 0 * weights_stride_y));
- half3 weights_row1 = vload3(0, (__global half *)(weights_addr + 1 * weights_stride_y));
- half3 weights_row2 = vload3(0, (__global half *)(weights_addr + 2 * weights_stride_y));
-
- half4 pixels0 = 0.0f;
-
- half4 src00_left = vload4(0, (__global half *)ptr_offset(src_addr, 0, y_offset, stride_x_bytes, stride_y_bytes)); // Row0
- half4 src00_mid = vload4(0, (__global half *)ptr_offset(src_addr, DILATION_X, y_offset, stride_x_bytes, stride_y_bytes));
- half4 src00_right = vload4(0, (__global half *)ptr_offset(src_addr, 2 * DILATION_X, y_offset, stride_x_bytes, stride_y_bytes));
-
- half4 src10_left = vload4(0, (__global half *)ptr_offset(src_addr, 0, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes)); // Row1
- half4 src10_mid = vload4(0, (__global half *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes));
- half4 src10_right = vload4(0, (__global half *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes));
-
- half4 src20_left = vload4(0, (__global half *)ptr_offset(src_addr, 0, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); // Row2
- half4 src20_mid = vload4(0, (__global half *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes));
- half4 src20_right = vload4(0, (__global half *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes));
-
- CONVOLUTION1x3_4X1_STRIDE1(pixels0, src00_left, src00_mid, src00_right, weights_row0);
- CONVOLUTION1x3_4X1_STRIDE1(pixels0, src10_left, src10_mid, src10_right, weights_row1);
- CONVOLUTION1x3_4X1_STRIDE1(pixels0, src20_left, src20_mid, src20_right, weights_row2);
-
- return pixels0;
-}
-
-/** Perform 3x3 convolution for stride_x=2 and stride_y=2 when DILATION_X>1 or DILATION_Y>1 for F16
- *
- * @param[in] src_addr Pointer to the starting position of where to perform the convolution
- * @param[in] stride_x_bytes   Stride of the source tensor in X dimension (in bytes)
- * @param[in] stride_y_bytes   Stride of the source tensor in Y dimension (in bytes)
- * @param[in] y_offset         Offset from the source tensor from which to start convolution
- * @param[in] weights_addr     Pointer from where to get the weights
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- */
-inline half4 convolution_3x3_dilation_stridex2_stridey2_f16(__global uchar *src_addr, const int stride_x_bytes, const int stride_y_bytes,
- const int y_offset, __global uchar *weights_addr, const int weights_stride_y)
-{
- // Load the weights
- half3 weights_row0 = vload3(0, (__global half *)(weights_addr + 0 * weights_stride_y));
- half3 weights_row1 = vload3(0, (__global half *)(weights_addr + 1 * weights_stride_y));
- half3 weights_row2 = vload3(0, (__global half *)(weights_addr + 2 * weights_stride_y));
-
- half4 pixels0 = 0.0f;
-
- half8 src00_left = vload8(0, (__global half *)ptr_offset(src_addr, 0, y_offset, stride_x_bytes, stride_y_bytes)); // Row0
- half8 src00_mid = vload8(0, (__global half *)ptr_offset(src_addr, DILATION_X, y_offset, stride_x_bytes, stride_y_bytes));
- half8 src00_right = vload8(0, (__global half *)ptr_offset(src_addr, 2 * DILATION_X, y_offset, stride_x_bytes, stride_y_bytes));
-
- half8 src10_left = vload8(0, (__global half *)ptr_offset(src_addr, 0, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes)); // Row1
- half8 src10_mid = vload8(0, (__global half *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes));
- half8 src10_right = vload8(0, (__global half *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes));
-
- half8 src20_left = vload8(0, (__global half *)ptr_offset(src_addr, 0, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); // Row2
- half8 src20_mid = vload8(0, (__global half *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes));
- half8 src20_right = vload8(0, (__global half *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes));
-
- CONVOLUTION1x3_4X1_STRIDE2(pixels0, src00_left, src00_mid, src00_right, weights_row0);
- CONVOLUTION1x3_4X1_STRIDE2(pixels0, src10_left, src10_mid, src10_right, weights_row1);
- CONVOLUTION1x3_4X1_STRIDE2(pixels0, src20_left, src20_mid, src20_right, weights_row2);
-
- return pixels0;
-}
-
-#endif // (DILATION_X > 1 || DILATION_Y > 1)
-
-/** Compute a 1D horizontal convolution of size 3 and stride 1 for 16bit floating point type.
- *
- * @param[in] left_pixel Pointer to the left pixel.
- * @param[in] left_coeff Weight of the left pixel
- * @param[in] middle_coeff Weight of the middle pixel
- * @param[in] right_coeff Weight of the right pixel
- *
- * @return a half4 containing 4 convoluted values.
- */
-inline half4 convolution1x3_stride_1_f16(__global const uchar *left_pixel,
- const half left_coeff,
- const half middle_coeff,
- const half right_coeff)
-{
-#if(DILATION_X == 1 && DILATION_Y == 1)
-
- half8 temp = vload8(0, (__global half *)left_pixel);
-
- half4 left = CONVERT(temp.s0123, half4);
- half4 middle = CONVERT(temp.s1234, half4);
- half4 right = CONVERT(temp.s2345, half4);
-
- return left * (half4)left_coeff + middle * (half4)middle_coeff + right * (half4)right_coeff;
-#else /* DILATION_X==1 && DILATION_Y==1 */
- return vload4(0, (__global half *)left_pixel) * (half4)left_coeff
- + vload4(0, (__global half *)(left_pixel) + DILATION_X) * (half4)middle_coeff
- + vload4(0, (__global half *)(left_pixel) + 2 * DILATION_X) * (half4)right_coeff;
-
-#endif /* DILATION_X==1 && DILATION_Y==1 */
-}
-
-/** Compute a 1D horizontal convolution of size 3 and stride 2 for 16bit floating point type.
- *
- * @param[in] left_pixel Pointer to the left pixel.
- * @param[in] left_coeff Weight of the left pixel
- * @param[in] middle_coeff Weight of the middle pixel
- * @param[in] right_coeff Weight of the right pixel
- *
- * @return a half4 containing 4 convoluted values.
- */
-inline half4 convolution1x3_stride_2_f16(__global const uchar *left_pixel,
- const half left_coeff,
- const half middle_coeff,
- const half right_coeff)
-{
-#if(DILATION_X == 1 && DILATION_Y == 1)
-
- half8 temp0 = vload8(0, (__global half *)left_pixel);
- half temp1 = *((__global half *)(left_pixel + 8 * sizeof(half)));
-
- half4 left = CONVERT(temp0.s0246, half4);
- half4 middle = CONVERT(temp0.s1357, half4);
- half4 right = CONVERT((half4)(temp0.s246, temp1), half4);
-
- return left * (half4)left_coeff + middle * (half4)middle_coeff + right * (half4)right_coeff;
-#else /* DILATION_X==1 && DILATION_Y==1 */
-
- __global half *left_pixel_float = (__global half *)left_pixel;
-
- return (half4)(*left_pixel_float, *(left_pixel_float + 2), *(left_pixel_float + 4), *(left_pixel_float + 6)) * (half4)left_coeff
- + (half4)(*(left_pixel_float + DILATION_X), *(left_pixel_float + DILATION_X + 2), *(left_pixel_float + DILATION_X + 4), *(left_pixel_float + DILATION_X + 6)) * (half4)middle_coeff
- + (half4)(*(left_pixel_float + DILATION_X * 2), *(left_pixel_float + DILATION_X * 2 + 2), *(left_pixel_float + DILATION_X * 2 + 4), *(left_pixel_float + DILATION_X * 2 + 6)) * (half4)right_coeff;
-
-#endif /* DILATION_X==1 && DILATION_Y==1 */
-}
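The swizzle gathers above (temp0.s0246, temp0.s1357, and s246 plus the extra temp1 element) implement a stride-2 1x3 convolution over nine consecutive inputs. A plain-C reference of the same computation, using float in place of half purely for portability; every name here is local to the sketch:

/* Stride-2 1x3 convolution producing four outputs from nine inputs,
 * mirroring the lane selection done with OpenCL swizzles above. */
static void conv1x3_stride2_ref(const float in[9], float w_left, float w_mid,
                                float w_right, float out[4])
{
    for (int i = 0; i < 4; ++i)
    {
        const int x = 2 * i; /* stride 2: output i starts at input 2*i */
        out[i] = in[x] * w_left + in[x + 1] * w_mid + in[x + 2] * w_right;
    }
}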
-
-/** Compute a 1D horizontal convolution of size 3 and stride 3 for 16bit floating point type.
- *
- * @param[in] left_pixel Pointer to the left pixel.
- * @param[in] left_coeff Weight of the left pixel
- * @param[in] middle_coeff Weight of the middle pixel
- * @param[in] right_coeff Weight of the right pixel
- *
- * @return a half4 containing 4 convoluted values.
- */
-inline half4 convolution1x3_stride_3_f16(__global const uchar *left_pixel,
- const half left_coeff,
- const half middle_coeff,
- const half right_coeff)
-{
-#if(DILATION_X == 1 && DILATION_Y == 1)
-
- half16 temp0 = vload16(0, (__global half *)left_pixel);
-
- half4 left = CONVERT(temp0.s0369, half4);
- half4 middle = CONVERT(temp0.s147A, half4);
- half4 right = CONVERT(temp0.s258B, half4);
-
- return left * (half4)left_coeff + middle * (half4)middle_coeff + right * (half4)right_coeff;
-#else /* DILATION_X==1 && DILATION_Y==1 */
-
- __global half *left_pixel_float = (__global half *)left_pixel;
-
- return (half4)(*left_pixel_float, *(left_pixel_float + 3), *(left_pixel_float + 6), *(left_pixel_float + 9)) * (half4)left_coeff
- + (half4)(*(left_pixel_float + DILATION_X), *(left_pixel_float + DILATION_X + 3), *(left_pixel_float + DILATION_X + 6), *(left_pixel_float + DILATION_X + 9)) * (half4)middle_coeff
- + (half4)(*(left_pixel_float + DILATION_X * 2), *(left_pixel_float + DILATION_X * 2 + 3), *(left_pixel_float + DILATION_X * 2 + 6), *(left_pixel_float + DILATION_X * 2 + 9)) * (half4)right_coeff;
-
-#endif /* DILATION_X==1 && DILATION_Y==1 */
-}
-
-/** Apply a 3x3 convolution matrix to a single channel F16 input image and return the result.
- *
- * Convolution matrix layout:
- *
- * [ mat0, mat1, mat2 ]\n
- * [ mat3, mat4, mat5 ]\n
- * [ mat6, mat7, mat8 ]\n
- *
- * @param[in] src          A pointer to the source data
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] mat0 Coefficient from the convolution matrix
- * @param[in] mat1 Coefficient from the convolution matrix
- * @param[in] mat2 Coefficient from the convolution matrix
- * @param[in] mat3 Coefficient from the convolution matrix
- * @param[in] mat4 Coefficient from the convolution matrix
- * @param[in] mat5 Coefficient from the convolution matrix
- * @param[in] mat6 Coefficient from the convolution matrix
- * @param[in] mat7 Coefficient from the convolution matrix
- * @param[in] mat8 Coefficient from the convolution matrix
- *
- * @return a half4 containing 4 convoluted values.
- */
-inline half4 convolution3x3_f16(
- __global uchar *src, uint src_stride_y,
- const half mat0, const half mat1, const half mat2,
- const half mat3, const half mat4, const half mat5,
- const half mat6, const half mat7, const half mat8)
-{
- half4 pixels;
-
- pixels = convolution1x3_f16(src, mat0, mat1, mat2);
- pixels += convolution1x3_f16(src + DILATION_Y * src_stride_y, mat3, mat4, mat5);
- pixels += convolution1x3_f16(src + DILATION_Y * 2 * src_stride_y, mat6, mat7, mat8);
-
- return pixels;
-}
-
-#if defined(DEPTH_MULTIPLIER)
-
-/** This OpenCL kernel computes the depthwise convolution 3x3
- *
- * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
- * @note If activation function is enabled, the data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=half. Supported data types: half.
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z                            src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z                            dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z                        weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: F16
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-__kernel void depthwise_convolution_3x3_f16(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif //defined(HAS_BIAS)
-)
-{
- Image dst = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
-#if defined(HAS_BIAS)
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-#endif //defined(HAS_BIAS)
-
- // Extract channel and linearized batch indices
- const int channel = get_global_id(2) % DST_CHANNELS;
- const int batch = get_global_id(2) / DST_CHANNELS;
- // Load relevant input and weights data (Accounts for the depth multiplier when indexing the input, OFM = IFM * DEPTH_MULTIPLIER)
- __global uchar *src_addr = src_ptr + get_global_id(0) * src_step_x + get_global_id(1) * src_step_y + get_global_id(2) * src_step_z - batch * (DST_CHANNELS / DEPTH_MULTIPLIER) *
- (DEPTH_MULTIPLIER - 1) * src_step_z - (channel - (channel / DEPTH_MULTIPLIER)) * src_step_z;
- __global uchar *weights_addr = weights.ptr + get_global_id(0) * weights_step_x + get_global_id(1) * weights_step_y + channel * weights_step_z;
-
- uchar3 offset = (uchar3)(0, 1, 2) * (uchar3)weights_stride_y;
- half3 weights_values0 = vload3(0, (__global half *)(weights_addr + offset.s0));
- half3 weights_values1 = vload3(0, (__global half *)(weights_addr + offset.s1));
- half3 weights_values2 = vload3(0, (__global half *)(weights_addr + offset.s2));
-
- half4 pixels = convolution3x3_f16(src_addr, src_stride_y, weights_values0.s0, weights_values0.s1, weights_values0.s2,
- weights_values1.s0, weights_values1.s1, weights_values1.s2,
- weights_values2.s0, weights_values2.s1, weights_values2.s2);
-#if defined(HAS_BIAS)
- pixels += (half4)(*((__global half *)(biases.ptr + channel * biases_stride_x)));
-#endif //defined(HAS_BIAS)
-
- vstore4(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels, A_VAL, B_VAL), 0, (__global half *)dst.ptr);
-}
-#endif // defined(DEPTH_MULTIPLIER)
-#endif // defined(CONV_STRIDE_X)
-
-/** This OpenCL kernel is optimized for Bifrost architectures and computes the 16bit floating point depthwise convolution 3x3
- * when both stride_x and stride_y are equal to 1
- *
- * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
- * @note If activation function is enabled, the data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=half. Supported data types: half.
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z                            src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z                            dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z                        weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: same as @p src_ptr
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-__kernel void depthwise_convolution_3x3_stridex1_stridey1_f16(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif //defined(HAS_BIAS)
-)
-{
- Image dst = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
-
- // Extract channel and linearized batch indices
- const int channel = get_global_id(2) % DST_CHANNELS;
- const int batch = get_global_id(2) / DST_CHANNELS;
-
-#ifdef HAS_BIAS
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-
- half bias = *((__global half *)(vector_offset(&biases, channel)));
-#endif /* defined(HAS_BIAS) */
-
- half4 pixels0 = 0.0f;
- half4 pixels1 = 0.0f;
- half4 pixels2 = 0.0f;
- half4 pixels3 = 0.0f;
-
- // Load relevant input and weights data (Accounts for the depth multiplier when indexing the input, OFM = IFM * DEPTH_MULTIPLIER)
- __global uchar *weights_addr = weights.ptr + get_global_id(0) * weights_step_x + get_global_id(1) * weights_step_y + channel * weights_step_z;
- __global uchar *src_addr = src_ptr + get_global_id(0) * src_step_x + get_global_id(1) * src_step_y + get_global_id(2) * src_step_z - batch * (DST_CHANNELS / DEPTH_MULTIPLIER) *
- (DEPTH_MULTIPLIER - 1) * src_step_z - (channel - (channel / DEPTH_MULTIPLIER)) * src_step_z;
-
-#if(DILATION_X == 1 && DILATION_Y == 1)
- // Load the weights
- half3 weights_row0 = vload3(0, (__global half *)(weights_addr + 0 * weights_stride_y));
- half3 weights_row1 = vload3(0, (__global half *)(weights_addr + 1 * weights_stride_y));
- half3 weights_row2 = vload3(0, (__global half *)(weights_addr + 2 * weights_stride_y));
-
- // Note: Since each work-item computes 4x4 elements, we need to load 6 rows from the input tensor
- half8 src00 = vload8(0, (__global half *)(src_addr + 0 * src_stride_y)); // Row0
- half8 src10 = vload8(0, (__global half *)(src_addr + 1 * src_stride_y)); // Row1
- half8 src20 = vload8(0, (__global half *)(src_addr + 2 * src_stride_y)); // Row2
- half8 src30 = vload8(0, (__global half *)(src_addr + 3 * src_stride_y)); // Row3
- half8 src40 = vload8(0, (__global half *)(src_addr + 4 * src_stride_y)); // Row4
- half8 src50 = vload8(0, (__global half *)(src_addr + 5 * src_stride_y)); // Row5
-
- CONVOLUTION1x3_4X1_STRIDE1(pixels0, src00, weights_row0);
- CONVOLUTION1x3_4X1_STRIDE1(pixels0, src10, weights_row1);
- CONVOLUTION1x3_4X1_STRIDE1(pixels0, src20, weights_row2);
- CONVOLUTION1x3_4X1_STRIDE1(pixels1, src10, weights_row0);
- CONVOLUTION1x3_4X1_STRIDE1(pixels1, src20, weights_row1);
- CONVOLUTION1x3_4X1_STRIDE1(pixels1, src30, weights_row2);
- CONVOLUTION1x3_4X1_STRIDE1(pixels2, src20, weights_row0);
- CONVOLUTION1x3_4X1_STRIDE1(pixels2, src30, weights_row1);
- CONVOLUTION1x3_4X1_STRIDE1(pixels2, src40, weights_row2);
- CONVOLUTION1x3_4X1_STRIDE1(pixels3, src30, weights_row0);
- CONVOLUTION1x3_4X1_STRIDE1(pixels3, src40, weights_row1);
- CONVOLUTION1x3_4X1_STRIDE1(pixels3, src50, weights_row2);
-
-#else /* DILATION_X==1 && DILATION_Y==1 */
-
- //3x3 Convolution of elements starting in 0th row
- pixels0 = convolution_3x3_dilation_stridex1_stridey1_f16(src_addr, src_stride_x, src_stride_y, 0, weights_addr, weights_stride_y);
- //3x3 Convolution of elements starting in 1st row
- pixels1 = convolution_3x3_dilation_stridex1_stridey1_f16(src_addr, src_stride_x, src_stride_y, 1, weights_addr, weights_stride_y);
- //3x3 Convolution of elements starting in 2nd row
- pixels2 = convolution_3x3_dilation_stridex1_stridey1_f16(src_addr, src_stride_x, src_stride_y, 2, weights_addr, weights_stride_y);
- //3x3 Convolution of elements starting in 3rd row
- pixels3 = convolution_3x3_dilation_stridex1_stridey1_f16(src_addr, src_stride_x, src_stride_y, 3, weights_addr, weights_stride_y);
-
-#endif /* DILATION_X==1 && DILATION_Y==1 */
-
-#ifdef HAS_BIAS
- pixels0 += (half4)bias;
- pixels1 += (half4)bias;
- pixels2 += (half4)bias;
- pixels3 += (half4)bias;
-#endif /* defined(HAS_BIAS) */
-
- vstore4(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels0, A_VAL, B_VAL), 0, (__global half *)(dst.ptr + 0 * dst_stride_y));
- vstore4(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels1, A_VAL, B_VAL), 0, (__global half *)(dst.ptr + 1 * dst_stride_y));
- vstore4(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels2, A_VAL, B_VAL), 0, (__global half *)(dst.ptr + 2 * dst_stride_y));
- vstore4(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels3, A_VAL, B_VAL), 0, (__global half *)(dst.ptr + 3 * dst_stride_y));
-}
-
-/** This OpenCL kernel is optimized for Bifrost architectures and computes the 16bit floating point depthwise convolution 3x3
- * when both stride_x and stride_y are equal to 2
- *
- * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
- * @note If activation function is enabled, the data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=half. Supported data types: half.
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z                            src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z                            dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z                        weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: same as @p src_ptr
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-__kernel void depthwise_convolution_3x3_stridex2_stridey2_f16(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif //defined(HAS_BIAS)
-)
-{
- Image dst = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
-
- // Extract channel and linearized batch indices
- const int channel = get_global_id(2) % DST_CHANNELS;
- const int batch = get_global_id(2) / DST_CHANNELS;
-
-#ifdef HAS_BIAS
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-
- half bias = *((__global half *)(vector_offset(&biases, channel)));
-#endif /* defined(HAS_BIAS) */
-
- half4 pixels0 = 0.0f;
- half4 pixels1 = 0.0f;
-
- // Load relevant input and weights data (Accounts for the depth multiplier when indexing the input, OFM = IFM * DEPTH_MULTIPLIER)
- __global uchar *weights_addr = weights.ptr + get_global_id(0) * weights_step_x + get_global_id(1) * weights_step_y + channel * weights_step_z;
- __global uchar *src_addr = src_ptr + get_global_id(0) * src_step_x + get_global_id(1) * src_step_y + get_global_id(2) * src_step_z - batch * (DST_CHANNELS / DEPTH_MULTIPLIER) *
- (DEPTH_MULTIPLIER - 1) * src_step_z - (channel - (channel / DEPTH_MULTIPLIER)) * src_step_z;
-
-#if(DILATION_X == 1 && DILATION_Y == 1)
-
- // Load the weights
- half3 weights_row0 = vload3(0, (__global half *)(weights_addr + 0 * weights_stride_y));
- half3 weights_row1 = vload3(0, (__global half *)(weights_addr + 1 * weights_stride_y));
- half3 weights_row2 = vload3(0, (__global half *)(weights_addr + 2 * weights_stride_y));
-
- // Note: Since each work-item computes 2x4 elements, we need to load 5 rows from the input tensor
- half8 src00 = vload8(0, (__global half *)(src_addr + 0 * src_stride_y)); // Row0
- half2 src01 = vload2(4, (__global half *)(src_addr + 0 * src_stride_y)); // Row0
- half8 src10 = vload8(0, (__global half *)(src_addr + 1 * src_stride_y)); // Row1
- half2 src11 = vload2(4, (__global half *)(src_addr + 1 * src_stride_y)); // Row1
- half8 src20 = vload8(0, (__global half *)(src_addr + 2 * src_stride_y)); // Row2
- half2 src21 = vload2(4, (__global half *)(src_addr + 2 * src_stride_y)); // Row2
- half8 src30 = vload8(0, (__global half *)(src_addr + 3 * src_stride_y)); // Row3
- half2 src31 = vload2(4, (__global half *)(src_addr + 3 * src_stride_y)); // Row3
- half8 src40 = vload8(0, (__global half *)(src_addr + 4 * src_stride_y)); // Row4
- half2 src41 = vload2(4, (__global half *)(src_addr + 4 * src_stride_y)); // Row4
-
- CONVOLUTION1x3_4X1_STRIDE2(pixels0, src00, src01, weights_row0);
- CONVOLUTION1x3_4X1_STRIDE2(pixels0, src10, src11, weights_row1);
- CONVOLUTION1x3_4X1_STRIDE2(pixels0, src20, src21, weights_row2);
- CONVOLUTION1x3_4X1_STRIDE2(pixels1, src20, src21, weights_row0);
- CONVOLUTION1x3_4X1_STRIDE2(pixels1, src30, src31, weights_row1);
- CONVOLUTION1x3_4X1_STRIDE2(pixels1, src40, src41, weights_row2);
-
-#else /* DILATION_X==1 && DILATION_Y==1 */
- //3x3 Convolution of elements starting in 0th row
- pixels0 = convolution_3x3_dilation_stridex2_stridey2_f16(src_addr, src_stride_x, src_stride_y, 0, weights_addr, weights_stride_y);
- //3x3 Convolution of elements starting in 2nd row
- pixels1 = convolution_3x3_dilation_stridex2_stridey2_f16(src_addr, src_stride_x, src_stride_y, 2, weights_addr, weights_stride_y);
-#endif /* DILATION_X==1 && DILATION_Y==1 */
-
-#ifdef HAS_BIAS
- pixels0 += (half4)bias;
- pixels1 += (half4)bias;
-#endif /* defined(HAS_BIAS) */
-
- vstore4(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels0, A_VAL, B_VAL), 0, (__global half *)(dst.ptr + 0 * dst_stride_y));
- vstore4(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, pixels1, A_VAL, B_VAL), 0, (__global half *)(dst.ptr + 1 * dst_stride_y));
-}
-#endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(DEPTH_MULTIPLIER) && defined(DST_CHANNELS) && defined(IS_F16)
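As a usage note, every specialization in the F16 section above was selected purely through -D build options. A plausible option string for the stride-1 Bifrost kernel, assembled from the section guards and @note lines; the concrete values are illustrative, not taken from the library:

/* Illustrative OpenCL build options for
 * depthwise_convolution_3x3_stridex1_stridey1_f16; each macro is one the
 * section guards or kernel comments above require, with example values. */
const char *build_opts =
    "-DARM_COMPUTE_OPENCL_FP16_ENABLED -DIS_F16 "
    "-DDATA_TYPE=half -DVEC_SIZE=4 "
    "-DDST_CHANNELS=32 -DDEPTH_MULTIPLIER=1 "
    "-DDILATION_X=1 -DDILATION_Y=1 "
    "-DACTIVATION_TYPE=relu -DA_VAL=0.0f -DB_VAL=0.0f -DHAS_BIAS";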
-
-#if defined(SRC_DIM1) && defined(SRC_DIM2) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(N0) && defined(DATA_TYPE) && defined(DILATION_X) && defined(DILATION_Y) && defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y) && defined(CONV_PAD_LEFT) && defined(CONV_PAD_TOP) && defined(VEC_SIZE_LEFTOVER)
-/** This function computes the depthwise convolution for NHWC data layout. This kernel assumes that the weights tensor is NOT reshaped
- *
- * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
- * @note The number of elements processed must be passed at compile time using -DN0 (e.g. -DN0=2)
- * @note The depth multiplier must be passed at compile time using -DDEPTH_MULTIPLIER (e.g. -DDEPTH_MULTIPLIER=1)
- * @note The first dimension of the input tensor must be passed at compile time using -DSRC_DIM1 (e.g. -DSRC_DIM1=112)
- * @note The second dimension of the input tensor must be passed at compile time using -DSRC_DIM2 (e.g. -DSRC_DIM2=80)
- * @note The kernel width must be passed at compile time using -DKERNEL_WIDTH (e.g. -DKERNEL_WIDTH=5)
- * @note The kernel height must be passed at compile time using -DKERNEL_HEIGHT (e.g. -DKERNEL_HEIGHT=5)
- * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. -DCONV_PAD_TOP=1)
- * @note The convolution pad left must be passed at compile time using -DCONV_PAD_LEFT (e.g. -DCONV_PAD_LEFT=1)
- * @note The convolution stride along the width must be passed at compile time using -DCONV_STRIDE_X (e.g. -DCONV_STRIDE_X=1)
- * @note The convolution stride along the height must be passed at compile time using -DCONV_STRIDE_Y (e.g. -DCONV_STRIDE_Y=1)
- * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER (e.g. -DVEC_SIZE_LEFTOVER=3). It is defined as the remainder of the input's first dimension divided by N0
- * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z                            src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z                            dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: F16/F32
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z                        weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: same as src_ptr
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-__kernel void dwc_MxN_native_fp_nhwc(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif // defined(HAS_BIAS)
-)
-{
- int x_offs = max((int)(get_global_id(0) * N0 - (N0 - VEC_SIZE_LEFTOVER) % N0), 0) * sizeof(DATA_TYPE);
-
- int x = get_global_id(0); // channels
- int y = get_global_id(1); // spatial coordinate x
-#if defined(DST_DEPTH)
- int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y
- int b = get_global_id(2) / (int)DST_DEPTH; // batch
-#else // defined(DST_DEPTH)
- int z = get_global_id(2); // spatial coordinate y
-#endif // defined(DST_DEPTH)
-
- __global uchar *s_addr = src_ptr + src_offset_first_element_in_bytes + x_offs;
-
- __global uchar *d_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offs * (int)DEPTH_MULTIPLIER + y * dst_stride_y + z * dst_stride_z;
-
- __global uchar *w_addr = weights_ptr + weights_offset_first_element_in_bytes + x_offs * (int)DEPTH_MULTIPLIER;
-
-#if defined(HAS_BIAS)
- __global uchar *b_addr = biases_ptr + biases_offset_first_element_in_bytes + x_offs * (int)DEPTH_MULTIPLIER;
-#endif // defined(HAS_BIAS)
-
-#if defined(DST_DEPTH)
- s_addr += b * src_stride_w;
- d_addr += b * dst_stride_w;
-#endif // defined(DST_DEPTH)
-
- for(int d = 0; d < (int)DEPTH_MULTIPLIER; ++d)
- {
- // Each work-item computes N0x1x1 elements
- VEC_DATA_TYPE(DATA_TYPE, N0)
- res0 = 0;
-
- int x_coord = y * CONV_STRIDE_X - (int)CONV_PAD_LEFT;
- int y_coord = z * CONV_STRIDE_Y - (int)CONV_PAD_TOP;
-
- for(int yk = 0; yk < KERNEL_HEIGHT; ++yk)
- {
- if(y_coord >= 0 && y_coord < SRC_DIM2)
- {
- int x_coord_tmp = x_coord;
-
- for(int xk = 0; xk < KERNEL_WIDTH; ++xk)
- {
- if(x_coord_tmp >= 0 && x_coord_tmp < SRC_DIM1)
- {
- int s_offset = x_coord_tmp * (int)src_stride_y + y_coord * (int)src_stride_z;
- int w_offset = xk * weights_stride_y + yk * weights_stride_z;
-
- // Load input and weights values
- VEC_DATA_TYPE(DATA_TYPE, N0)
- i = VLOAD(N0)(0, (__global DATA_TYPE *)(s_addr + s_offset));
- VEC_DATA_TYPE(DATA_TYPE, N0)
- w = VLOAD(N0)(0, (__global DATA_TYPE *)(w_addr + w_offset));
-
-#if GPU_ARCH == GPU_ARCH_MIDGARD
- res0 += i * w;
-#else // GPU_ARCH == GPU_ARCH_MIDGARD
- res0 = fma(i, w, res0);
-#endif // GPU_ARCH == GPU_ARCH_MIDGARD
- }
- x_coord_tmp += DILATION_X;
- }
- }
- y_coord += DILATION_Y;
- }
-
-#if defined(HAS_BIAS)
- res0 += VLOAD(N0)(0, (__global DATA_TYPE *)(b_addr));
-#endif // defined(HAS_BIAS)
-
- res0 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, N0, res0, A_VAL, B_VAL);
-
- STORE_VECTOR_SELECT(res, DATA_TYPE, d_addr, N0, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
-
- w_addr += sizeof(DATA_TYPE);
- d_addr += sizeof(DATA_TYPE);
-#if defined(HAS_BIAS)
- b_addr += sizeof(DATA_TYPE);
-#endif // defined(HAS_BIAS)
- }
-}
-#endif // defined(SRC_DIM1) && defined(SRC_DIM2) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(N0) && defined(DATA_TYPE) && defined(DILATION_X) && defined(DILATION_Y) && defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y) && defined(CONV_PAD_LEFT) && defined(CONV_PAD_TOP) && defined(VEC_SIZE_LEFTOVER)
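To pin down the loop nest in dwc_MxN_native_fp_nhwc above, here is a host-side C reference for a single output point, with the same skip-out-of-bounds padding and NHWC (channels-innermost) indexing; all names are local to this sketch:

/* Reference for one output point of a native NHWC depthwise convolution:
 * accumulate kernel_h x kernel_w taps, skipping coordinates that fall
 * outside the input instead of reading padded values. */
static float dwc_point_ref(const float *src, int src_w, int src_h, int channels,
                           const float *weights, int kernel_w, int kernel_h,
                           int out_x, int out_y, int c,
                           int stride_x, int stride_y,
                           int pad_left, int pad_top,
                           int dilation_x, int dilation_y)
{
    float acc = 0.0f;
    for (int yk = 0; yk < kernel_h; ++yk)
    {
        const int y = out_y * stride_y - pad_top + yk * dilation_y;
        if (y < 0 || y >= src_h)
            continue; /* implicit zero padding, as in the bounds checks above */
        for (int xk = 0; xk < kernel_w; ++xk)
        {
            const int x = out_x * stride_x - pad_left + xk * dilation_x;
            if (x < 0 || x >= src_w)
                continue;
            /* NHWC: channel index is innermost, exactly as in the kernel */
            acc += src[(y * src_w + x) * channels + c] *
                   weights[(yk * kernel_w + xk) * channels + c];
        }
    }
    return acc;
}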
-
-#if defined(VEC_SIZE) && defined(SRC_DIM_2) && defined(CONV_PAD_TOP) && defined(CONV_PAD_LEFT) && defined(DATA_TYPE)
-
-#if DATA_TYPE != float && DATA_TYPE != half
-#error "Unsupported data type"
-#endif // DATA_TYPE != float && DATA_TYPE != half
-
-#define VEC_FLOAT VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
-
-#define FILL_ZERO_OUT_OF_BOUND_3(data_type, vec_size, basename, cond) \
- ({ \
- basename##0 = select(basename##0, (VEC_DATA_TYPE(data_type, vec_size))0, (SELECT_VEC_DATA_TYPE(data_type, vec_size))((cond).s0)); \
- basename##1 = select(basename##1, (VEC_DATA_TYPE(data_type, vec_size))0, (SELECT_VEC_DATA_TYPE(data_type, vec_size))((cond).s1)); \
- basename##2 = select(basename##2, (VEC_DATA_TYPE(data_type, vec_size))0, (SELECT_VEC_DATA_TYPE(data_type, vec_size))((cond).s2)); \
- })
-
-#define FILL_ZERO_OUT_OF_BOUND_4(data_type, vec_size, basename, cond) \
- ({ \
- FILL_ZERO_OUT_OF_BOUND_3(data_type, vec_size, basename, cond); \
- basename##3 = select(basename##3, (VEC_DATA_TYPE(data_type, vec_size))0, (SELECT_VEC_DATA_TYPE(data_type, vec_size))((cond).s3)); \
- })
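To make the masking macros concrete: each lane of cond flags a source row that fell out of bounds, and select() replaces that row's whole vector of channel values with zeros. A scalar C sketch of the same effect (names local to this sketch):

#include <string.h>

/* Zero one row's vector of channel values when its source row is out of
 * bounds; this is the effect of a single select() line in
 * FILL_ZERO_OUT_OF_BOUND_3/_4 above. */
static void zero_if_out_of_bound(float *row_values, int vec_size, int out_of_bounds)
{
    if (out_of_bounds)
        memset(row_values, 0, (size_t)vec_size * sizeof(float));
}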
-
-#if defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y)
-
-/** This function computes the depthwise convolution for NHWC data layout when the stride along the width or height is not 1.
- *
- * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
- * @note The number of elements read per thread must be passed at compile time using -DVEC_SIZE (e.g. -DVEC_SIZE=2)
- * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM_2 (e.g. -DSRC_DIM_2=112)
- * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. -DCONV_PAD_TOP=1)
- * @note The convolution pad left must be passed at compile time using -DCONV_PAD_LEFT (e.g. -DCONV_PAD_LEFT=1)
- * @note The convolution stride along the width must be passed at compile time using -DCONV_STRIDE_X (e.g. -DCONV_STRIDE_X=1)
- * @note The convolution stride along the height must be passed at compile time using -DCONV_STRIDE_Y (e.g. -DCONV_STRIDE_Y=1)
- * @note The dilation_x and dilation_y must be passed at compile time using -DDILATION_X and -DDILATION_Y: e.g. -DDILATION_X=1, -DDILATION_Y=1
- * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note In case of biases, -DHAS_BIAS must to be passed at compile
- * @note If the output tensor has more than three dimensions, its third dimension must be passed at compile time using -DDST_DEPTH (e.g. -DDST_DEPTH=32)
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: F16/F32
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: same as src_ptr
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-__kernel void depthwise_convolution_3x3_nhwc(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif /* defined(HAS_BIAS) */
-)
-{
- int x_offset = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - PARTIAL_STORE_N0) % VEC_SIZE), 0) * sizeof(DATA_TYPE);
- int y = get_global_id(1); // spatial coordinate x
-#if defined(DST_DEPTH)
- int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y
- int b = get_global_id(2) / (int)DST_DEPTH; // batch
-#else // defined(DST_DEPTH)
- int z = get_global_id(2); // spatial coordinate y
-#endif // defined(DST_DEPTH)
-
- __global uchar *weights_addr = weights_ptr + weights_offset_first_element_in_bytes + x_offset;
-
-#if defined(DST_DEPTH)
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x_offset + b * src_stride_w;
-#else /* defined(DST_DEPTH) */
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x_offset;
-#endif /* defined(DST_DEPTH) */
-
- int3 src_coord_y = (int3)(y * CONV_STRIDE_X - CONV_PAD_LEFT) + (int3)(0, DILATION_X, 2 * DILATION_X);
- int3 src_coord_z = (int3)(z * CONV_STRIDE_Y - CONV_PAD_TOP) + (int3)(0, DILATION_Y, 2 * DILATION_Y);
-
- int3 src_offset_y = clamp(src_coord_y, (int3)0, (int3)(SRC_DIM_1 - 1));
- int3 src_offset_z = clamp(src_coord_z, (int3)0, (int3)(SRC_DIM_2 - 1));
-
- // Use these vectors to check whether the unclamped load would have been out of bounds
- src_coord_y = (src_offset_y != src_coord_y);
- src_coord_z = (src_offset_z != src_coord_z);
-
- src_offset_y *= (int3)src_stride_y;
- src_offset_z *= (int3)src_stride_z;
-
- // We compute VEC_SIZEx1x1 [C,W,H] elements
- VEC_FLOAT acc0 = 0;
-
- // Load weights
- VEC_FLOAT w0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_y + 0 * weights_stride_z));
- VEC_FLOAT w1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y + 0 * weights_stride_z));
- VEC_FLOAT w2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y + 0 * weights_stride_z));
- VEC_FLOAT w3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_y + 1 * weights_stride_z));
- VEC_FLOAT w4 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y + 1 * weights_stride_z));
- VEC_FLOAT w5 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y + 1 * weights_stride_z));
- VEC_FLOAT w6 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_y + 2 * weights_stride_z));
- VEC_FLOAT w7 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y + 2 * weights_stride_z));
- VEC_FLOAT w8 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y + 2 * weights_stride_z));
-
- // Load input values
- // z == 0
- VEC_FLOAT values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s0 + src_offset_y.s0));
- VEC_FLOAT values1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s0 + src_offset_y.s1));
- VEC_FLOAT values2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s0 + src_offset_y.s2));
-
- FILL_ZERO_OUT_OF_BOUND_3(DATA_TYPE, VEC_SIZE, values, src_coord_y | (int3)src_coord_z.s0);
-
- acc0 = fma(values0, w0, acc0);
- acc0 = fma(values1, w1, acc0);
- acc0 = fma(values2, w2, acc0);
-
- // z == 1
- values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s1 + src_offset_y.s0));
- values1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s1 + src_offset_y.s1));
- values2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s1 + src_offset_y.s2));
-
- FILL_ZERO_OUT_OF_BOUND_3(DATA_TYPE, VEC_SIZE, values, src_coord_y | (int3)src_coord_z.s1);
-
- acc0 = fma(values0, w3, acc0);
- acc0 = fma(values1, w4, acc0);
- acc0 = fma(values2, w5, acc0);
-
- // z == 2
- values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s2 + src_offset_y.s0));
- values1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s2 + src_offset_y.s1));
- values2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s2 + src_offset_y.s2));
-
- FILL_ZERO_OUT_OF_BOUND_3(DATA_TYPE, VEC_SIZE, values, src_coord_y | (int3)src_coord_z.s2);
-
- acc0 = fma(values0, w6, acc0);
- acc0 = fma(values1, w7, acc0);
- acc0 = fma(values2, w8, acc0);
-
-#if defined(HAS_BIAS)
- __global uchar *biases_addr = biases_ptr + biases_offset_first_element_in_bytes + x_offset;
- VEC_FLOAT bias_values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)biases_addr);
- acc0 += bias_values;
-#endif // defined(HAS_BIAS)
-
-#if defined(DST_DEPTH)
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offset + y * dst_step_y + z * dst_step_z + b * dst_stride_w;
-#else /* defined(DST_DEPTH) */
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offset + y * dst_step_y + z * dst_step_z;
-#endif /* defined(DST_DEPTH) */
-
- acc0 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, acc0, A_VAL, B_VAL);
- STORE_VECTOR_SELECT(acc, DATA_TYPE, dst_addr, VEC_SIZE, PARTIAL_STORE_N0, PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0)
-}
-#endif // defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y)
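The border handling in this kernel never branches: the requested coordinates are clamped so every load is from a safe address, and the clamp-vs-request comparison marks which taps were actually padded. A worked sketch with hypothetical sizes (not library code), assuming CONV_PAD_LEFT = 1, CONV_STRIDE_X = 2, DILATION_X = 1 and SRC_DIM_1 = 8:

__kernel void border_handling_sketch(__global int *out)
{
    // The work-item at output column y = 0 requests input columns (-1, 0, 1).
    int3 src_coord_y  = (int3)(0 * 2 - 1) + (int3)(0, 1, 2);        // (-1, 0, 1)
    int3 src_offset_y = clamp(src_coord_y, (int3)0, (int3)(8 - 1)); // ( 0, 0, 1): always safe to load
    int3 oob          = (src_offset_y != src_coord_y);              // (-1, 0, 0): only tap 0 was padded
    vstore3(oob, 0, out); // lanes flagged -1 are zeroed by FILL_ZERO_OUT_OF_BOUND_3 before the fma chain
}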
-
-#if defined(NUM_ROWS_PROCESSED) && defined(NUM_PLANES_PROCESSED)
-/** This function computes the depthwise convolution for NHWC data layout when the stride along the width and height is 1.
- *
- * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
- * @note The number of elements read per thread must be passed at compile time using -DVEC_SIZE (e.g. -DVEC_SIZE=2)
- * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM_2 (e.g. -DSRC_DIM_2=112)
- * @note The number of rows processed per thread must be passed at compile time using -DNUM_ROWS_PROCESSED (e.g. -DNUM_ROWS_PROCESSED=2)
- * @note The number of planes processed per thread must be passed at compile time using -DNUM_PLANES_PROCESSED (e.g. -DNUM_PLANES_PROCESSED=2)
- * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. -DCONV_PAD_TOP=1)
- * @note The convolution pad left must be passed at compile time using -DCONV_PAD_LEFT (e.g. -DCONV_PAD_LEFT=1)
- * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size
- * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note The size of the output's second dimension must be passed at compile time using -DDST_DIM_1 (e.g. -DDST_DIM_1=64)
- * @note The size of the output's third dimension must be passed at compile time using -DDST_DIM_2 (e.g. -DDST_DIM_2=32)
- * @note In case of biases, -DHAS_BIAS must be passed at compile time
- * @note If the output tensor has more than three dimensions, its third dimension must be passed at compile time using -DDST_DEPTH (e.g. -DDST_DEPTH=32)
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: F16/F32
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: same as src_ptr
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-__kernel void depthwise_convolution_3x3_nhwc_stride1(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif /* defined(HAS_BIAS) */
-)
-{
- int x_offset = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - PARTIAL_STORE_N0) % VEC_SIZE), 0) * sizeof(DATA_TYPE);
- int y = get_global_id(1); // spatial coordinate x
-#if defined(DST_DEPTH)
- int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y
- int b = get_global_id(2) / (int)DST_DEPTH; // batch
-#else // defined(DST_DEPTH)
- int z = get_global_id(2); // spatial coordinate y
-#endif // defined(DST_DEPTH)
-
- __global uchar *weights_addr = weights_ptr + weights_offset_first_element_in_bytes + x_offset;
-
-#if defined(DST_DEPTH)
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x_offset + b * src_stride_w;
-#else /* defined(DST_DEPTH) */
- __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x_offset;
-#endif /* defined(DST_DEPTH) */
-
- int4 src_coord_y = (int4)(y * NUM_ROWS_PROCESSED - CONV_PAD_LEFT) + V_OFFS4(int);
- int4 src_coord_z = (int4)(z * NUM_PLANES_PROCESSED - CONV_PAD_TOP) + V_OFFS4(int);
-
- int4 src_offset_y = clamp(src_coord_y, (int4)0, (int4)(SRC_DIM_1 - 1));
- int4 src_offset_z = clamp(src_coord_z, (int4)0, (int4)(SRC_DIM_2 - 1));
-
- // Use these vectors to check whether the unclamped load would have been out of bounds
- src_coord_y = (src_offset_y != src_coord_y);
- src_coord_z = (src_offset_z != src_coord_z);
-
- src_offset_y *= (int4)src_stride_y;
- src_offset_z *= (int4)src_stride_z;
-
- // We compute VEC_SIZEx2x2 [C,W,H] elements
- VEC_FLOAT acc0 = 0;
- VEC_FLOAT acc1 = 0;
- VEC_FLOAT acc2 = 0;
- VEC_FLOAT acc3 = 0;
-
- // Load weights
- VEC_FLOAT w0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_y + 0 * weights_stride_z));
- VEC_FLOAT w1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y + 0 * weights_stride_z));
- VEC_FLOAT w2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y + 0 * weights_stride_z));
- VEC_FLOAT w3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_y + 1 * weights_stride_z));
- VEC_FLOAT w4 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y + 1 * weights_stride_z));
- VEC_FLOAT w5 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y + 1 * weights_stride_z));
- VEC_FLOAT w6 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_y + 2 * weights_stride_z));
- VEC_FLOAT w7 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y + 2 * weights_stride_z));
- VEC_FLOAT w8 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y + 2 * weights_stride_z));
-
- // Load input values
- // z == 0
- VEC_FLOAT values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s0 + src_offset_y.s0));
- VEC_FLOAT values1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s0 + src_offset_y.s1));
- VEC_FLOAT values2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s0 + src_offset_y.s2));
- VEC_FLOAT values3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s0 + src_offset_y.s3));
-
- FILL_ZERO_OUT_OF_BOUND_4(DATA_TYPE, VEC_SIZE, values, src_coord_y | (int4)src_coord_z.s0);
-
- acc0 = fma(values0, w0, acc0);
- acc0 = fma(values1, w1, acc0);
- acc0 = fma(values2, w2, acc0);
- acc1 = fma(values1, w0, acc1);
- acc1 = fma(values2, w1, acc1);
- acc1 = fma(values3, w2, acc1);
-
- // z == 1
- values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s1 + src_offset_y.s0));
- values1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s1 + src_offset_y.s1));
- values2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s1 + src_offset_y.s2));
- values3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s1 + src_offset_y.s3));
-
- FILL_ZERO_OUT_OF_BOUND_4(DATA_TYPE, VEC_SIZE, values, src_coord_y | (int4)src_coord_z.s1);
-
- acc0 = fma(values0, w3, acc0);
- acc0 = fma(values1, w4, acc0);
- acc0 = fma(values2, w5, acc0);
- acc1 = fma(values1, w3, acc1);
- acc1 = fma(values2, w4, acc1);
- acc1 = fma(values3, w5, acc1);
-
- acc2 = fma(values0, w0, acc2);
- acc2 = fma(values1, w1, acc2);
- acc2 = fma(values2, w2, acc2);
- acc3 = fma(values1, w0, acc3);
- acc3 = fma(values2, w1, acc3);
- acc3 = fma(values3, w2, acc3);
-
- // z == 2
- values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s2 + src_offset_y.s0));
- values1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s2 + src_offset_y.s1));
- values2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s2 + src_offset_y.s2));
- values3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s2 + src_offset_y.s3));
-
- FILL_ZERO_OUT_OF_BOUND_4(DATA_TYPE, VEC_SIZE, values, src_coord_y | (int4)src_coord_z.s2);
-
- acc0 = fma(values0, w6, acc0);
- acc0 = fma(values1, w7, acc0);
- acc0 = fma(values2, w8, acc0);
- acc1 = fma(values1, w6, acc1);
- acc1 = fma(values2, w7, acc1);
- acc1 = fma(values3, w8, acc1);
-
- acc2 = fma(values0, w3, acc2);
- acc2 = fma(values1, w4, acc2);
- acc2 = fma(values2, w5, acc2);
- acc3 = fma(values1, w3, acc3);
- acc3 = fma(values2, w4, acc3);
- acc3 = fma(values3, w5, acc3);
-
- // z == 3
- values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s3 + src_offset_y.s0));
- values1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s3 + src_offset_y.s1));
- values2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s3 + src_offset_y.s2));
- values3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + src_offset_z.s3 + src_offset_y.s3));
-
- FILL_ZERO_OUT_OF_BOUND_4(DATA_TYPE, VEC_SIZE, values, src_coord_y | (int4)src_coord_z.s3);
-
- acc2 = fma(values0, w6, acc2);
- acc2 = fma(values1, w7, acc2);
- acc2 = fma(values2, w8, acc2);
- acc3 = fma(values1, w6, acc3);
- acc3 = fma(values2, w7, acc3);
- acc3 = fma(values3, w8, acc3);
-
-#if defined(HAS_BIAS)
- __global uchar *biases_addr = biases_ptr + biases_offset_first_element_in_bytes + x_offset;
-
- VEC_FLOAT bias_values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)biases_addr);
-
- acc0 += bias_values;
- acc1 += bias_values;
- acc2 += bias_values;
- acc3 += bias_values;
-#endif // defined(HAS_BIAS)
-
- int2 dst_offset_y = min((int2)(y * NUM_ROWS_PROCESSED) + V_OFFS2(int), (int2)(DST_DIM_1 - 1)) * (int2)dst_stride_y;
- int dst_coord_z = z * NUM_PLANES_PROCESSED;
-
-#if defined(DST_DEPTH)
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offset + dst_coord_z * dst_stride_z + b * dst_stride_w;
-#else // defined(DST_DEPTH)
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offset + dst_coord_z * dst_stride_z;
-#endif // defined(DST_DEPTH)
-
- /* Store the vectors in reverse order along Y. The Y offsets are computed so that they are always in bounds.
- * If only the first address is valid, the Y offset of the second address is clamped back onto it, so the same thread writes twice to the same location.
- * Since the last vector written is always the valid one for that location, it overwrites the wrong values.
- */
- values0 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, acc1, A_VAL, B_VAL);
- STORE_VECTOR_SELECT(values, DATA_TYPE, dst_addr + dst_offset_y.s1, VEC_SIZE, PARTIAL_STORE_N0, PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0)
-
- values0 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, acc0, A_VAL, B_VAL);
- STORE_VECTOR_SELECT(values, DATA_TYPE, dst_addr + dst_offset_y.s0, VEC_SIZE, PARTIAL_STORE_N0, PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0)
-
-#if((DST_DIM_2 % NUM_PLANES_PROCESSED) != 0)
- if((dst_coord_z + 1) < DST_DIM_2)
-#endif // ((DST_DIM_2 % NUM_PLANES_PROCESSED) != 0)
- {
- values0 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, acc3, A_VAL, B_VAL);
- STORE_VECTOR_SELECT(values, DATA_TYPE, dst_addr + dst_stride_z + dst_offset_y.s1, VEC_SIZE, PARTIAL_STORE_N0, PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0)
-
- values0 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, acc2, A_VAL, B_VAL);
- STORE_VECTOR_SELECT(values, DATA_TYPE, dst_addr + dst_stride_z + dst_offset_y.s0, VEC_SIZE, PARTIAL_STORE_N0, PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0)
- }
-}
-
-#endif // defined(NUM_ROWS_PROCESSED) && defined(NUM_PLANES_PROCESSED)
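The reverse-order store above deserves a concrete walk-through. A sketch with hypothetical numbers (element offsets instead of byte strides, not library code): with DST_DIM_1 = 5 and NUM_ROWS_PROCESSED = 2, the thread at y = 2 targets rows 4 and 5; row 5 does not exist, so min() clamps both offsets onto row 4, and writing acc1 first lets the valid acc0 overwrite the duplicate:

__kernel void reverse_store_sketch(__global float *dst, int dst_stride_y)
{
    // dst_stride_y is treated as an element stride here for simplicity.
    int2 dst_offset_y = min((int2)(2 * 2) + (int2)(0, 1), (int2)(5 - 1)) * (int2)(dst_stride_y); // (4, 4) * stride
    dst[dst_offset_y.s1] = 1.0f; // acc1: the row that may be a clamped duplicate, written first
    dst[dst_offset_y.s0] = 0.0f; // acc0: always valid for this location, written last, wins
}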
-#endif // defined(VEC_SIZE) && defined(SRC_DIM_2) && defined(CONV_PAD_TOP) && defined(CONV_PAD_LEFT) && defined(DATA_TYPE) \ No newline at end of file
diff --git a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl
deleted file mode 100644
index 000dce1590..0000000000
--- a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl
+++ /dev/null
@@ -1,961 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "helpers_asymm.h"
-
-#ifndef VEC_SIZE
-#if defined(N0)
-#define VEC_SIZE N0
-#else /* defined(N0) */
-#define VEC_SIZE 8
-#endif /* defined(N0) */
-#endif /* VEC_SIZE */
-
-#if defined(ACTIVATION_TYPE) && defined(CONST_0)
-#include "activation_layer_quant.cl"
-#define ACTIVATION_FUNC(x) PERFORM_ACTIVATION_QUANT(ACTIVATION_TYPE, x)
-#else /* defined(ACTIVATION_TYPE) && defined(CONST_0) */
-#define ACTIVATION_FUNC(x) (x)
-#endif /* defined(ACTIVATION_TYPE) && defined(CONST_0) */
-
-#define VEC_INT VEC_DATA_TYPE(int, VEC_SIZE)
-#define VEC_FLOAT VEC_DATA_TYPE(float, VEC_SIZE)
-#define VEC_SHORT VEC_DATA_TYPE(short, VEC_SIZE)
-
-#if defined(DATA_TYPE) && defined(WEIGHTS_TYPE)
-
-#define VEC_TYPE(size) VEC_DATA_TYPE(DATA_TYPE, size)
-
-#if defined(WEIGHTS_OFFSET) && defined(INPUT_OFFSET) && defined(K_OFFSET) && ((defined(OUTPUT_OFFSET) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT)) || defined(REAL_MULTIPLIER))
-
-#if defined(WEIGHTS_PROMOTED_TYPE)
-#define VEC_WEIGHTS_PROMOTED_TYPE(size) VEC_DATA_TYPE(WEIGHTS_PROMOTED_TYPE, size)
-
-#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
-#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
-#define ARM_DOT(x, y, val) val = arm_dot_acc((x), (y), val);
-#else // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
-#define ARM_DOT(x, y, val) val += arm_dot((x), (y));
-#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
-#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
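ARM_DOT thus maps one 4-element multiply-accumulate onto a single instruction where the device exposes the Arm dot-product extensions, and falls back to dot-then-add when only the plain form exists. A standalone sketch (hypothetical kernel, assuming a device that reports cl_arm_integer_dot_product_int8 and its accumulate variant):

__kernel void arm_dot_sketch(__global const uchar4 *a, __global const uchar4 *b, __global uint *out)
{
    uint acc = 0;
    acc = arm_dot_acc(a[0], b[0], acc); // accumulate form: 4 multiplies plus add in one instruction
    acc += arm_dot(a[1], b[1]);         // plain form: dot product, accumulation done separately
    out[0] = acc;
}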
-
-#if defined(CONV_STRIDE_Y) && defined(CONV_STRIDE_X) && defined(DEPTH_MULTIPLIER) && defined(DST_CHANNELS)
-
-#if CONV_STRIDE_X > 3
-#error "Stride X not supported"
-#endif /* CONV_STRIDE_X > 3 */
-
-#if !defined(IS_DOT8)
-
-#if DILATION_X == 1
-
-#if CONV_STRIDE_X == 1
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- int8 temp0 = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value)), int8); \
- int2 temp1 = CONVERT(vload2(0, (__global DATA_TYPE *)(first_value + 8 * sizeof(DATA_TYPE))), int2); \
- \
- left = CONVERT(temp0.s01234567, int8); \
- middle = CONVERT((int8)(temp0.s1234, temp0.s567, temp1.s0), int8); \
- right = CONVERT((int8)(temp0.s2345, temp0.s67, temp1.s01), int8); \
- })
-#elif CONV_STRIDE_X == 2
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- int16 temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value)), int16); \
- int temp1 = CONVERT(*((__global DATA_TYPE *)(first_value + 16 * sizeof(DATA_TYPE))), int); \
- \
- left = CONVERT(temp0.s02468ace, int8); \
- middle = CONVERT(temp0.s13579bdf, int8); \
- right = CONVERT((int8)(temp0.s2468, temp0.sace, temp1), int8); \
- })
-#else /* CONV_STRIDE_X */
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- int16 temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value)), int16); \
- int8 temp1 = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value + 16 * sizeof(DATA_TYPE))), int8); \
- \
- left = CONVERT((int8)(temp0.s0369, temp0.scf, temp1.s25), int8); \
- middle = CONVERT((int8)(temp0.s147a, temp0.sd, temp1.s036), int8); \
- right = CONVERT((int8)(temp0.s258b, temp0.se, temp1.s147), int8); \
- })
-#endif /* CONV_STRIDE_X */
-
-#else /* DILATION_X == 1 */
-
-#if CONV_STRIDE_X == 1
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- left = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value)), int8); \
- middle = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value + DILATION_X * sizeof(DATA_TYPE))), int8); \
- right = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value + 2 * DILATION_X * sizeof(DATA_TYPE))), int8); \
- })
-#elif CONV_STRIDE_X == 2
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- int16 temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value)), int16); \
- left = CONVERT(temp0.s02468ace, int8); \
- \
- temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value + DILATION_X * sizeof(DATA_TYPE))), int16); \
- middle = CONVERT(temp0.s02468ace, int8); \
- \
- temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value + 2 * DILATION_X * sizeof(DATA_TYPE))), int16); \
- right = CONVERT(temp0.s02468ace, int8); \
- })
-#else /* CONV_STRIDE_X */
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- int16 temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value)), int16); \
- int8 temp1 = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value + 16 * sizeof(DATA_TYPE))), int8); \
- left = CONVERT((int8)(temp0.s0369, temp0.scf, temp1.s25), int8); \
- \
- temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value + DILATION_X * sizeof(DATA_TYPE))), int16); \
- temp1 = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value + (16 + DILATION_X) * sizeof(DATA_TYPE))), int8); \
- middle = CONVERT((int8)(temp0.s0369, temp0.scf, temp1.s25), int8); \
- \
- temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value + 2 * DILATION_X * sizeof(DATA_TYPE))), int16); \
- temp1 = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value + (16 + 2 * DILATION_X) * sizeof(DATA_TYPE))), int8); \
- right = CONVERT((int8)(temp0.s0369, temp0.scf, temp1.s25), int8); \
- })
-
-#endif /* CONV_STRIDE_X */
-#endif /* DILATION_X==1 */
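The strided GET_VALUES variants are pure lane shuffles: one wide load feeds several neighbouring outputs at once. A standalone sketch of the CONV_STRIDE_X == 2, DILATION_X == 1 case (hypothetical kernel, not library code): 8 outputs with stride 2 take their left taps from columns 0,2,...,14, i.e. the even lanes of a single 16-wide load, the middle taps from the odd lanes, and the right taps need one extra scalar from column 16:

__kernel void strided_taps_sketch(__global const uchar *row, __global int8 *out)
{
    int16 t0 = convert_int16(vload16(0, row));   // columns 0..15
    int   t1 = row[16];                          // the one column the 16-wide load misses
    int8 left   = t0.s02468ace;                  // even lanes: columns 0,2,...,14
    int8 middle = t0.s13579bdf;                  // odd lanes:  columns 1,3,...,15
    int8 right  = (int8)(t0.s2468, t0.sace, t1); // columns 2,4,...,14,16
    out[0] = left;
    out[1] = middle;
    out[2] = right;
}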
-
-/** This function computes the 3x3 quantized depthwise convolution for the NCHW data layout.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] output_multipliers_ptr Pointer to the output multipliers vector. Supported data types: S32
- * @param[in] output_multipliers_stride_x Stride of the output multipliers vector in X dimension (in bytes)
- * @param[in] output_multipliers_step_x output_multipliers_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_multipliers_offset_first_element_in_bytes The offset of the first element in the output multipliers vector
- * @param[in] output_shifts_ptr Pointer to the output shifts vector. Supported data types: S32
- * @param[in] output_shifts_stride_x Stride of the output shifts vector in X dimension (in bytes)
- * @param[in] output_shifts_step_x output_shifts_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_shifts_offset_first_element_in_bytes The offset of the first element in the output shifts vector
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: S32
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-
-__kernel void dwc_3x3_native_quantized8_nchw(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights),
- VECTOR_DECLARATION(output_multipliers),
- VECTOR_DECLARATION(output_shifts)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif //defined(HAS_BIAS)
-)
-{
- __global uchar *src_addr = src_ptr + get_global_id(0) * src_step_x + get_global_id(1) * src_step_y + get_global_id(2) * src_step_z;
- Image dst = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
- Vector output_multipliers = CONVERT_TO_VECTOR_STRUCT_NO_STEP(output_multipliers);
- Vector output_shifts = CONVERT_TO_VECTOR_STRUCT_NO_STEP(output_shifts);
-
- // Extract channel and linearized batch indices
- const int channel = get_global_id(2) % DST_CHANNELS;
- const int batch = get_global_id(2) / DST_CHANNELS;
-
-#if defined(HAS_BIAS)
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-
- int bias_value = *((__global int *)(vector_offset(&biases, channel)));
-#endif //defined(HAS_BIAS)
-
- // Load relevant input and weights data (Accounts for the depth multiplier when indexing the input, OFM = IFM * DEPTH_MULTIPLIER)
- src_addr -= batch * (DST_CHANNELS / DEPTH_MULTIPLIER) * (DEPTH_MULTIPLIER - 1) * src_step_z + (channel - (channel / DEPTH_MULTIPLIER)) * src_step_z;
- __global uchar *weights_addr = weights.ptr + get_global_id(0) * weights_step_x + get_global_id(1) * weights_step_y + channel * weights_step_z;
-
- VEC_DATA_TYPE(WEIGHTS_TYPE, 3)
- w0 = vload3(0, (__global WEIGHTS_TYPE *)(weights_addr + 0 * weights_stride_y));
- VEC_DATA_TYPE(WEIGHTS_TYPE, 3)
- w1 = vload3(0, (__global WEIGHTS_TYPE *)(weights_addr + 1 * weights_stride_y));
- VEC_DATA_TYPE(WEIGHTS_TYPE, 3)
- w2 = vload3(0, (__global WEIGHTS_TYPE *)(weights_addr + 2 * weights_stride_y));
-
-#if defined(PER_CHANNEL_QUANTIZATION)
- const int output_multiplier = *((__global int *)vector_offset(&output_multipliers, channel));
- const int output_shift = *((__global int *)vector_offset(&output_shifts, channel));
-#endif // defined(PER_CHANNEL_QUANTIZATION)
-
- int8 values0 = 0;
- int8 sum0 = 0;
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- int8 values1 = 0;
- int8 sum1 = 0;
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y == 1 */
-
- // Row0
- int8 left, middle, right;
- GET_VALUES(src_addr + 0 * src_stride_y, left, middle, right);
- values0 += left * (int8)(w0.s0);
- values0 += middle * (int8)(w0.s1);
- values0 += right * (int8)(w0.s2);
-
-#if WEIGHTS_OFFSET != 0
- sum0 += left + middle + right;
-#endif /* WEIGHTS_OFFSET != 0 */
-
- // Row1
- GET_VALUES(src_addr + DILATION_Y * src_stride_y, left, middle, right);
- values0 += left * (int8)(w1.s0);
- values0 += middle * (int8)(w1.s1);
- values0 += right * (int8)(w1.s2);
-
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- values1 += left * (int8)(w0.s0);
- values1 += middle * (int8)(w0.s1);
- values1 += right * (int8)(w0.s2);
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y == 1 */
-
-#if WEIGHTS_OFFSET != 0
- int8 tmp = left + middle + right;
- sum0 += tmp;
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- sum1 += tmp;
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y == 1 */
-#endif /* WEIGHTS_OFFSET != 0 */
-
- // Row2
- GET_VALUES(src_addr + 2 * DILATION_Y * src_stride_y, left, middle, right);
- values0 += left * (int8)(w2.s0);
- values0 += middle * (int8)(w2.s1);
- values0 += right * (int8)(w2.s2);
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- values1 += left * (int8)(w1.s0);
- values1 += middle * (int8)(w1.s1);
- values1 += right * (int8)(w1.s2);
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y == 1 */
-
-#if WEIGHTS_OFFSET != 0
- tmp = left + middle + right;
- sum0 += tmp;
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- sum1 += tmp;
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1 */
-#endif /* WEIGHTS_OFFSET != 0 */
-
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- // Row3
- GET_VALUES(src_addr + 3 * src_stride_y, left, middle, right);
- values1 += left * (int8)(w2.s0);
- values1 += middle * (int8)(w2.s1);
- values1 += right * (int8)(w2.s2);
-
-#if WEIGHTS_OFFSET != 0
- sum1 += left + middle + right;
-#endif /* WEIGHTS_OFFSET != 0 */
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y == 1 */
-
-#if defined(HAS_BIAS)
- values0 += (int8)(bias_value);
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- values1 += (int8)(bias_value);
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y == 1 */
-#endif //defined(HAS_BIAS)
-
-#if WEIGHTS_OFFSET != 0
- values0 += sum0 * (int8)(WEIGHTS_OFFSET);
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- values1 += sum1 * (int8)(WEIGHTS_OFFSET);
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1 */
-#endif /* WEIGHTS_OFFSET != 0 */
-
-#if INPUT_OFFSET != 0
- VEC_WEIGHTS_PROMOTED_TYPE(3)
- tmp_we = CONVERT(w0, VEC_WEIGHTS_PROMOTED_TYPE(3)) + CONVERT(w1, VEC_WEIGHTS_PROMOTED_TYPE(3)) + CONVERT(w2, VEC_WEIGHTS_PROMOTED_TYPE(3));
-
- WEIGHTS_PROMOTED_TYPE sum_weights = tmp_we.s0 + tmp_we.s1 + tmp_we.s2;
- values0 += sum_weights * (int8)(INPUT_OFFSET);
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- values1 += sum_weights * (int8)(INPUT_OFFSET);
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1 */
-#endif /* INPUT_OFFSET != 0 */
-
-#if K_OFFSET != 0
- values0 += (int8)(K_OFFSET);
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- values1 += (int8)(K_OFFSET);
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1*/
-#endif /* K_OFFSET != 0 */
-
-#if defined(REAL_MULTIPLIER)
-
- values0 = CONVERT(round(CONVERT(values0, float8) * (float8)REAL_MULTIPLIER), int8);
-
-#else // defined(REAL_MULTIPLIER)
-
-#if defined(PER_CHANNEL_QUANTIZATION)
- int8 res0_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values0, output_multiplier, output_shift, 8);
- int8 res0_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values0, output_multiplier, output_shift, 8);
- values0 = select(res0_shift_lt0, res0_shift_gt0, (int8)(output_shift) >= 0);
-#else // defined(PER_CHANNEL_QUANTIZATION)
-#if OUTPUT_SHIFT < 0
- values0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
-#else // OUTPUT_SHIFT < 0
- values0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
-#endif // OUTPUT_SHIFT < 0
-#endif // defined(PER_CHANNEL_QUANTIZATION)
-
-#endif // defined(REAL_MULTIPLIER)
-
- values0 += (int8)OUTPUT_OFFSET;
- VEC_TYPE(8)
- res0 = CONVERT_SAT(values0, VEC_TYPE(8));
-
- vstore8(ACTIVATION_FUNC(res0), 0, dst.ptr);
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
-#if defined(REAL_MULTIPLIER)
-
- values1 = CONVERT(round(CONVERT(values1, float8) * (float8)REAL_MULTIPLIER), int8);
-
-#else // defined(REAL_MULTIPLIER)
-
-#if defined(PER_CHANNEL_QUANTIZATION)
- int8 res1_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values1, output_multiplier, output_shift, 8);
- int8 res1_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values1, output_multiplier, output_shift, 8);
- values1 = select(res1_shift_lt0, res1_shift_gt0, (int8)(output_shift) >= 0);
-#else // defined(PER_CHANNEL_QUANTIZATION)
-#if OUTPUT_SHIFT < 0
- values1 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
-#else // OUTPUT_SHIFT < 0
- values1 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
-#endif // OUTPUT_SHIFT < 0
-#endif // defined(PER_CHANNEL_QUANTIZATION)
-
-#endif // defined(REAL_MULTIPLIER)
-
- values1 += (int8)OUTPUT_OFFSET;
- VEC_TYPE(8)
- res1 = CONVERT_SAT(values1, VEC_TYPE(8));
-
- vstore8(ACTIVATION_FUNC(res1), 0, dst.ptr + dst_stride_y);
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1*/
-}
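The three offset corrections come straight from expanding the quantized product: over N taps, the sum of (x_i + INPUT_OFFSET) * (w_i + WEIGHTS_OFFSET) equals the raw dot product plus WEIGHTS_OFFSET * sum(x_i) (the sum0/sum1 terms), plus INPUT_OFFSET * sum(w_i) (the sum_weights term), plus N * INPUT_OFFSET * WEIGHTS_OFFSET (K_OFFSET, with N = 9 for a 3x3 filter). A scalar check with hypothetical values and 3 taps (not library code):

__kernel void offset_algebra_sketch(__global int *out)
{
    const int I = 5, W = -2;              // stand-ins for INPUT_OFFSET / WEIGHTS_OFFSET
    const int x[3] = { 10, 20, 30 };
    const int w[3] = { 1, 2, 3 };
    int raw = 0, sum_x = 0, sum_w = 0, ref = 0;
    for (int i = 0; i < 3; ++i)
    {
        raw   += x[i] * w[i];             // what the kernel accumulates
        sum_x += x[i];
        sum_w += w[i];
        ref   += (x[i] + I) * (w[i] + W); // what the convolution actually needs
    }
    // Mirrors the kernel's corrections; K_OFFSET here is 3 * I * W.
    out[0] = (raw + W * sum_x + I * sum_w + 3 * I * W) - ref; // always 0
}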
-
-#else // !defined(IS_DOT8)
-
-#if DILATION_X == 1
-#if CONV_STRIDE_X == 1
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- VEC_TYPE(8) \
- temp0 = vload8(0, (__global DATA_TYPE *)(first_value)); \
- VEC_TYPE(2) \
- temp1 = vload2(0, (__global DATA_TYPE *)(first_value + 8 * sizeof(DATA_TYPE))); \
- \
- left = temp0.s01234567; \
- middle = (VEC_TYPE(8))(temp0.s1234, temp0.s567, temp1.s0); \
- right = (VEC_TYPE(8))(temp0.s2345, temp0.s67, temp1.s01); \
- })
-#elif CONV_STRIDE_X == 2
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- VEC_TYPE(16) \
- temp0 = vload16(0, (__global DATA_TYPE *)(first_value)); \
- DATA_TYPE temp1 = *((__global DATA_TYPE *)(first_value + 16 * sizeof(DATA_TYPE))); \
- \
- left = temp0.s02468ace; \
- middle = temp0.s13579bdf; \
- right = (VEC_TYPE(8))(temp0.s2468, temp0.sace, temp1); \
- })
-#else /* CONV_STRIDE_X */
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- VEC_TYPE(16) \
- temp0 = vload16(0, (__global DATA_TYPE *)(first_value)); \
- VEC_TYPE(8) \
- temp1 = vload8(0, (__global DATA_TYPE *)(first_value + 16 * sizeof(DATA_TYPE))); \
- \
- left = (VEC_TYPE(8))(temp0.s0369, temp0.scf, temp1.s25); \
- middle = (VEC_TYPE(8))(temp0.s147a, temp0.sd, temp1.s036); \
- right = (VEC_TYPE(8))(temp0.s258b, temp0.se, temp1.s147); \
- })
-#endif /* CONV_STRIDE_X */
-#else /*DILATION_X==1*/
-
-#if CONV_STRIDE_X == 1
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- left = vload8(0, (__global DATA_TYPE *)(first_value)); \
- middle = vload8(0, (__global DATA_TYPE *)(first_value + DILATION_X * sizeof(DATA_TYPE))); \
- right = vload8(0, (__global DATA_TYPE *)(first_value + 2 * DILATION_X * sizeof(DATA_TYPE))); \
- })
-#elif CONV_STRIDE_X == 2
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- VEC_TYPE(16) \
- temp0 = vload16(0, (__global DATA_TYPE *)(first_value)); \
- left = temp0.s02468ace; \
- temp0 = vload16(0, (__global DATA_TYPE *)(first_value + DILATION_X * sizeof(DATA_TYPE))); \
- middle = temp0.s02468ace; \
- temp0 = vload16(0, (__global DATA_TYPE *)(first_value + 2 * DILATION_X * sizeof(DATA_TYPE))); \
- right = temp0.s02468ace; \
- })
-#else /* CONV_STRIDE_X */
-#define GET_VALUES(first_value, left, middle, right) \
- ({ \
- VEC_TYPE(16) \
- temp0 = vload16(0, (__global DATA_TYPE *)(first_value)); \
- VEC_TYPE(8) \
- temp1 = vload8(0, (__global DATA_TYPE *)(first_value + 16 * sizeof(DATA_TYPE))); \
- left = (VEC_TYPE(8))(temp0.s0369, temp0.scf, temp1.s25); \
- \
- temp0 = vload16(0, (__global DATA_TYPE *)(first_value + DILATION_X * sizeof(DATA_TYPE))); \
- temp1 = vload8(0, (__global DATA_TYPE *)(first_value + (16 + DILATION_X) * sizeof(DATA_TYPE))); \
- middle = (VEC_TYPE(8))(temp0.s0369, temp0.scf, temp1.s25); \
- \
- temp0 = vload16(0, (__global DATA_TYPE *)(first_value + 2 * DILATION_X * sizeof(DATA_TYPE))); \
- temp1 = vload8(0, (__global DATA_TYPE *)(first_value + (16 + 2 * DILATION_X) * sizeof(DATA_TYPE))); \
- right = (VEC_TYPE(8))(temp0.s0369, temp0.scf, temp1.s25); \
- })
-
-#endif /* CONV_STRIDE_X */
-#endif /*DILATION_X==1*/
-/** This function computes the 3x3 quantized depthwise convolution using the dot product instruction when the data layout is NCHW.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] output_multipliers_ptr Pointer to the output multipliers vector. Supported data types: S32
- * @param[in] output_multipliers_stride_x Stride of the output multipliers vector in X dimension (in bytes)
- * @param[in] output_multipliers_step_x output_multipliers_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_multipliers_offset_first_element_in_bytes The offset of the first element in the output multipliers vector
- * @param[in] output_shifts_ptr Pointer to the output shifts vector. Supported data types: S32
- * @param[in] output_shifts_stride_x Stride of the output shifts vector in X dimension (in bytes)
- * @param[in] output_shifts_step_x output_shifts_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_shifts_offset_first_element_in_bytes The offset of the first element in the output shifts vector
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: S32
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-
-__kernel void dwc_3x3_native_quantized8_dot8_nchw(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights),
- VECTOR_DECLARATION(output_multipliers),
- VECTOR_DECLARATION(output_shifts)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif //defined(HAS_BIAS)
-)
-{
- __global uchar *src_addr = src_ptr + get_global_id(0) * src_step_x + get_global_id(1) * src_step_y + get_global_id(2) * src_step_z;
- Image dst = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
- Vector output_multipliers = CONVERT_TO_VECTOR_STRUCT_NO_STEP(output_multipliers);
- Vector output_shifts = CONVERT_TO_VECTOR_STRUCT_NO_STEP(output_shifts);
-
- // Extract channel and linearized batch indices
- const int channel = get_global_id(2) % DST_CHANNELS;
- const int batch = get_global_id(2) / DST_CHANNELS;
-
-#if defined(HAS_BIAS)
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-
- const int bias_value = *((__global int *)(vector_offset(&biases, channel)));
-#endif //defined(HAS_BIAS)
-
- // Load relevant input and weights data (Accounts for the depth multiplier when indexing the input, OFM = IFM * DEPTH_MULTIPLIER)
- src_addr -= batch * (DST_CHANNELS / DEPTH_MULTIPLIER) * (DEPTH_MULTIPLIER - 1) * src_step_z + (channel - (channel / DEPTH_MULTIPLIER)) * src_step_z;
- __global uchar *weights_addr = weights.ptr + get_global_id(0) * weights_step_x + get_global_id(1) * weights_step_y + channel * weights_step_z;
-
- VEC_TYPE(3)
- w0 = vload3(0, (__global WEIGHTS_TYPE *)(weights_addr + 0 * weights_stride_y));
- VEC_TYPE(3)
- w1 = vload3(0, (__global WEIGHTS_TYPE *)(weights_addr + 1 * weights_stride_y));
- VEC_TYPE(3)
- w2 = vload3(0, (__global WEIGHTS_TYPE *)(weights_addr + 2 * weights_stride_y));
-
- const int output_multiplier = *((__global int *)vector_offset(&output_multipliers, 0));
- const int output_shift = *((__global int *)vector_offset(&output_shifts, 0));
-
- VEC_TYPE(8)
- left0, middle0, right0;
- VEC_TYPE(8)
- left1, middle1, right1;
- VEC_TYPE(8)
- left2, middle2, right2;
-
- int8 values0 = 0;
- int8 sum0 = 0;
-
- GET_VALUES(src_addr + 0 * src_stride_y, left0, middle0, right0);
- GET_VALUES(src_addr + DILATION_Y * src_stride_y, left1, middle1, right1);
- GET_VALUES(src_addr + 2 * DILATION_Y * src_stride_y, left2, middle2, right2);
-
-#if WEIGHTS_OFFSET != 0
- sum0 += convert_int8(left0) + convert_int8(middle0) + convert_int8(right0);
- sum0 += convert_int8(left1) + convert_int8(middle1) + convert_int8(right1);
- sum0 += convert_int8(left2) + convert_int8(middle2) + convert_int8(right2);
-#endif /* WEIGHTS_OFFSET != 0 */
-
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- // If conv_stride_y equals 1, we compute two output rows
-
- VEC_TYPE(8)
- left3, middle3, right3;
- int8 values1 = 0;
- int8 sum1 = 0;
-
- GET_VALUES(src_addr + 3 * src_stride_y, left3, middle3, right3);
-
-#if WEIGHTS_OFFSET != 0
- sum1 += convert_int8(left1) + convert_int8(middle1) + convert_int8(right1);
- sum1 += convert_int8(left2) + convert_int8(middle2) + convert_int8(right2);
- sum1 += convert_int8(left3) + convert_int8(middle3) + convert_int8(right3);
-#endif /* WEIGHTS_OFFSET != 0 */
-#endif // CONV_STRIDE_Y == 1 && DILATION_Y==1
-
- ARM_DOT((VEC_TYPE(4))(left0.s0, middle0.s0, right0.s0, left1.s0), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s0);
- ARM_DOT((VEC_TYPE(4))(middle1.s0, right1.s0, left2.s0, middle2.s0), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s0);
- values0.s0 += right2.s0 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left0.s1, middle0.s1, right0.s1, left1.s1), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s1);
- ARM_DOT((VEC_TYPE(4))(middle1.s1, right1.s1, left2.s1, middle2.s1), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s1);
- values0.s1 += right2.s1 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left0.s2, middle0.s2, right0.s2, left1.s2), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s2);
- ARM_DOT((VEC_TYPE(4))(middle1.s2, right1.s2, left2.s2, middle2.s2), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s2);
- values0.s2 += right2.s2 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left0.s3, middle0.s3, right0.s3, left1.s3), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s3);
- ARM_DOT((VEC_TYPE(4))(middle1.s3, right1.s3, left2.s3, middle2.s3), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s3);
- values0.s3 += right2.s3 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left0.s4, middle0.s4, right0.s4, left1.s4), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s4);
- ARM_DOT((VEC_TYPE(4))(middle1.s4, right1.s4, left2.s4, middle2.s4), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s4);
- values0.s4 += right2.s4 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left0.s5, middle0.s5, right0.s5, left1.s5), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s5);
- ARM_DOT((VEC_TYPE(4))(middle1.s5, right1.s5, left2.s5, middle2.s5), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s5);
- values0.s5 += right2.s5 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left0.s6, middle0.s6, right0.s6, left1.s6), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s6);
- ARM_DOT((VEC_TYPE(4))(middle1.s6, right1.s6, left2.s6, middle2.s6), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s6);
- values0.s6 += right2.s6 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left0.s7, middle0.s7, right0.s7, left1.s7), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s7);
- ARM_DOT((VEC_TYPE(4))(middle1.s7, right1.s7, left2.s7, middle2.s7), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s7);
- values0.s7 += right2.s7 * w2.s2;
-
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- ARM_DOT((VEC_TYPE(4))(left1.s0, middle1.s0, right1.s0, left2.s0), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s0);
- ARM_DOT((VEC_TYPE(4))(middle2.s0, right2.s0, left3.s0, middle3.s0), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s0);
- values1.s0 += right3.s0 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left1.s1, middle1.s1, right1.s1, left2.s1), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s1);
- ARM_DOT((VEC_TYPE(4))(middle2.s1, right2.s1, left3.s1, middle3.s1), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s1);
- values1.s1 += right3.s1 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left1.s2, middle1.s2, right1.s2, left2.s2), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s2);
- ARM_DOT((VEC_TYPE(4))(middle2.s2, right2.s2, left3.s2, middle3.s2), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s2);
- values1.s2 += right3.s2 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left1.s3, middle1.s3, right1.s3, left2.s3), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s3);
- ARM_DOT((VEC_TYPE(4))(middle2.s3, right2.s3, left3.s3, middle3.s3), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s3);
- values1.s3 += right3.s3 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left1.s4, middle1.s4, right1.s4, left2.s4), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s4);
- ARM_DOT((VEC_TYPE(4))(middle2.s4, right2.s4, left3.s4, middle3.s4), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s4);
- values1.s4 += right3.s4 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left1.s5, middle1.s5, right1.s5, left2.s5), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s5);
- ARM_DOT((VEC_TYPE(4))(middle2.s5, right2.s5, left3.s5, middle3.s5), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s5);
- values1.s5 += right3.s5 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left1.s6, middle1.s6, right1.s6, left2.s6), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s6);
- ARM_DOT((VEC_TYPE(4))(middle2.s6, right2.s6, left3.s6, middle3.s6), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s6);
- values1.s6 += right3.s6 * w2.s2;
-
- ARM_DOT((VEC_TYPE(4))(left1.s7, middle1.s7, right1.s7, left2.s7), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s7);
- ARM_DOT((VEC_TYPE(4))(middle2.s7, right2.s7, left3.s7, middle3.s7), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s7);
- values1.s7 += right3.s7 * w2.s2;
-#endif // CONV_STRIDE_Y == 1 && DILATION_Y==1
-
-#if defined(HAS_BIAS)
- values0 += (int8)(bias_value);
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- values1 += (int8)(bias_value);
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1 */
-#endif //defined(HAS_BIAS)
-
-#if WEIGHTS_OFFSET != 0
- values0 += sum0 * (int8)(WEIGHTS_OFFSET);
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- values1 += sum1 * (int8)(WEIGHTS_OFFSET);
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1 */
-#endif /* WEIGHTS_OFFSET != 0 */
-
-#if INPUT_OFFSET != 0
- WEIGHTS_PROMOTED_TYPE sum_weights = 0;
- VEC_WEIGHTS_PROMOTED_TYPE(3)
- tmp_we = CONVERT(w0, VEC_WEIGHTS_PROMOTED_TYPE(3)) + CONVERT(w1, VEC_WEIGHTS_PROMOTED_TYPE(3)) + CONVERT(w2, VEC_WEIGHTS_PROMOTED_TYPE(3));
- sum_weights += tmp_we.s0 + tmp_we.s1 + tmp_we.s2;
- values0 += sum_weights * (int8)(INPUT_OFFSET);
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- values1 += sum_weights * (int8)(INPUT_OFFSET);
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1*/
-#endif /* INPUT_OFFSET != 0 */
-
-#if K_OFFSET != 0
- values0 += (int8)(K_OFFSET);
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
- values1 += (int8)(K_OFFSET);
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1*/
-#endif /* K_OFFSET != 0 */
-
-#if defined(REAL_MULTIPLIER)
-
- values0 = CONVERT(round(CONVERT(values0, float8) * (float8)REAL_MULTIPLIER), int8);
-
-#else // defined(REAL_MULTIPLIER)
-
-#if defined(PER_CHANNEL_QUANTIZATION)
- int8 res0_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values0, output_multiplier, output_shift, 8);
- int8 res0_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values0, output_multiplier, output_shift, 8);
- values0 = select(res0_shift_lt0, res0_shift_gt0, (int8)(output_shift) >= 0);
-#else // defined(PER_CHANNEL_QUANTIZATION)
-#if OUTPUT_SHIFT < 0
- values0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
-#else // OUTPUT_SHIFT < 0
- values0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
-#endif // OUTPUT_OFFSET < 0
-#endif // defined(PER_CHANNEL_QUANTIZATION)
-
-#endif // defined(REAL_MULTIPLIER)
-
- values0 += (int8)OUTPUT_OFFSET;
- VEC_TYPE(8)
- res0 = CONVERT_SAT(values0, VEC_TYPE(8));
-
- vstore8(ACTIVATION_FUNC(res0), 0, dst.ptr);
-#if CONV_STRIDE_Y == 1 && DILATION_Y == 1
-
-#if defined(REAL_MULTIPLIER)
-
- values1 = CONVERT(round(CONVERT(values1, float8) * (float8)REAL_MULTIPLIER), int8);
-
-#else // defined(REAL_MULTIPLIER)
-
-#if defined(PER_CHANNEL_QUANTIZATION)
- int8 res1_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values1, output_multiplier, output_shift, 8);
- int8 res1_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values1, output_multiplier, output_shift, 8);
- values1 = select(res1_shift_lt0, res1_shift_gt0, (int8)(output_shift) >= 0);
-#else // defined(PER_CHANNEL_QUANTIZATION)
-#if OUTPUT_SHIFT < 0
- values1 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
-#else // OUTPUT_SHIFT < 0
- values1 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
-#endif // OUTPUT_OFFSET < 0
-#endif // defined(PER_CHANNEL_QUANTIZATION)
-
-#endif // defined(REAL_MULTIPLIER)
-
- values1 += (int8)OUTPUT_OFFSET;
- VEC_TYPE(8)
- res1 = CONVERT_SAT(values1, VEC_TYPE(8));
-
- vstore8(ACTIVATION_FUNC(res1), 0, dst.ptr + dst_stride_y);
-#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1*/
-}
-
-#endif // !defined(IS_DOT8)
-
-#endif /* defined(CONV_STRIDE_Y) && defined(CONV_STRIDE_X) && defined(DEPTH_MULTIPLIER) && defined(DST_CHANNELS) */
-
-#if defined(VEC_SIZE) && defined(SRC_DIM_1) && defined(SRC_DIM_2) && defined(CONV_PAD_TOP) && defined(CONV_PAD_LEFT)
-
-#define asymm_mult_by_quant_multiplier_less_than_one(x, y, z) ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(x, y, z, VEC_SIZE)
-
-#define MULTIPLY_ADD(x, y, acc) acc += CONVERT(CONVERT(x, VEC_WEIGHTS_PROMOTED_TYPE(VEC_SIZE)) * CONVERT(y, VEC_WEIGHTS_PROMOTED_TYPE(VEC_SIZE)), VEC_INT)
-
-#if WEIGHTS_OFFSET != 0
-#define MULTIPLY_ADD_ACCUMULATE(x, y, acc, sum) \
- ({ \
- sum += CONVERT(x, VEC_INT); \
- MULTIPLY_ADD(x, y, acc); \
- })
-#else /* WEIGHTS_OFFSET != 0 */
-#define MULTIPLY_ADD_ACCUMULATE(x, y, acc, sum) MULTIPLY_ADD(x, y, acc)
-#endif /* WEIGHTS_OFFSET != 0 */
-
-#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
-#define DOT_PRODUCT(acc, val0, val1, val2, val3, val4, val5, val6, val7, val8, w0, w1) \
- ({ \
- ARM_DOT((VEC_TYPE(4))(val0, val1, val2, val3), w0.s0123, acc); \
- ARM_DOT((VEC_TYPE(4))(val4, val5, val6, val7), w0.s4567, acc); \
- acc += val8 * w1; \
- })
-
-#define DOT_PRODUCT_REDUCTION(sum, val0, val1, val2, val3, val4, val5, val6, val7, val8) \
- ({ \
- sum = val0; \
- ARM_DOT((VEC_TYPE(4))(val1, val2, val3, val4), (VEC_TYPE(4))1, sum); \
- ARM_DOT((VEC_TYPE(4))(val5, val6, val7, val8), (VEC_TYPE(4))1, sum); \
- })
-
-#define DOT_PRODUCT_REDUCTION_WEIGHTS(sum, w0, w1) \
- ({ \
- sum = w1; \
- ARM_DOT(w0.s0123, (VEC_TYPE(4))1, sum); \
- ARM_DOT(w0.s4567, (VEC_TYPE(4))1, sum); \
- })
-
-#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
-
-#endif // defined(VEC_SIZE) && defined(SRC_DIM_1) && defined(SRC_DIM_2) && defined(CONV_PAD_TOP) && defined(CONV_PAD_LEFT)
-
-#endif // defined(WEIGHTS_PROMOTED_TYPE)
-
-#endif // defined(WEIGHTS_OFFSET) && defined(INPUT_OFFSET) && defined(K_OFFSET) && ((defined(OUTPUT_OFFSET) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT)) || defined(REAL_MULTIPLIER))
-
-#if defined(SRC_DIM1) && defined(SRC_DIM2) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(N0) && defined(DILATION_X) && defined(DILATION_Y) && defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y) && defined(CONV_PAD_LEFT) && defined(CONV_PAD_TOP) && defined(INPUT_OFFSET) && defined(WEIGHTS_OFFSET) && defined(OUTPUT_OFFSET) && defined(OUTPUT_SHIFT) && defined(OUTPUT_MULTIPLIER) && defined(VEC_SIZE_LEFTOVER)
-/** This function computes the depthwise convolution for NHWC data layout.
- *
- * @note The number of elements processed must be passed at compile time using -DN0 (e.g. -DN0=2)
- * @note The depth multiplier must be passed at compile time using -DDEPTH_MULTIPLIER (e.g. -DDEPTH_MULTIPLIER=1)
- * @note The first dimension of the input tensor must be passed at compile time using -DSRC_DIM1 (e.g. -DSRC_DIM1=112)
- * @note The second dimension of the input tensor must be passed at compile time using -DSRC_DIM2 (e.g. -DSRC_DIM2=80)
- * @note The kernel width must be passed at compile time using -DKERNEL_WIDTH (e.g. -DKERNEL_WIDTH=5)
- * @note The kernel height must be passed at compile time using -DKERNEL_HEIGHT (e.g. -DKERNEL_HEIGHT=5)
- * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. -DCONV_PAD_TOP=1)
- * @note The convolution pad top must be passed at compile time using -DCONV_PAD_LEFT (e.g. -DCONV_PAD_LEFT=1)
- * @note The convolution stride along the width must be passed at compile time using -DCONV_STRIDE_X (e.g. -DCONV_STRIDE_Y=X)
- * @note The convolution stride along the height must be passed at compile time using -DCONV_STRIDE_Y (e.g. -DCONV_STRIDE_Y=1)
- * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
- * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_y * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
- * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
- * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] output_multipliers_ptr Pointer to the output multipliers vector. Supported data types: S32
- * @param[in] output_multipliers_stride_x Stride of the output multipliers vector in X dimension (in bytes)
- * @param[in] output_multipliers_step_x output_multipliers_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_multipliers_offset_first_element_in_bytes The offset of the first element in the output multipliers vector
- * @param[in] output_shifts_ptr Pointer to the output shifts vector. Supported data types: S32
- * @param[in] output_shifts_stride_x Stride of the output shifts vector in X dimension (in bytes)
- * @param[in] output_shifts_step_x output_shifts_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_shifts_offset_first_element_in_bytes The offset of the first element in the output shifts vector
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: S32
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-__kernel void dwc_MxN_native_quantized8_nhwc(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights),
- VECTOR_DECLARATION(output_multipliers),
- VECTOR_DECLARATION(output_shifts)
-#if defined(HAS_BIAS)
- ,
- VECTOR_DECLARATION(biases)
-#endif // defined(HAS_BIAS)
-)
-{
- int x_offs = max((int)(get_global_id(0) * N0 - (N0 - VEC_SIZE_LEFTOVER) % N0), 0);
- int y = get_global_id(1); // spatial coordinate x
-#if defined(DST_DEPTH)
- int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y
- int b = get_global_id(2) / (int)DST_DEPTH; // batch
-#else // defined(DST_DEPTH)
- int z = get_global_id(2); // spatial coordinate y
-#endif // defined(DST_DEPTH)
-
- __global uchar *s_addr = src_ptr + src_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE);
-
- __global uchar *d_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) * (int)DEPTH_MULTIPLIER + y * dst_stride_y + z * dst_stride_z;
-
- __global uchar *w_addr = weights_ptr + weights_offset_first_element_in_bytes + x_offs * sizeof(WEIGHTS_TYPE) * (int)DEPTH_MULTIPLIER;
-
-#if defined(HAS_BIAS)
- __global uchar *b_addr = biases_ptr + biases_offset_first_element_in_bytes + x_offs * sizeof(int) * (int)DEPTH_MULTIPLIER;
-#endif // defined(HAS_BIAS)
-
-#if defined(PER_CHANNEL_QUANTIZATION)
- __global uchar *out_mul_addr = output_multipliers_ptr + output_multipliers_offset_first_element_in_bytes + x_offs * sizeof(int) * (int)DEPTH_MULTIPLIER;
- __global uchar *out_shift_addr = output_shifts_ptr + output_shifts_offset_first_element_in_bytes + x_offs * sizeof(int) * (int)DEPTH_MULTIPLIER;
-#endif // defined(PER_CHANNEL_QUANTIZATION)
-
-#if defined(DST_DEPTH)
- s_addr += b * src_stride_w;
- d_addr += b * dst_stride_w;
-#endif // defined(DST_DEPTH)
-
-#if DEPTH_MULTIPLIER > 1
- for(int d = 0; d < (int)DEPTH_MULTIPLIER; ++d)
- {
-#endif // DEPTH_MULTIPLIER > 1
- // Each work-item computes N0x1x1 elements
- VEC_INT res = 0;
-
- int x_coord = y * CONV_STRIDE_X - (int)CONV_PAD_LEFT;
- int y_coord = z * CONV_STRIDE_Y - (int)CONV_PAD_TOP;
-
- for(int yk = 0; yk < KERNEL_HEIGHT; ++yk)
- {
- if(y_coord >= 0 && y_coord < SRC_DIM2)
- {
- int x_coord_tmp = x_coord;
-
- for(int xk = 0; xk < KERNEL_WIDTH; ++xk)
- {
- if(x_coord_tmp >= 0 && x_coord_tmp < SRC_DIM1)
- {
- int s_offset = x_coord_tmp * (int)src_stride_y + y_coord * (int)src_stride_z;
- int w_offset = xk * weights_stride_y + yk * weights_stride_z;
-
- // Load input and weights values
- VEC_INT i = CONVERT(VLOAD(N0)(0, (__global DATA_TYPE *)(s_addr + s_offset)), VEC_INT);
- VEC_INT w = CONVERT(VLOAD(N0)(0, (__global WEIGHTS_TYPE *)(w_addr + w_offset)), VEC_INT);
-
- res += (i + (VEC_INT)INPUT_OFFSET) * (w + (VEC_INT)WEIGHTS_OFFSET);
- }
- x_coord_tmp += DILATION_X;
- }
- }
- y_coord += DILATION_Y;
- }
-
-#if defined(HAS_BIAS)
- VEC_INT bias = VLOAD(N0)(0, (__global int *)(b_addr));
- res += bias;
-#endif // defined(HAS_BIAS)
-
-#if defined(PER_CHANNEL_QUANTIZATION)
- VEC_INT output_multiplier = VLOAD(N0)(0, (__global int *)(out_mul_addr));
- VEC_INT output_shift = VLOAD(N0)(0, (__global int *)(out_shift_addr));
-
- VEC_INT res_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(res, output_multiplier, output_shift, N0);
- VEC_INT res_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(res, output_multiplier, output_shift, N0);
- res = select(res_shift_lt0, res_shift_gt0, (VEC_INT)(output_shift) >= 0);
-#else // defined(PER_CHANNEL_QUANTIZATION)
-#if OUTPUT_SHIFT < 0
- res = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(res, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, N0);
-#else // OUTPUT_SHIFT < 0
- res = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(res, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, N0);
-#endif // OUTPUT_OFFSET < 0
-#endif // defined(PER_CHANNEL_QUANTIZATION)
-
- res += (VEC_INT)OUTPUT_OFFSET;
-
- VEC_TYPE(VEC_SIZE)
- res0 = CONVERT_SAT(res, VEC_TYPE(VEC_SIZE));
- res0 = ACTIVATION_FUNC(res0);
-
- STORE_VECTOR_SELECT(res, DATA_TYPE, d_addr, N0, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
-
-#if DEPTH_MULTIPLIER > 1
- w_addr += sizeof(WEIGHTS_TYPE);
- d_addr += sizeof(DATA_TYPE);
-#if defined(PER_CHANNEL_QUANTIZATION)
- out_mul_addr += sizeof(int);
- out_shift_addr += sizeof(int);
-#endif // defined(PER_CHANNEL_QUANTIZATION)
-#if defined(HAS_BIAS)
- b_addr += sizeof(int);
-#endif // defined(HAS_BIAS)
- }
-#endif // DEPTH_MULTIPLIER > 1
-}
-#endif // defined(SRC_DIM1) && defined(SRC_DIM2) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defiend(N0) && defined(DILATION_X) && defined(DILATION_Y) && defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y) && defined(CONV_PAD_LEFT) && defined(CONV_PAD_TOP) && defined(INPUT_OFFSET) && defined(WEIGHTS_OFFSET) && defined(OUTPUT_OFFSET) && defined(OUTPUT_SHIFT) && defined(OUTPUT_MULTIPLIER) && defined(VEC_SIZE_LEFTOVER)
-#endif // defined(DATA_TYPE) && defined(WEIGHTS_TYPE)
diff --git a/src/core/CL/cl_kernels/direct_convolution.cl b/src/core/CL/cl_kernels/direct_convolution.cl
index c5444cd7cc..75a7a0f004 100644
--- a/src/core/CL/cl_kernels/direct_convolution.cl
+++ b/src/core/CL/cl_kernels/direct_convolution.cl
@@ -32,10 +32,9 @@
*
* @note Data layout supported: NHWC
* @note Data type supported: F32/F16/QASYMM8/QASYMM8_SIGNED
- * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=half)
* @note The accumulation data type must be passed at compile time using -DACC_DATA_TYPE (e.g. -DDATA_TYPE_PROMOTED=half)
* @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
- * @note The convolution strides must be passed at compile time using -DSTRIDE and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
+ * @note The convolution strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y (e.g. -DSTRIDE_X=2, -DSTRIDE_Y=2)
* @note The spatial dimensions of the weights must be passed at compile time using -DWEI_WIDTH and -DWEI_HEIGHT (e.g. -DWEI_WIDTH=9, -DWEI_HEIGHT=9)
* @note The spatial dimensions of the source tensor must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT (e.g. -DSRC_WIDTH=96, -DSRC_HEIGHT=64)
* @note The spatial dimensions of the destination tensor must be passed at compile time using -DDST_WIDTH and -DDST_HEIGHT (e.g. -DDST_WIDTH=96, -DDST_HEIGHT=64)
diff --git a/src/core/CL/cl_kernels/dwc_native_fp_nhwc.cl b/src/core/CL/cl_kernels/dwc_native_fp_nhwc.cl
new file mode 100644
index 0000000000..1ec85f37d3
--- /dev/null
+++ b/src/core/CL/cl_kernels/dwc_native_fp_nhwc.cl
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "activation_float_helpers.h"
+#include "helpers.h"
+#include "helpers_asymm.h"
+#include "tile_helpers.h"
+
+#if defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(DST_WIDTH) && defined(DST_HEIGHT) && defined(WEI_WIDTH) && defined(WEI_HEIGHT) && defined(N0) && defined(M0) && defined(DILATION_X) && defined(DILATION_Y) && defined(STRIDE_X) && defined(STRIDE_Y) && defined(PAD_LEFT) && defined(PAD_TOP)
+//! @cond Doxygen_Suppress
+/** OpenCL kernel to compute the depthwise convolution for floating-point data types (F32/F16)
+ *
+ * @note Data layout supported: NHWC
+ * @note Data type supported: F32/F16
+ * @note The accumulation data type must be passed at compile time using -DACC_DATA_TYPE (e.g. -DACC_DATA_TYPE=half)
+ * @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
+ * @note The convolution strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y (e.g. -DSTRIDE_X=2, -DSTRIDE_Y=2)
+ * @note The convolution dilations must be passed at compile time using -DDILATION_X and -DDILATION_Y (e.g. -DDILATION_X=2, -DDILATION_Y=2)
+ * @note The spatial dimensions of the weights must be passed at compile time using -DWEI_WIDTH and -DWEI_HEIGHT (e.g. -DWEI_WIDTH=9, -DWEI_HEIGHT=9)
+ * @note The spatial dimensions of the source tensor must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT (e.g. -DSRC_WIDTH=96, -DSRC_HEIGHT=64)
+ * @note The spatial dimensions of the destination tensor must be passed at compile time using -DDST_WIDTH and -DDST_HEIGHT (e.g. -DDST_WIDTH=96, -DDST_HEIGHT=64)
+ * @note The channels of the source tensor must be passed at compile time using -DSRC_CHANNELS (e.g. -DSRC_CHANNELS=64)
+ * @note The channels of the destination tensor must be passed at compile time using -DDST_CHANNELS (e.g. -DDST_CHANNELS=64)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the source tensor must be passed at compile time using -DSRC_TENSOR_TYPE (e.g. -DSRC_TENSOR_TYPE=BUFFER)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the weights tensor must be passed at compile time using -DWEI_TENSOR_TYPE (e.g. -DWEI_TENSOR_TYPE=BUFFER)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the destination tensor must be passed at compile time using -DDST_TENSOR_TYPE (e.g. -DDST_TENSOR_TYPE=BUFFER)
+ * @note The data type of the source tensor must be passed at compile time using -DSRC_DATA_TYPE (e.g. -DSRC_DATA_TYPE=float)
+ * @note The data type of the weights tensor must be passed at compile time using -DWEI_DATA_TYPE (e.g. -DWEI_DATA_TYPE=float)
+ * @note The data type of the destination tensor must be passed at compile time using -DDST_DATA_TYPE (e.g. -DDST_DATA_TYPE=float)
+ * @note The data type of the accumulators must be passed at compile time using -DACC_DATA_TYPE (e.g. -DACC_DATA_TYPE=float)
+ * @note The number of M0 rows (width) to process must be passed at compile time using -DM0 (e.g. -DM0=2)
+ * @note The number of N0 output channels to process must be passed at compile time using -DN0 (e.g. -DN0=2)
+ * @note The size of the partial store block in the first dimension must be passed at compile time using -DPARTIAL_N0 (e.g. -DPARTIAL_N0=1)
+ * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=relu)
+ * @note The A and B variables required by some activation functions must be passed at compile time using -DA_VAL= and -DB_VAL= respectively
+ * @note Only the following configurations of M0 and N0 are currently supported:
+ *  - M0 = 1, 2, 3, 4, 5, ..., n (M0 != 1 is supported only with STRIDE_X == 1 && DILATION_X == 1)
+ *  - N0 = 2, 3, 4, 8, 16 (only 4, 8 and 16 if WEI_TENSOR_TYPE=IMAGE)
+ * @note The number of rows to read from the src tensor must be passed at compile time using -DM0_A (e.g., -DM0_A=3). M0_A must be equal to WEI_WIDTH + (M0 - 1); see the illustrative build options after this comment block
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data type: F16/F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data type: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] wei_ptr Pointer to the weights tensor. Supported data type: same as @p src_ptr
+ * @param[in] wei_stride_x Stride of the weights tensor in X dimension (in bytes)
+ * @param[in] wei_step_x wei_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] wei_stride_y Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] wei_step_y wei_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] wei_stride_z Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] wei_step_z wei_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] wei_stride_w Stride of the weights tensor in W dimension (in bytes)
+ * @param[in] wei_step_w wei_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] wei_offset_first_element_in_bytes The offset of the first element in the weights tensor
+ * @param[in] bia_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p src_ptr
+ * @param[in] bia_stride_x (Optional) Stride of the bias tensor in X dimension (in bytes)
+ * @param[in] bia_step_x (Optional) bia_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] bia_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
+ */
+//! @endcond
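+// Illustrative build options for this kernel (hypothetical values, shown only as an example; the
+// dimension defines documented above must be passed as well). For a 3x3 F16 depthwise convolution
+// with STRIDE_X = 1, M0 = 2 and N0 = 4:
+//   -DWEI_WIDTH=3 -DWEI_HEIGHT=3 -DSTRIDE_X=1 -DSTRIDE_Y=1 -DPAD_LEFT=1 -DPAD_TOP=1
+//   -DDILATION_X=1 -DDILATION_Y=1 -DM0=2 -DN0=4 -DM0_A=4 (M0_A = WEI_WIDTH + (M0 - 1) = 4)
+//   -DSRC_DATA_TYPE=half -DWEI_DATA_TYPE=half -DDST_DATA_TYPE=half -DACC_DATA_TYPE=half
+//   -DSRC_TENSOR_TYPE=BUFFER -DWEI_TENSOR_TYPE=BUFFER -DDST_TENSOR_TYPE=BUFFER -DPARTIAL_N0=0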
+__kernel void dwc_native_fp_nhwc(
+ TENSOR4D(src, SRC_TENSOR_TYPE),
+ TENSOR4D(dst, DST_TENSOR_TYPE),
+ TENSOR4D(wei, WEI_TENSOR_TYPE)
+#if defined(HAS_BIAS)
+ ,
+ VECTOR_DECLARATION(bia)
+#endif // defined(HAS_BIAS)
+)
+{
+ // All the tensor dimensions are passed at compile time.
+ // In case of dynamic tensor support, the following dimensions should be passed as function arguments.
+#define _IWEI_WIDTH WEI_WIDTH
+#define _IWEI_HEIGHT WEI_HEIGHT
+#define _ISRC_WIDTH SRC_WIDTH
+#define _ISRC_HEIGHT SRC_HEIGHT
+#define _IDST_WIDTH DST_WIDTH
+#define _IDST_HEIGHT DST_HEIGHT
+#define _IDST_CHANNELS DST_CHANNELS
+#define _IM0_A M0_A // _IWEI_WIDTH + (M0 - 1) Rows tile A (if M0 != 1, the tiles overlap by 1 element on the X dimension)
+#define _IN0_A N0 // Cols tile A
+#define _IM0_B _IWEI_WIDTH // Rows tile B
+#define _IN0_B N0 // Cols tile B
+#define _IBOUNDARY_CHECK (!((WEI_WIDTH == 1 && WEI_HEIGHT == 1 && PAD_LEFT == 0 && PAD_TOP == 0 && M0 == 1)))
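+// Note: the out-of-bound check in the loads below is compiled out only for the 1x1, zero-padding,
+// M0 == 1 case, where every read is guaranteed to be in-bounds by construction.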
+
+ const int cout = GET_SPATIAL_IDX(0, N0, PARTIAL_N0); // OFM
+ const int xo = GET_SPATIAL_IDX(1, M0, 0); // WIDTH
+#if defined(BATCHED_EXECUTION)
+ const int yo = GET_SPATIAL_IDX(2, 1, 0) % _IDST_HEIGHT; // HEIGHT
+ const int bout = GET_SPATIAL_IDX(2, 1, 0) / _IDST_HEIGHT; // BATCH SIZE IDX
+#else // defined(BATCHED_EXECUTION)
+ const int yo = GET_SPATIAL_IDX(2, 1, 0); // HEIGHT
+ const int bout = 0; // BATCH SIZE IDX
+#endif // defined(BATCHED_EXECUTION)
+
+ int xi = xo * STRIDE_X;
+ int yi = yo * STRIDE_Y;
+ xi -= PAD_LEFT;
+ yi -= PAD_TOP;
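+ // xi/yi are the top-left coordinates of the receptive field in the source tensor and can be
+ // negative (e.g. xo = 0 with PAD_LEFT = 1 gives xi = -1); positions that fail the boundary
+ // check below keep the zero value the tile is initialised with.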
+
+ int d = 0;
+#if DEPTH_MULTIPLIER != 1
+ for(; d < DEPTH_MULTIPLIER; d++)
+#endif // DEPTH_MULTIPLIER != 1
+ {
+ TILE(ACC_DATA_TYPE, M0, N0, c);
+
+ // Reset accumulators
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c[i].v = 0;
+ })
+
+ LOOP_UNROLLING(int, yk, 0, 1, _IWEI_HEIGHT,
+ {
+ TILE(SRC_DATA_TYPE, _IM0_A, _IN0_A, a);
+
+ LOOP_UNROLLING(int, i, 0, 1, _IM0_A,
+ {
+ a[i].v = 0;
+ })
+
+ // Load tile from the src tensor (TILE A)
+ T_LOAD_NHWC_WITH_DILATION(SRC_DATA_TYPE, 1, _IM0_A, _IN0_A, SRC_TENSOR_TYPE, src, bout, yi + yk * DILATION_Y, xi, cout, _ISRC_WIDTH, _ISRC_HEIGHT, DILATION_X, 1, _IBOUNDARY_CHECK, a);
+
+ TILE(WEI_DATA_TYPE, _IM0_B, _IN0_B, b);
+
+ // Load tile from the weights tensor (TILE B)
+ T_LOAD(WEI_DATA_TYPE, _IM0_B, _IN0_B, WEI_TENSOR_TYPE, wei, (cout * DEPTH_MULTIPLIER) + d, yk * _IM0_B, 1, wei_stride_y, b);
+
+ // Optimized path for STRIDE_X == 1
+ // If M0 != 1, we can skip the common loads between the two applied kernels on the X (WIDTH) dimension
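+ // Illustrative example: with WEI_WIDTH = 3 and M0 = 2 (so M0_A = 4), output row 0 reads
+ // a[0..2] and output row 1 reads a[1..3], so a[1] and a[2] are loaded only once.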
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, xk, 0, 1, _IWEI_WIDTH,
+ {
+ c[m0].v += a[xk + m0].v * b[xk].v;
+ })
+ })
+ })
+
+#if defined(HAS_BIAS)
+ TILE(BIA_DATA_TYPE, 1, N0, bias0);
+
+ T_LOAD(BIA_DATA_TYPE, 1, N0, BUFFER, bia, (cout * DEPTH_MULTIPLIER) + d, 0, 0, 0, bias0);
+
+ // c = c + bias[broadcasted]
+ T_ADD_BROADCAST_X(ACC_DATA_TYPE, M0, N0, c, bias0, c);
+#endif // HAS_BIAS
+
+ T_ACTIVATION(ACC_DATA_TYPE, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, c, c);
+
+ bool x_cond = PARTIAL_N0 != 0 && get_global_id(0) == 0;
+
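+ // The M0 rows are stored in reverse order (M0 - 1 ... 0) with the destination x coordinate
+ // clamped to the last valid position: rows that fall outside the destination width are
+ // written first to the clamped position and then overwritten by the valid row.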
+ if(x_cond)
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ int xi_out = min(xo + M0 - 1 - m0, (int)(_IDST_WIDTH) - 1);
+ VSTORE_PARTIAL(N0, PARTIAL_N0)
+ (c[M0 - 1 - m0].v, 0, (__global DST_DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + ((cout * DEPTH_MULTIPLIER) + d) * sizeof(DST_DATA_TYPE) + xi_out * dst_stride_y + yo * dst_stride_z + bout * dst_stride_w));
+ })
+ }
+ else
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ int xi_out = min(xo + M0 - 1 - m0, (int)(_IDST_WIDTH) - 1);
+ VSTORE(N0)
+ (c[M0 - 1 - m0].v, 0, (__global DST_DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + ((cout * DEPTH_MULTIPLIER) + d) * sizeof(DST_DATA_TYPE) + xi_out * dst_stride_y + yo * dst_stride_z + bout * dst_stride_w));
+ })
+ }
+ }
+}
+#endif // defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(DST_WIDTH) && defined(DST_HEIGHT) && defined(WEI_WIDTH) && defined(WEI_HEIGHT) && defined(N0) && defined(M0) && defined(DILATION_X) && defined(DILATION_Y) && defined(STRIDE_X) && defined(STRIDE_Y) && defined(PAD_LEFT) && defined(PAD_TOP) \ No newline at end of file
diff --git a/src/core/CL/cl_kernels/dwc_native_quantized_nhwc.cl b/src/core/CL/cl_kernels/dwc_native_quantized_nhwc.cl
new file mode 100644
index 0000000000..cd0f29230d
--- /dev/null
+++ b/src/core/CL/cl_kernels/dwc_native_quantized_nhwc.cl
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "helpers.h"
+#include "tile_helpers.h"
+
+#define CALCULATE_WEIGHTS_OFFSET_CORRECTION(A_DATA_TYPE, B_DATA_TYPE) CALCULATE_WEIGHTS_OFFSET_CORRECTION_STR(A_DATA_TYPE, B_DATA_TYPE)
+#define CALCULATE_WEIGHTS_OFFSET_CORRECTION_STR(A_DATA_TYPE, B_DATA_TYPE) CALCULATE_WEIGHTS_OFFSET_CORRECTION_##A_DATA_TYPE##_##B_DATA_TYPE
+#define CALCULATE_WEIGHTS_OFFSET_CORRECTION_char_char (0)
+#define CALCULATE_WEIGHTS_OFFSET_CORRECTION_uchar_uchar (0)
+#define CALCULATE_WEIGHTS_OFFSET_CORRECTION_uchar_char (128)
+#define CALCULATE_WEIGHTS_OFFSET_CORRECTION_char_uchar (-128)
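+// Illustrative example: for a QASYMM8 (uchar) source with QSYMM8_PER_CHANNEL (char) weights the
+// correction is +128, shifting the signed weights into the unsigned range before the uchar dot
+// product; the shift is compensated in the kernel via (WEI_OFFSET - WEI_OFFSET_CORRECTION).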
+
+#define T_LOAD_MULTIPLIERS_SHIFT_PER_TENSOR() \
+ ({})
+
+#define T_LOAD_MULTIPLIERS_SHIFT_PER_CHANNEL() \
+ TILE(DST_MULTIPLIERS_DATA_TYPE, 1, N0, multipliers); \
+ TILE(DST_SHIFTS_DATA_TYPE, 1, N0, shifts); \
+ T_LOAD(DST_MULTIPLIERS_DATA_TYPE, 1, N0, BUFFER, dst_multipliers, cout *DEPTH_MULTIPLIER + d, 0, 0, 0, multipliers); \
+ T_LOAD(DST_SHIFTS_DATA_TYPE, 1, N0, BUFFER, dst_shifts, cout *DEPTH_MULTIPLIER + d, 0, 0, 0, shifts);
+
+#define T_LOAD_MULTIPLIERS_SHIFT(QUANTIZATION_TYPE) T_LOAD_MULTIPLIERS_SHIFT_STR(QUANTIZATION_TYPE)
+#define T_LOAD_MULTIPLIERS_SHIFT_STR(QUANTIZATION_TYPE) T_LOAD_MULTIPLIERS_SHIFT_##QUANTIZATION_TYPE()
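+// With -DQUANTIZATION_TYPE=PER_CHANNEL, T_LOAD_MULTIPLIERS_SHIFT() declares and loads the
+// per-channel multipliers/shifts tiles consumed by T_QUANTIZE8; with PER_TENSOR it expands to
+// nothing and the compile-time DST_MULTIPLIER/DST_SHIFT constants are used instead.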
+
+#if defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(DST_WIDTH) && defined(DST_HEIGHT) && defined(WEI_WIDTH) && defined(WEI_HEIGHT) && defined(N0) && defined(M0) && defined(DILATION_X) && defined(DILATION_Y) && defined(STRIDE_X) && defined(STRIDE_Y) && defined(PAD_LEFT) && defined(PAD_TOP)
+//! @cond Doxygen_Suppress
+/** OpenCL kernel to compute the depthwise convolution for quantized data types
+ *
+ * @note Data layout supported: NHWC
+ * @note Data type supported: QSYMM8/QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL
+ * @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
+ * @note The convolution strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y (e.g. -DSTRIDE_X=2, -DSTRIDE_Y=2)
+ * @note The convolution dilations must be passed at compile time using -DDILATION_X and -DDILATION_Y (e.g. -DDILATION_X=2, -DDILATION_Y=2)
+ * @note The spatial dimensions of the weights must be passed at compile time using -DWEI_WIDTH and -DWEI_HEIGHT (e.g. -DWEI_WIDTH=9, -DWEI_HEIGHT=9)
+ * @note The spatial dimensions of the source tensor must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT (e.g. -DSRC_WIDTH=96, -DSRC_HEIGHT=64)
+ * @note The spatial dimensions of the destination tensor must be passed at compile time using -DDST_WIDTH and -DDST_HEIGHT (e.g. -DDST_WIDTH=96, -DDST_HEIGHT=64)
+ * @note The channels of the source tensor must be passed at compile time using -DSRC_CHANNELS (e.g. -DSRC_CHANNELS=64)
+ * @note The channels of the destination tensor must be passed at compile time using -DDST_CHANNELS (e.g. -DDST_CHANNELS=64)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the source tensor must be passed at compile time using -DSRC_TENSOR_TYPE (e.g. -DSRC_TENSOR_TYPE=BUFFER)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the weights tensor must be passed at compile time using -DWEI_TENSOR_TYPE (e.g. -DWEI_TENSOR_TYPE=BUFFER)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the destination tensor must be passed at compile time using -DDST_TENSOR_TYPE (e.g. -DDST_TENSOR_TYPE=BUFFER)
+ * @note The data type of the source tensor must be passed at compile time using -DSRC_DATA_TYPE (e.g. -DSRC_DATA_TYPE=char)
+ * @note The data type of the weights tensor must be passed at compile time using -DWEI_DATA_TYPE (e.g. -DWEI_DATA_TYPE=char)
+ * @note The data type of the destination tensor must be passed at compile time using -DDST_DATA_TYPE (e.g. -DDST_DATA_TYPE=char)
+ * @note The data type of the accumulators must be passed at compile time using -DACC_DATA_TYPE (e.g. -DACC_DATA_TYPE=int)
+ * @note The number of M0 rows (width) to process must be passed at compile time using -DM0 (e.g. -DM0=2)
+ * @note The number of N0 output channels to process must be passed at compile time using -DN0 (e.g. -DN0=2)
+ * @note The size of the partial store block in the first dimension must be passed at compile time using -DPARTIAL_N0 (e.g. -DPARTIAL_N0=1)
+ * @note The activation type must be passed at compile time using -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=relu)
+ * @note The A and B variables required by some activation functions must be passed at compile time using -DA_VAL= and -DB_VAL= respectively
+ * @note The quantization offset used for both the per-tensor and per-channel quantization must be passed at compile time using -DDST_OFFSET (e.g., -DDST_OFFSET=3)
+ * @note The quantization shift for the per-tensor quantization must be passed at compile time using -DDST_SHIFT (e.g., -DDST_SHIFT=1)
+ * @note The quantization multiplier for the per-tensor quantization must be passed at compile time using -DDST_MULTIPLIER (e.g., -DDST_MULTIPLIER=121432)
+ * @note Only the following configurations of M0 and N0 are currently supported:
+ *  - M0 = 1, 2, 3, 4, 5, ..., n (M0 != 1 is supported only with STRIDE_X == 1 && DILATION_X == 1)
+ *  - N0 = 2, 3, 4, 8, 16
+ * @note The number of rows to read from the src tensor must be passed at compile time using -DM0_A (e.g., -DM0_A=3). M0_A must be equal to WEI_WIDTH + (M0 - 1); see the illustrative build options after this comment block
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data type: QSYMM8/QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data type: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] wei_ptr Pointer to the weights tensor. Supported data type: same as @p src_ptr
+ * @param[in] wei_stride_x Stride of the weights tensor in X dimension (in bytes)
+ * @param[in] wei_step_x wei_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] wei_stride_y Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] wei_step_y wei_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] wei_stride_z Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] wei_step_z wei_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] wei_stride_w Stride of the weights tensor in W dimension (in bytes)
+ * @param[in] wei_step_w wei_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] wei_offset_first_element_in_bytes The offset of the first element in the weights tensor
+ * @param[in] dst_multipliers_ptr Pointer to the destination multipliers tensor for the per-channel quantization. Supported data type: S32
+ * @param[in] dst_multipliers_stride_x Stride of the destination multipliers tensor in X dimension (in bytes)
+ * @param[in] dst_multipliers_step_x dst_multipliers_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_multipliers_offset_first_element_in_bytes The offset of the first element in the destination multipliers tensor
+ * @param[in] dst_shifts_ptr Pointer to the destination shifts tensor for the per-channel quantization. Supported data type: S32
+ * @param[in] dst_shifts_stride_x Stride of the destination shifts tensor in X dimension (in bytes)
+ * @param[in] dst_shifts_step_x dst_shifts_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_shifts_offset_first_element_in_bytes The offset of the first element in the destination shifts tensor
+ * @param[in] bia_ptr (Optional) Pointer to the bias tensor. Supported data type: S32
+ * @param[in] bia_stride_x (Optional) Stride of the bias tensor in X dimension (in bytes)
+ * @param[in] bia_step_x (Optional) bia_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] bia_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
+ */
+//! @endcond
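+// Illustrative build options (hypothetical values, shown only as an example; the dimension
+// defines documented above must be passed as well) for a per-tensor quantized QASYMM8 case:
+//   -DSRC_DATA_TYPE=uchar -DWEI_DATA_TYPE=uchar -DDST_DATA_TYPE=uchar -DACC_DATA_TYPE=int
+//   -DQUANTIZATION_TYPE=PER_TENSOR -DZERO_VALUE=3 -DSRC_OFFSET=-3 -DWEI_OFFSET=0
+//   -DDST_OFFSET=5 -DDST_MULTIPLIER=1170510491 -DDST_SHIFT=-1
+// (ZERO_VALUE = -SRC_OFFSET, so that zero-padded elements contribute nothing to the accumulation)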
+__kernel void dwc_native_quantized_nhwc(
+ TENSOR4D(src, SRC_TENSOR_TYPE),
+ TENSOR4D(dst, DST_TENSOR_TYPE),
+ TENSOR4D(wei, WEI_TENSOR_TYPE),
+ VECTOR_DECLARATION(dst_multipliers),
+ VECTOR_DECLARATION(dst_shifts)
+#if defined(HAS_BIAS)
+ ,
+ VECTOR_DECLARATION(bia)
+#endif // defined(HAS_BIAS)
+)
+{
+ // All the tensor dimensions are passed at compile time.
+ // In case of dynamic tensor support, the following dimensions should be passed as function arguments.
+#define _IWEI_WIDTH WEI_WIDTH
+#define _IWEI_HEIGHT WEI_HEIGHT
+#define _ISRC_WIDTH SRC_WIDTH
+#define _ISRC_HEIGHT SRC_HEIGHT
+#define _IDST_WIDTH DST_WIDTH
+#define _IDST_HEIGHT DST_HEIGHT
+#define _IDST_CHANNELS DST_CHANNELS
+#define _IM0_A M0_A // _IWEI_WIDTH + (M0 - 1) Rows tile A (if M0 != 1, the tiles overlap by 1 element on the X dimension)
+#define _IN0_A N0 // Cols tile A
+#define _IM0_B _IWEI_WIDTH // Rows tile B
+#define _IN0_B N0 // Cols tile B
+#define _IBOUNDARY_CHECK (!((WEI_WIDTH == 1 && WEI_HEIGHT == 1 && PAD_LEFT == 0 && PAD_TOP == 0 && M0 == 1)))
+
+ const int cout = GET_SPATIAL_IDX(0, N0, PARTIAL_N0); // OFM
+ const int xo = GET_SPATIAL_IDX(1, M0, 0); // WIDTH
+#if defined(BATCHED_EXECUTION)
+ const int yo = GET_SPATIAL_IDX(2, 1, 0) % _IDST_HEIGHT; // HEIGHT
+ const int bout = GET_SPATIAL_IDX(2, 1, 0) / _IDST_HEIGHT; // BATCH SIZE IDX
+#else // defined(BATCHED_EXECUTION)
+ const int yo = GET_SPATIAL_IDX(2, 1, 0); // HEIGHT
+ const int bout = 0; // BATCH SIZE IDX
+#endif // defined(BATCHED_EXECUTION)
+
+ int xi = xo * STRIDE_X;
+ int yi = yo * STRIDE_Y;
+ xi -= PAD_LEFT;
+ yi -= PAD_TOP;
+
+ int d = 0;
+#if DEPTH_MULTIPLIER != 1
+ for(; d < DEPTH_MULTIPLIER; d++)
+#endif // DEPTH_MULTIPLIER != 1
+ {
+ TILE(ACC_DATA_TYPE, M0, N0, c);
+
+ // Reset accumulators
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c[i].v = 0;
+ })
+
+ LOOP_UNROLLING(int, yk, 0, 1, _IWEI_HEIGHT,
+ {
+ TILE(SRC_DATA_TYPE, _IM0_A, _IN0_A, a);
+
+ LOOP_UNROLLING(int, i, 0, 1, _IM0_A,
+ {
+ a[i].v = ZERO_VALUE;
+ })
+
+ // Load tile from the src tensor (TILE A)
+ T_LOAD_NHWC_WITH_DILATION(SRC_DATA_TYPE, 1, _IM0_A, _IN0_A, SRC_TENSOR_TYPE, src, bout, yi + yk * DILATION_Y, xi, cout, _ISRC_WIDTH, _ISRC_HEIGHT, DILATION_X, 1, _IBOUNDARY_CHECK, a);
+
+ TILE(WEI_DATA_TYPE, _IM0_B, _IN0_B, b);
+
+ // Load tile from the weights tensor (TILE B)
+ T_LOAD(WEI_DATA_TYPE, _IM0_B, _IN0_B, WEI_TENSOR_TYPE, wei, cout * DEPTH_MULTIPLIER + d, yk * _IM0_B, 1, wei_stride_y, b);
+
+ // Optimized path for STRIDE_X == 1
+ // If M0 != 1, we can skip the common loads between the two applied kernels on the X (WIDTH) dimension
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+#if _IWEI_WIDTH <= 16
+#define DOT_DATA_TYPE SRC_DATA_TYPE
+#define WEI_OFFSET_CORRECTION (CALCULATE_WEIGHTS_OFFSET_CORRECTION(SRC_DATA_TYPE, WEI_DATA_TYPE))
+
+ // Optimized path for the dot instruction
+ TILE(DOT_DATA_TYPE, 1, _IWEI_WIDTH, x0);
+ TILE(DOT_DATA_TYPE, 1, _IWEI_WIDTH, y0);
+ ACC_DATA_TYPE offset_a = 0;
+ ACC_DATA_TYPE offset_b = 0;
+
+ LOOP_UNROLLING(int, xk, 0, 1, _IWEI_WIDTH,
+ {
+ x0[0].s[xk] = a[xk + m0].s[n0];
+ y0[0].s[xk] = b[xk].s[n0] + (int)WEI_OFFSET_CORRECTION;
+ })
+ DOT_PRODUCT_INTEGER8(DOT_DATA_TYPE, DOT_DATA_TYPE, ACC_DATA_TYPE, _IWEI_WIDTH, x0[0].v, y0[0].v, c[m0].s[n0]);
+ REDUCE_INTEGER8(DOT_DATA_TYPE, DOT_DATA_TYPE, ACC_DATA_TYPE, _IWEI_WIDTH, x0[0].v, offset_a);
+ REDUCE_INTEGER8(DOT_DATA_TYPE, DOT_DATA_TYPE, ACC_DATA_TYPE, _IWEI_WIDTH, y0[0].v, offset_b);
+ c[m0].s[n0] += offset_a * (ACC_DATA_TYPE)(WEI_OFFSET - (ACC_DATA_TYPE)WEI_OFFSET_CORRECTION) + offset_b * (ACC_DATA_TYPE)SRC_OFFSET;
+#else // _IWEI_WIDTH <= 16
+ LOOP_UNROLLING(int, xk, 0, 1, _IWEI_WIDTH,
+ {
+ c[m0].s[n0] += ((ACC_DATA_TYPE)a[xk + m0].s[n0] + (ACC_DATA_TYPE)(SRC_OFFSET)) * ((ACC_DATA_TYPE)b[xk].s[n0] + (ACC_DATA_TYPE)(WEI_OFFSET));
+ })
+#endif // _IWEI_WIDTH <= 16
+ })
+ })
+ })
+
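+ // Derivation of the dot-product path above: with CORR = WEI_OFFSET_CORRECTION, the loop
+ // accumulates sum(a * (b + CORR)) + (WEI_OFFSET - CORR) * sum(a) + SRC_OFFSET * sum(b + CORR).
+ // Expanding the target sum((a + SRC_OFFSET) * (b + WEI_OFFSET)) shows that only the constant
+ // term WEI_WIDTH * WEI_HEIGHT * SRC_OFFSET * (WEI_OFFSET - CORR) is missing; it is added once here.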
+#if _IWEI_WIDTH <= 16
+ T_ADD_CONSTANT(ACC_DATA_TYPE, M0, N0, c, (_IWEI_WIDTH * _IWEI_HEIGHT * SRC_OFFSET * (ACC_DATA_TYPE)(WEI_OFFSET - (ACC_DATA_TYPE)WEI_OFFSET_CORRECTION)), c);
+#endif // _IWEI_WIDTH <= 16
+
+#if defined(HAS_BIAS)
+ TILE(BIA_DATA_TYPE, 1, N0, bias0);
+
+ // Load bias
+ T_LOAD(BIA_DATA_TYPE, 1, N0, BUFFER, bia, cout * DEPTH_MULTIPLIER + d, 0, 0, 0, bias0);
+
+ // c = c + bias[broadcasted]
+ T_ADD_BROADCAST_X(ACC_DATA_TYPE, M0, N0, c, bias0, c);
+#endif // HAS_BIAS
+
+ T_LOAD_MULTIPLIERS_SHIFT(QUANTIZATION_TYPE);
+
+ // Quantize the tile
+ TILE(DST_DATA_TYPE, M0, N0, cq);
+ T_QUANTIZE8(ACC_DATA_TYPE, DST_DATA_TYPE, QUANTIZATION_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, c, multipliers, shifts, cq);
+
+ // Perform activation
+ T_ACTIVATION_QUANTIZED(DST_DATA_TYPE, M0, N0, ACTIVATION_TYPE, DST_OFFSET, A_VAL, B_VAL, cq, cq);
+
+ bool x_cond = PARTIAL_N0 != 0 && get_global_id(0) == 0;
+
+ if(x_cond)
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ int xi_out = min(xo + M0 - 1 - m0, (int)(_IDST_WIDTH) - 1);
+ VSTORE_PARTIAL(N0, PARTIAL_N0)
+ (cq[M0 - 1 - m0].v, 0, (__global DST_DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + ((cout * DEPTH_MULTIPLIER) + d) * sizeof(DST_DATA_TYPE) + xi_out * dst_stride_y + yo * dst_stride_z + bout * dst_stride_w));
+ })
+ }
+ else
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ int xi_out = min(xo + M0 - 1 - m0, (int)(_IDST_WIDTH) - 1);
+ VSTORE(N0)
+ (cq[M0 - 1 - m0].v, 0, (__global DST_DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + ((cout * DEPTH_MULTIPLIER) + d) * sizeof(DST_DATA_TYPE) + xi_out * dst_stride_y + yo * dst_stride_z + bout * dst_stride_w));
+ })
+ }
+ }
+}
+#endif // defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(DST_WIDTH) && defined(DST_HEIGHT) && defined(WEI_WIDTH) && defined(WEI_HEIGHT) && defined(N0) && defined(M0) && defined(DILATION_X) && defined(DILATION_Y) && defined(STRIDE_X) && defined(STRIDE_Y) && defined(PAD_LEFT) && defined(PAD_TOP) \ No newline at end of file
diff --git a/src/core/CL/cl_kernels/tile_helpers.h b/src/core/CL/cl_kernels/tile_helpers.h
index 7910b4ce0e..4bff7e665c 100644
--- a/src/core/CL/cl_kernels/tile_helpers.h
+++ b/src/core/CL/cl_kernels/tile_helpers.h
@@ -25,6 +25,40 @@
// *INDENT-OFF*
// clang-format off
+#define TILE_VECTOR_SIZE1 1
+#define TILE_VECTOR_SIZE2 2
+#define TILE_VECTOR_SIZE3 3
+#define TILE_VECTOR_SIZE4 4
+#define TILE_VECTOR_SIZE5 8
+#define TILE_VECTOR_SIZE6 8
+#define TILE_VECTOR_SIZE7 8
+#define TILE_VECTOR_SIZE8 8
+#define TILE_VECTOR_SIZE9 16
+#define TILE_VECTOR_SIZE10 16
+#define TILE_VECTOR_SIZE11 16
+#define TILE_VECTOR_SIZE12 16
+#define TILE_VECTOR_SIZE13 16
+#define TILE_VECTOR_SIZE14 16
+#define TILE_VECTOR_SIZE15 16
+#define TILE_VECTOR_SIZE16 16
+
+#define TILE_VECTOR_TYPE1(DATA_TYPE) DATA_TYPE##1
+#define TILE_VECTOR_TYPE2(DATA_TYPE) DATA_TYPE##2
+#define TILE_VECTOR_TYPE3(DATA_TYPE) DATA_TYPE##3
+#define TILE_VECTOR_TYPE4(DATA_TYPE) DATA_TYPE##4
+#define TILE_VECTOR_TYPE5(DATA_TYPE) DATA_TYPE##8
+#define TILE_VECTOR_TYPE6(DATA_TYPE) DATA_TYPE##8
+#define TILE_VECTOR_TYPE7(DATA_TYPE) DATA_TYPE##8
+#define TILE_VECTOR_TYPE8(DATA_TYPE) DATA_TYPE##8
+#define TILE_VECTOR_TYPE9(DATA_TYPE) DATA_TYPE##16
+#define TILE_VECTOR_TYPE10(DATA_TYPE) DATA_TYPE##16
+#define TILE_VECTOR_TYPE11(DATA_TYPE) DATA_TYPE##16
+#define TILE_VECTOR_TYPE12(DATA_TYPE) DATA_TYPE##16
+#define TILE_VECTOR_TYPE13(DATA_TYPE) DATA_TYPE##16
+#define TILE_VECTOR_TYPE14(DATA_TYPE) DATA_TYPE##16
+#define TILE_VECTOR_TYPE15(DATA_TYPE) DATA_TYPE##16
+#define TILE_VECTOR_TYPE16(DATA_TYPE) DATA_TYPE##16
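+// OpenCL provides no 5/6/7- or 9..15-element vector types, so the tile width W is rounded up to
+// the next available vector size (8 or 16). The scalar array s[] below is sized to the rounded-up
+// width so that both union members overlay exactly the same storage.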
+
/** Tile object
* A tile object is a 2D memory block and can be accessed using the following syntax:
* -# a[m0].v = access the the vector at row "m0" (OpenCL vector)
@@ -38,8 +72,8 @@
#define TILE(DATA_TYPE, H, W, BASENAME) TILE_STR(DATA_TYPE, H, W, BASENAME)
#define TILE_STR(DATA_TYPE, H, W, BASENAME) \
union { \
- DATA_TYPE s[W]; \
- DATA_TYPE##W v; \
+ DATA_TYPE s[TILE_VECTOR_SIZE##W]; \
+ TILE_VECTOR_TYPE##W(DATA_TYPE) v; \
} BASENAME[H]
#define TENSOR4D_IMAGE(name) \
@@ -235,52 +269,122 @@
*
* @note Performs: c += dot(a, b)
*
- * @param[in] DST_DATA_TYPE Accumulator data type
- * @param[in] K0 Number of accumulations
- * @param[in] a OpenCL vector a
- * @param[in] b OpenCL vector b
- * @param[in] c Scalar variable c
+ * @param[in] A_DATA_TYPE A (lhs) data type
+ * @param[in] B_DATA_TYPE B (rhs) data type
+ * @param[in] C_DATA_TYPE C (accumulator) data type
+ * @param[in] K0 Number of accumulations
+ * @param[in] a OpenCL vector a
+ * @param[in] b OpenCL vector b
+ * @param[in] c Scalar variable c
*/
-#define DOT_PRODUCT_INTEGER8(DST_DATA_TYPE, K0, a, b, c) DOT_PRODUCT_INTEGER8_STR(DST_DATA_TYPE, K0, a, b, c)
-#define DOT_PRODUCT_INTEGER8_STR(DST_DATA_TYPE, K0, a, b, c) DOT_PRODUCT##K0##_INTEGER8(DST_DATA_TYPE, a, b, c)
-#define DOT_PRODUCT1_INTEGER8(DST_DATA_TYPE, a, b, c) \
+#define DOT_PRODUCT_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, b, c) DOT_PRODUCT_INTEGER8_STR(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, b, c)
+#define DOT_PRODUCT_INTEGER8_STR(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, b, c) DOT_PRODUCT##K0##_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c)
+#define DOT_PRODUCT1_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
({ \
- c += (DST_DATA_TYPE)a * (DST_DATA_TYPE)b; \
+ c += (C_DATA_TYPE)(a) * (C_DATA_TYPE)(b); \
})
-#define DOT_PRODUCT2_INTEGER8(DST_DATA_TYPE, a, b, c) \
+#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
+#define DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c = arm_dot_acc((A_DATA_TYPE##4)((a).s01, (A_DATA_TYPE##2)(0)), (B_DATA_TYPE##4)(((b).s01), (B_DATA_TYPE##2)(0)), (c));
+#define DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c = arm_dot_acc((A_DATA_TYPE##4)((a).s012, (A_DATA_TYPE)0), (B_DATA_TYPE##4)(((b).s012), (B_DATA_TYPE)0), (c));
+#define DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c = arm_dot_acc((a), (b), (c));
+#elif defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
+#define DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c += arm_dot((A_DATA_TYPE##4)((a).s01, (A_DATA_TYPE##2)(0)), (B_DATA_TYPE##4)(((b).s01), (B_DATA_TYPE##2)(0)));
+#define DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c += arm_dot((A_DATA_TYPE##4)((a).s012, (A_DATA_TYPE)0), (B_DATA_TYPE##4)(((b).s012), (B_DATA_TYPE)0));
+#define DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c += arm_dot((a), (b));
+#else // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
+#define DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
({ \
- c += (DST_DATA_TYPE)a.s0 * (DST_DATA_TYPE)b.s0; \
- c += (DST_DATA_TYPE)a.s1 * (DST_DATA_TYPE)b.s1; \
+ c += (C_DATA_TYPE)(a).s0 * (C_DATA_TYPE)(b).s0; \
+ c += (C_DATA_TYPE)(a).s1 * (C_DATA_TYPE)(b).s1; \
})
-#define DOT_PRODUCT3_INTEGER8(DST_DATA_TYPE, a, b, c) \
+#define DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
({ \
- DOT_PRODUCT2_INTEGER8(DST_DATA_TYPE, a, b, c); \
- c += (DST_DATA_TYPE)a.s2 * (DST_DATA_TYPE)b.s2; \
+ DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c); \
+ c += (C_DATA_TYPE)(a).s2 * (C_DATA_TYPE)(b).s2; \
})
-#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
-#define DOT_PRODUCT4_INTEGER8(DST_DATA_TYPE, x, y, val) val = arm_dot_acc((x), (y), (val));
-#elif defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
-#define DOT_PRODUCT4_INTEGER8(DST_DATA_TYPE, x, y, val) val += arm_dot((x), (y));
-#else // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
-#define DOT_PRODUCT4_INTEGER8(DST_DATA_TYPE, x, y, val) \
+#define DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, x, y, val) \
({ \
- val += (DST_DATA_TYPE)x.s0 * (DST_DATA_TYPE)y.s0; \
- val += (DST_DATA_TYPE)x.s1 * (DST_DATA_TYPE)y.s1; \
- val += (DST_DATA_TYPE)x.s2 * (DST_DATA_TYPE)y.s2; \
- val += (DST_DATA_TYPE)x.s3 * (DST_DATA_TYPE)y.s3; \
+ val += (C_DATA_TYPE)(x).s0 * (C_DATA_TYPE)(y).s0; \
+ val += (C_DATA_TYPE)(x).s1 * (C_DATA_TYPE)(y).s1; \
+ val += (C_DATA_TYPE)(x).s2 * (C_DATA_TYPE)(y).s2; \
+ val += (C_DATA_TYPE)(x).s3 * (C_DATA_TYPE)(y).s3; \
})
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
-#define DOT_PRODUCT8_INTEGER8(DST_DATA_TYPE, a, b, c) \
- ({ \
- DOT_PRODUCT4_INTEGER8(DST_DATA_TYPE, (a.lo), (b.lo), c); \
- DOT_PRODUCT4_INTEGER8(DST_DATA_TYPE, (a.hi), (b.hi), c); \
+#define DOT_PRODUCT5_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s0123), ((b).s0123), c); \
+ DOT_PRODUCT1_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s4), ((b).s4), c); \
+ })
+#define DOT_PRODUCT6_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s0123), ((b).s0123), c); \
+ DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s45), ((b).s45), c); \
+ })
+#define DOT_PRODUCT7_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s0123), ((b).s0123), c); \
+ DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s456), ((b).s456), c); \
+ })
+#define DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).lo), ((b).lo), c); \
+ DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).hi), ((b).hi), c); \
+ })
+#define DOT_PRODUCT9_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \
+ DOT_PRODUCT1_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s8), ((b).s8), c); \
+ })
+#define DOT_PRODUCT10_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \
+ DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89), ((b).s89), c); \
+ })
+#define DOT_PRODUCT11_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \
+ DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89A), ((b).s89A), c); \
+ })
+#define DOT_PRODUCT12_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \
+ DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89AB), ((b).s89AB), c); \
+ })
+#define DOT_PRODUCT13_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \
+ DOT_PRODUCT5_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89ABC), ((b).s89ABC), c); \
+ })
+#define DOT_PRODUCT14_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \
+ DOT_PRODUCT6_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89ABCD), ((b).s89ABCD), c); \
+ })
+#define DOT_PRODUCT15_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \
+ DOT_PRODUCT7_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89ABCDE), ((b).s89ABCDE), c); \
})
-#define DOT_PRODUCT16_INTEGER8(DST_DATA_TYPE, a, b, c) \
- ({ \
- DOT_PRODUCT8_INTEGER8(DST_DATA_TYPE, (a.lo), (b.lo), c); \
- DOT_PRODUCT8_INTEGER8(DST_DATA_TYPE, (a.hi), (b.hi), c); \
+#define DOT_PRODUCT16_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).lo), ((b).lo), c); \
+ DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).hi), ((b).hi), c); \
})
+/** Integer 8-bit reduction function (dot product against a vector of ones)
+ *
+ * @note Performs: c += sum(a)
+ *
+ * @param[in] A_DATA_TYPE A (lhs) data type
+ * @param[in] B_DATA_TYPE B (rhs) data type, used for the implicit vector of ones
+ * @param[in] C_DATA_TYPE C (accumulator) data type
+ * @param[in] K0 Number of accumulations
+ * @param[in] a OpenCL vector a
+ * @param[in] c Scalar variable c
+ */
+#define REDUCE_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, c) REDUCE_INTEGER8_STR(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, c)
+#define REDUCE_INTEGER8_STR(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, c) DOT_PRODUCT_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, (TILE_VECTOR_TYPE##K0(B_DATA_TYPE))1, c)
+
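
For reference, a usage sketch of the dot-product and reduction macros above (illustrative values; assumes this header is included in an OpenCL kernel):

    uchar8 a   = (uchar8)(1, 2, 3, 4, 5, 6, 7, 8);
    uchar8 b   = (uchar8)(2, 2, 2, 2, 2, 2, 2, 2);
    int    acc = 0;
    DOT_PRODUCT8_INTEGER8(uchar, uchar, int, a, b, acc); // acc += dot(a, b) -> 72
    REDUCE_INTEGER8(uchar, uchar, int, 8, a, acc);       // acc += sum(a)    -> 108
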
/** Load a vector from global memory (tensor)
*
* @param[in] DATA_TYPE Data type
@@ -296,7 +400,7 @@
#define V_LOAD_STR(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, Y, STRIDE_Y) V_LOAD_##TENSOR_TYPE(DATA_TYPE, WIDTH, TENSOR, X, Y, STRIDE_Y)
#define V_LOAD_BUFFER(DATA_TYPE, WIDTH, TENSOR, X, Y, STRIDE_Y) \
VLOAD(WIDTH) \
- (0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (Y)*STRIDE_Y))
+ (0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (Y) * (STRIDE_Y)))
#define V_LOAD_IMAGE(DATA_TYPE, WIDTH, TENSOR, X, Y, STRIDE_Y) READ_IMAGE2D(DATA_TYPE, CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(WIDTH), TENSOR##_img, (X) / 4, (Y))
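
The extra parentheses around STRIDE_Y in V_LOAD_BUFFER are a macro-hygiene fix: if the STRIDE_Y argument is itself an expression, the old expansion binds it incorrectly. A minimal illustration with hypothetical values:

    // With STRIDE_Y passed as the expression `stride + pad`:
    //   old expansion: ... + (Y)*stride + pad       -> `pad` is no longer scaled by Y
    //   new expansion: ... + (Y) * (stride + pad)   -> the intended byte offset
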
/** Load a tile from global memory (tensor)
@@ -379,6 +483,53 @@
}) \
})
+/** Load a tile from global memory (tensor) when the tensor is stored using an NHWC layout, with dilation applied to the X and Y increments
+ *
+ * @param[in] DATA_TYPE Data type
+ * @param[in] TILE_HEIGHT Number of elements to load from Y (height) dimension
+ * @param[in] TILE_WIDTH Number of elements to load from X (width) dimension
+ * @param[in] TILE_CHANNELS Number of elements to load from C (channel) dimension
+ * @param[in] TENSOR_TYPE Type of cl_type used to store the tensor in global memory (BUFFER=cl_buffer, IMAGE=cl_image). Currently only BUFFER is supported
+ * In case of cl_image, TILE_CHANNELS must be a multiple of 4 (4, 8, 16)
+ * @param[in] TENSOR Tensor basename
+ * @param[in] B Starting batch index
+ * @param[in] Y Starting Y index
+ * @param[in] X Starting X index
+ * @param[in] C Starting C index
+ * @param[in] TENSOR_WIDTH Width (X dimension) of the tensor, used for the boundary check
+ * @param[in] TENSOR_HEIGHT Height (Y dimension) of the tensor, used for the boundary check
+ * @param[in] DILATION_X Dilation for the X increment
+ * @param[in] DILATION_Y Dilation for the Y increment
+ * @param[in] BOUNDARY_CHECK Boundary check flag. If true, out-of-bounds reads are skipped and the corresponding tile elements are left unmodified
+ * @param[out] dst Output tile
+ */
+#define T_LOAD_NHWC_WITH_DILATION(DATA_TYPE, TILE_HEIGHT, TILE_WIDTH, TILE_CHANNELS, TENSOR_TYPE, TENSOR, B, Y, X, C, TENSOR_WIDTH, TENSOR_HEIGHT, DILATION_X, DILATION_Y, BOUNDARY_CHECK, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _yk, 0, 1, TILE_HEIGHT, \
+ { \
+ LOOP_UNROLLING(int, _xk, 0, 1, TILE_WIDTH, \
+ { \
+ int _src_y = (X) + _xk * (DILATION_X); \
+ int _src_z = ((Y) + _yk * (DILATION_Y)); \
+ int _src_w = (B); \
+ bool _src_valid_y = (((X) + _xk * (DILATION_X)) >= 0) && (((X) + _xk * (DILATION_X)) < (int)(TENSOR_WIDTH)) && (((Y) + _yk * (DILATION_Y)) >= 0) && (((Y) + _yk * (DILATION_Y)) < (int)(TENSOR_HEIGHT)); \
+ if(!(BOUNDARY_CHECK)) \
+ { \
+ dst[_xk + _yk * (TILE_WIDTH)].v = VLOAD(TILE_CHANNELS) \
+ (0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (C) * sizeof(DATA_TYPE) + (_src_y) * (TENSOR##_stride_y) + (_src_z) * (TENSOR##_stride_z) + (_src_w) * (TENSOR##_stride_w))); \
+ } \
+ else \
+ { \
+ if(_src_valid_y) \
+ { \
+ dst[_xk + _yk * (TILE_WIDTH)].v = VLOAD(TILE_CHANNELS) \
+ (0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (C) * sizeof(DATA_TYPE) + (_src_y) * (TENSOR##_stride_y) + (_src_z) * (TENSOR##_stride_z) + (_src_w) * (TENSOR##_stride_w))); \
+ } \
+ } \
+ }) \
+ }) \
+ })
+
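
A hypothetical call of the macro above (tensor and index names are illustrative): load a 3x3 spatial tile of 4 channels from an NHWC tensor `src`, with dilation 2 in both spatial directions and boundary checking enabled, into a tile declared with the TILE() helper defined earlier in this header:

    TILE(float, 9, 4, in); // 3x3 tile positions, 4 channels per position
    T_LOAD_NHWC_WITH_DILATION(float, 3, 3, 4, BUFFER, src, batch, yi, xi, ck,
                              SRC_WIDTH, SRC_HEIGHT, 2, 2, true, in);
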
/** Load a tile from global memory (tensor) when the tensor is stored using a NHWC layout using indirect X and Y coordinates
*
* @param[in] DATA_TYPE Data type
@@ -479,40 +630,160 @@
dst[_m0].s[_n0] += ((ACC_DATA_TYPE)rhs[_n0].s[_k0] * (ACC_DATA_TYPE)SRC_OFFSET); \
}) \
}) \
- }); \
+ }) \
+ })
+
+/** 8-bit quantization with fixed-point scale
+ *
+ * @param[in] SRC_DATA_TYPE SRC data type
+ * @param[in] DST_DATA_TYPE DST data type
+ * @param[in] QUANTIZATION_TYPE Quantization type (PER_TENSOR or PER_CHANNEL)
+ * @param[in] M0 Number of src/dst rows
+ * @param[in] N0 Number of src/dst columns
+ * @param[in] DST_OFFSET Quantization offset used for both the per-tensor and per-channel quantization
+ * @param[in] DST_SHIFT Quantization shift for the per-tensor quantization
+ * @param[in] DST_MULTIPLIER Quantization multiplier for the per-tensor quantization
+ * @param[in] src Input tile
+ * @param[in] dst_multipliers Output multipliers tile for the per-channel quantization
+ * @param[in] dst_shifts Output shift tile for the per-channel quantization
+ * @param[out] dst Output tile
+ */
+#define T_QUANTIZE8(SRC_DATA_TYPE, DST_DATA_TYPE, QUANTIZATION_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst) T_QUANTIZE8_STR(SRC_DATA_TYPE, DST_DATA_TYPE, QUANTIZATION_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst)
+#define T_QUANTIZE8_STR(SRC_DATA_TYPE, DST_DATA_TYPE, QUANTIZATION_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst) T_QUANTIZE8_##QUANTIZATION_TYPE(SRC_DATA_TYPE, DST_DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst)
+
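
Dispatch sketch for T_QUANTIZE8 (names and values illustrative): the QUANTIZATION_TYPE token is pasted onto the macro name, so PER_CHANNEL reads the dst_multipliers/dst_shifts tiles while PER_TENSOR uses the DST_MULTIPLIER/DST_SHIFT arguments instead:

    TILE(int, 2, 4, acc);    // int32 accumulators
    TILE(int, 1, 4, mults);  // per-channel multipliers
    TILE(int, 1, 4, shifts); // per-channel shifts
    TILE(uchar, 2, 4, out);  // quantized output
    T_QUANTIZE8(int, uchar, PER_CHANNEL, 2, 4, 10, 0, 0, acc, mults, shifts, out); // shift/multiplier args unused per-channel
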
+/** 8-bit per-tensor quantization with fixed-point scale
+ *
+ * @param[in] SRC_DATA_TYPE SRC data type
+ * @param[in] DST_DATA_TYPE DST data type
+ * @param[in] M0 Number of src/dst rows
+ * @param[in] N0 Number of src/dst columns
+ * @param[in] DST_OFFSET Quantization offset
+ * @param[in] DST_SHIFT Quantization shift for the per-tensor quantization
+ * @param[in] DST_MULTIPLIER Quantization multiplier for the per-tensor quantization
+ * @param[in] src Input tile
+ * @param[in] dst_multipliers (unused)
+ * @param[in] dst_shifts (unused)
+ * @param[out] dst Output tile
+ */
+#define T_QUANTIZE8_PER_TENSOR(SRC_DATA_TYPE, DST_DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
+ LOOP_UNROLLING(int, _n0, 0, 1, N0, \
+ { \
+ SRC_DATA_TYPE _tmp = 0; \
+ SRC_DATA_TYPE _src = src[_m0].s[_n0]; \
+ _src *= select((SRC_DATA_TYPE)1, ((SRC_DATA_TYPE)1 << (SRC_DATA_TYPE)(-DST_SHIFT)), ((SRC_DATA_TYPE)DST_SHIFT < (SRC_DATA_TYPE)0)); \
+ SRC_DATA_TYPE overflow = _src == DST_MULTIPLIER && _src == INT_MIN; \
+ long a_64 = (long)(_src); \
+ long b_64 = (long)(DST_MULTIPLIER); \
+ long ab_64 = a_64 * b_64; \
+ long mask1 = 1 << 30; \
+ long mask2 = 1 - (1 << 30); \
+ long is_positive_or_zero = ab_64 >= 0; \
+ long nudge = select(mask2, mask1, is_positive_or_zero); \
+ SRC_DATA_TYPE ab_x2_high32 = CONVERT((ab_64 + nudge) / (long)(1ll << 31), SRC_DATA_TYPE); \
+ _tmp = select(ab_x2_high32, (SRC_DATA_TYPE)INT_MAX, overflow); \
+ if(DST_SHIFT >= 0) \
+ { \
+ long mask = ((((int)1) << DST_SHIFT) - (int)1); \
+ long threshold = _tmp < (int)0 ? (mask >> 1) + (long)1 : (mask >> 1) + 0; \
+ _tmp = (_tmp & mask) > threshold ? (_tmp >> DST_SHIFT) + (int)1 : (_tmp >> DST_SHIFT); \
+ } \
+ _tmp += DST_OFFSET; \
+ dst[_m0].s[_n0] = CONVERT_SAT(_tmp, DST_DATA_TYPE); \
+ }) \
+ }) \
+ })
+
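
The arithmetic above mirrors gemmlowp-style requantization: a rounding-doubling high multiply followed by a rounding arithmetic right shift. A scalar C sketch of the same steps (not part of the patch), useful for checking the macro against reference values:

    #include <limits.h>
    #include <stdint.h>

    /* Scalar reference of T_QUANTIZE8_PER_TENSOR's fixed-point math (sketch). */
    static int32_t requantize_s32(int32_t acc, int32_t multiplier, int32_t shift, int32_t offset)
    {
        if(shift < 0) /* scale >= 1: pre-shift the accumulator left */
        {
            acc *= (int32_t)1 << -shift;
        }
        int     overflow = (acc == multiplier) && (acc == INT_MIN);
        int64_t ab       = (int64_t)acc * (int64_t)multiplier;
        int64_t nudge    = (ab >= 0) ? (1 << 30) : (1 - (1 << 30));
        int32_t tmp      = (int32_t)((ab + nudge) / (1ll << 31));
        if(overflow)
        {
            tmp = INT_MAX; /* saturate the INT_MIN * INT_MIN corner case */
        }
        if(shift >= 0) /* rounding arithmetic right shift */
        {
            int32_t mask      = ((int32_t)1 << shift) - 1;
            int32_t threshold = (mask >> 1) + ((tmp < 0) ? 1 : 0);
            tmp = (tmp >> shift) + (((tmp & mask) > threshold) ? 1 : 0);
        }
        return tmp + offset; /* the caller saturates to the 8-bit range */
    }
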
+/** 8-bit per-channel quantization with fixed-point scale
+ *
+ * @param[in] SRC_DATA_TYPE SRC data type
+ * @param[in] DST_DATA_TYPE DST data type
+ * @param[in] M0 Number of src/dst rows
+ * @param[in] N0 Number of src/dst columns
+ * @param[in] DST_OFFSET Quantization offset
+ * @param[in] DST_SHIFT (unused)
+ * @param[in] DST_MULTIPLIER (unused)
+ * @param[in] src Input tile
+ * @param[in] dst_multipliers Output multipliers tile for the per-channel quantization
+ * @param[in] dst_shifts Output shift tile for the per-channel quantization
+ * @param[out] dst Output tile
+ */
+#define T_QUANTIZE8_PER_CHANNEL(SRC_DATA_TYPE, DST_DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
+ LOOP_UNROLLING(int, _n0, 0, 1, N0, \
+ { \
+ SRC_DATA_TYPE _tmp = 0; \
+ SRC_DATA_TYPE _src = src[_m0].s[_n0]; \
+ SRC_DATA_TYPE _dst_multiplier = dst_multipliers[0].s[_n0]; \
+ SRC_DATA_TYPE _dst_shift = dst_shifts[0].s[_n0]; \
+ _src *= select((SRC_DATA_TYPE)1, ((SRC_DATA_TYPE)1 << (SRC_DATA_TYPE)(-_dst_shift)), ((SRC_DATA_TYPE)_dst_shift < (SRC_DATA_TYPE)0)); \
+ SRC_DATA_TYPE overflow = _src == _dst_multiplier && _src == INT_MIN; \
+ long a_64 = (long)(_src); \
+ long b_64 = (long)(_dst_multiplier); \
+ long ab_64 = a_64 * b_64; \
+ long mask1 = 1 << 30; \
+ long mask2 = 1 - (1 << 30); \
+ long is_positive_or_zero = ab_64 >= 0; \
+ long nudge = select(mask2, mask1, is_positive_or_zero); \
+ SRC_DATA_TYPE ab_x2_high32 = CONVERT((ab_64 + nudge) / (long)(1ll << 31), SRC_DATA_TYPE); \
+ _tmp = select(ab_x2_high32, (SRC_DATA_TYPE)INT_MAX, overflow); \
+ if(_dst_shift >= 0) \
+ { \
+ long mask = ((((int)1) << _dst_shift) - (int)1); \
+ long threshold = _tmp < (int)0 ? (mask >> 1) + (long)1 : (mask >> 1) + 0; \
+ _tmp = (_tmp & mask) > threshold ? (_tmp >> _dst_shift) + (int)1 : (_tmp >> _dst_shift); \
+ } \
+ _tmp += DST_OFFSET; \
+ dst[_m0].s[_n0] = CONVERT_SAT(_tmp, DST_DATA_TYPE); \
+ }) \
+ }) \
})
-/** Quantized the tile (ASYMMETRIC) with fixed-point scale
+/** Quantize the 8-bit tile with fixed-point scale (asymmetric quantization)
*
* @param[in] SRC_DATA_TYPE SRC data type
* @param[in] DST_DATA_TYPE DST data type
* @param[in] M0 Number of src/dst rows
* @param[in] N0 Number of src/dst columns
- * @param[in] DST_OFFSET Quantization offset
- * @param[in] DST_SHIFT Quantization shift
- * @param[in] DST_MULTIPLIER Quantization multiplier
+ * @param[in] DST_OFFSET Quantization offset used for both the per-tensor and per-channel quantization
+ * @param[in] DST_SHIFT Quantization shift for the per-tensor quantization
+ * @param[in] DST_MULTIPLIER Quantization multiplier for the per-tensor quantization
* @param[in] src Input tile
* @param[out] dst Output tile
*/
-#define T_QUANTIZE8_ASYMMETRIC(SRC_DATA_TYPE, DST_DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst) \
- ({ \
- LOOP_UNROLLING(int, _m0, 0, 1, M0, \
- { \
- LOOP_UNROLLING(int, _n0, 0, 1, N0, \
- { \
- SRC_DATA_TYPE _tmp = 0; \
- if(DST_SHIFT < 0) \
- { \
- _tmp = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(src[_m0].s[_n0], DST_MULTIPLIER, DST_SHIFT, 1); \
- } \
- else \
- { \
- _tmp = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(src[_m0].s[_n0], DST_MULTIPLIER, DST_SHIFT, 1); \
- } \
- _tmp += DST_OFFSET; \
- dst[_m0].s[_n0] = CONVERT_SAT(_tmp, DST_DATA_TYPE); \
- }) \
- }) \
+#define T_QUANTIZE8_ASYMMETRIC(SRC_DATA_TYPE, DST_DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
+ LOOP_UNROLLING(int, _n0, 0, 1, N0, \
+ { \
+ SRC_DATA_TYPE _tmp = 0; \
+ SRC_DATA_TYPE _src = src[_m0].s[_n0]; \
+ _src *= select((SRC_DATA_TYPE)1, ((SRC_DATA_TYPE)1 << (SRC_DATA_TYPE)(-DST_SHIFT)), ((SRC_DATA_TYPE)DST_SHIFT < (SRC_DATA_TYPE)0)); \
+ SRC_DATA_TYPE overflow = _src == DST_MULTIPLIER && _src == INT_MIN; \
+ long a_64 = (long)(_src); \
+ long b_64 = (long)(DST_MULTIPLIER); \
+ long ab_64 = a_64 * b_64; \
+ long mask1 = 1 << 30; \
+ long mask2 = 1 - (1 << 30); \
+ long is_positive_or_zero = ab_64 >= 0; \
+ long nudge = select(mask2, mask1, is_positive_or_zero); \
+ SRC_DATA_TYPE ab_x2_high32 = CONVERT((ab_64 + nudge) / (long)(1ll << 31), SRC_DATA_TYPE); \
+ _tmp = select(ab_x2_high32, (SRC_DATA_TYPE)INT_MAX, overflow); \
+ if(DST_SHIFT >= 0) \
+ { \
+ long mask = ((((int)1) << DST_SHIFT) - (int)1); \
+ long threshold = _tmp < (int)0 ? (mask >> 1) + (long)1 : (mask >> 1) + 0; \
+ _tmp = (_tmp & mask) > threshold ? (_tmp >> DST_SHIFT) + (int)1 : (_tmp >> DST_SHIFT); \
+ } \
+ _tmp += DST_OFFSET; \
+ dst[_m0].s[_n0] = CONVERT_SAT(_tmp, DST_DATA_TYPE); \
+ }) \
+ }) \
})
/** Conditional rowset (memset by row)
@@ -537,7 +808,7 @@
}) \
})
-/** Element-wise activation
+/** Element-wise activation for floating point types
*
* @note Performs: activation(LHS) = DST
*
@@ -558,6 +829,42 @@
}) \
})
+// RELU Activation
+#define relu_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) (max((DATA_TYPE)ZERO_VALUE, x))
+// Bounded RELU Activation
+#define brelu_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) (min((DATA_TYPE)A_VAL, max((DATA_TYPE)ZERO_VALUE, x)))
+// Lower Upper Bounded RELU Activation
+#define lu_brelu_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) (min(max(x, (DATA_TYPE)B_VAL), (DATA_TYPE)A_VAL))
+// Hard Swish Activation
+#define hard_swish_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) (x * ((min(max((DATA_TYPE)(x + (DATA_TYPE)3.f), (DATA_TYPE)0.f), (DATA_TYPE)6.f)) * (DATA_TYPE)0.166666667f))
+// Identity Activation
+#define identity_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) (x)
+
+#define ACT_OP_QUANTIZED(op, DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) op##_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x)
+#define ACTIVATION_QUANTIZED(op, DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) ACT_OP_QUANTIZED(op, DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x)
+
+/** Element-wise activation for quantized types
+ *
+ * @note Performs: activation(LHS) = DST
+ *
+ * @param[in] DATA_TYPE SRC/DST data type
+ * @param[in] M0 Number of SRC/DST rows
+ * @param[in] N0 Number of SRC/DST columns
+ * @param[in] ACTIVATION_TYPE Activation type
+ * @param[in] ZERO_VALUE The zero value to consider in the computation
+ * @param[in] A_VAL A value used for the activation (e.g. tanh, bounded relu)
+ * @param[in] B_VAL B value used for the activation (e.g. tanh, bounded relu)
+ * @param[in] src SRC tile
+ * @param[out] dst DST tile
+ */
+#define T_ACTIVATION_QUANTIZED(DATA_TYPE, M0, N0, ACTIVATION_TYPE, ZERO_VALUE, A_VAL, B_VAL, src, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
+ dst[_m0].v = ACTIVATION_QUANTIZED(ACTIVATION_TYPE, DATA_TYPE, N0, ZERO_VALUE, A_VAL, B_VAL, src[_m0].v); \
+ }) \
+ })
+
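
A hypothetical call of T_ACTIVATION_QUANTIZED (illustrative values): clamp a 1x16 uchar tile in place to the range [112, 192], i.e. a lower/upper bounded ReLU around a zero-point of 128:

    TILE(uchar, 1, 16, t);
    // ZERO_VALUE=128 (unused by lu_brelu), A_VAL=192 (upper bound), B_VAL=112 (lower bound)
    T_ACTIVATION_QUANTIZED(uchar, 1, 16, lu_brelu, 128, 192, 112, t, t);
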
/** Element-wise addition with a constant value
*
* @note Performs: LHS + constant = DST
@@ -617,13 +924,13 @@
* @param[in, out] dst DST tile
*/
#define T_MMUL(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, LHS_LAYOUT, RHS_LAYOUT, lhs, rhs, dst) T_MMUL_##LHS_LAYOUT##_##RHS_LAYOUT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
-#define T_MMUL_NT_T(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_##LHS_DATA_TYPE##_##RHS_DATA_TYPE##_##DST_DATA_TYPE(DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
-#define T_MMUL_NT_T_float_float_float(DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_FLOAT(DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
-#define T_MMUL_NT_T_half_half_half(DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_FLOAT(DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
-#define T_MMUL_NT_T_char_char_int(DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_INTEGER8(DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
-#define T_MMUL_NT_T_uchar_uchar_uint(DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_INTEGER8(DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
-#define T_MMUL_NT_T_uchar_uchar_int(DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_INTEGER8(DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
-#define T_MMUL_NT_T_FLOAT(DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) \
+#define T_MMUL_NT_T(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_##LHS_DATA_TYPE##_##RHS_DATA_TYPE##_##DST_DATA_TYPE(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_NT_T_float_float_float(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_NT_T_half_half_half(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_NT_T_char_char_int(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_NT_T_uchar_uchar_uint(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_NT_T_uchar_uchar_int(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_NT_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) \
{ \
LOOP_UNROLLING(int, _m, 0, 1, M0, \
{ \
@@ -636,15 +943,16 @@
}) \
}) \
}
-#define T_MMUL_NT_T_INTEGER8(DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) \
- ({ \
- LOOP_UNROLLING(int, _m, 0, 1, M0, \
- { \
- LOOP_UNROLLING(int, _n, 0, 1, N0, \
- { \
- DOT_PRODUCT_INTEGER8(DST_DATA_TYPE, K0, (lhs[_m].v), (rhs[_n].v), dst[_m].s[_n]); \
- }) \
- }) \
+
+#define T_MMUL_NT_T_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _m, 0, 1, M0, \
+ { \
+ LOOP_UNROLLING(int, _n, 0, 1, N0, \
+ { \
+ DOT_PRODUCT_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, K0, (lhs[_m].v), (rhs[_n].v), dst[_m].s[_n]); \
+ }) \
+ }) \
})
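
A hypothetical T_MMUL call hitting the reworked integer path (tile names illustrative): multiply a 4x16 signed 8-bit LHS tile by a transposed 4x16 RHS tile, accumulating into a 4x4 int tile, which must be initialised beforehand since the dot products accumulate:

    TILE(char, 4, 16, lhs);
    TILE(char, 4, 16, rhs);
    TILE(int, 4, 4, dst); // zero-initialise or preload with bias values
    T_MMUL(char, char, int, 4, 4, 16, NT, T, lhs, rhs, dst);
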
/** 8-bit quantization with fixed-point scale
@@ -757,4 +1065,4 @@
}) \
})
// clang-format on
-// *INDENT-ON*
\ No newline at end of file
+// *INDENT-ON*
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp
deleted file mode 100644
index dda70d2231..0000000000
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp
+++ /dev/null
@@ -1,432 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/CL/ICLKernel.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-using namespace arm_compute::misc::shape_calculator;
-
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D dilation,
- const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((act_info.enabled()) && (input->data_type() == DataType::QASYMM8 || input->data_type() == DataType::QASYMM8_SIGNED)
- && (act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
- && (act_info.activation() != ActivationLayerInfo::ActivationFunction::BOUNDED_RELU)
- && (act_info.activation() != ActivationLayerInfo::ActivationFunction::RELU)
- && (act_info.activation() != ActivationLayerInfo::ActivationFunction::LOGISTIC),
- "For QASYMM8 only logistic, relu, lower bounded relu and lower-upper bounded relu are supported");
- ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(0) != 3 || weights->dimension(1) != 3);
- ARM_COMPUTE_RETURN_ERROR_ON(conv_info.stride().first < 1 || conv_info.stride().first > 3);
-
- ARM_COMPUTE_RETURN_ERROR_ON((dilation.x() < 1) || (dilation.y() < 1));
-
- const bool is_qasymm = is_data_type_quantized_asymmetric(input->data_type());
-
- if(biases != nullptr)
- {
- if(is_qasymm)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
- }
- ARM_COMPUTE_RETURN_ERROR_ON((biases->dimension(0) != weights->dimension(2)) && (weights->dimension(2) != 1 || biases->dimension(0) != weights->dimension(3)));
- ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
- }
-
- if(is_qasymm)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output_multipliers, output_shifts);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_multipliers, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_shifts, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON(output_multipliers->num_dimensions() > 1);
- ARM_COMPUTE_RETURN_ERROR_ON(output_shifts->num_dimensions() > 1);
-
- if(is_data_type_quantized_per_channel(weights->data_type()))
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QSYMM8_PER_CHANNEL);
- ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(2) != output_multipliers->dimension(0));
- ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(2) != output_shifts->dimension(0));
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
- ARM_COMPUTE_RETURN_ERROR_ON(1 != output_multipliers->dimension(0));
- ARM_COMPUTE_RETURN_ERROR_ON(1 != output_shifts->dimension(0));
- }
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
- }
-
- if(output->total_size() != 0)
- {
- const ConvolutionInfo info{ conv_info, depth_multiplier, ActivationLayerInfo(), dilation };
- const TensorShape output_shape = compute_depthwise_convolution_shape(*input, *weights, info);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier, std::string &kernel_name, const Size2D dilation)
-{
- // Output auto inizialitation if not yet initialized
- const ConvolutionInfo info
- {
- conv_info, depth_multiplier, ActivationLayerInfo(), dilation
- };
- const TensorShape output_shape = compute_depthwise_convolution_shape(*input, *weights, info);
- auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape).set_quantization_info(output->quantization_info()));
-
- const unsigned int conv_stride_x = conv_info.stride().first;
- const unsigned int conv_stride_y = conv_info.stride().second;
- const bool is_qasymm = is_data_type_quantized_asymmetric(input->data_type());
-
- // Configure kernel window
- unsigned int num_elems_read_per_iteration_x = 0;
- unsigned int num_elems_read_per_iteration_y = 0;
- unsigned int num_elems_written_per_iteration_x = 0;
- unsigned int num_elems_written_per_iteration_y = 0;
-
- if(input->data_type() == DataType::F16)
- {
- kernel_name = "depthwise_convolution_3x3_f16";
- num_elems_written_per_iteration_x = 8 / data_size_from_type(input->data_type());
- num_elems_written_per_iteration_y = 1;
- num_elems_read_per_iteration_y = 3;
- switch(conv_stride_x)
- {
- case 1:
- num_elems_read_per_iteration_x = 8;
- break;
- case 2:
- num_elems_read_per_iteration_x = 9;
- break;
- case 3:
- num_elems_read_per_iteration_x = 16;
- break;
- default:
- num_elems_read_per_iteration_x = 3 + (num_elems_written_per_iteration_x - 1) * conv_stride_x;
- break;
- }
- if(conv_stride_x == 1 && conv_stride_y == 1)
- {
- kernel_name = "depthwise_convolution_3x3_stridex1_stridey1_f16";
- num_elems_read_per_iteration_x = 8;
- num_elems_written_per_iteration_x = 4;
- num_elems_read_per_iteration_y = 6;
- num_elems_written_per_iteration_y = 4;
- }
- else if(conv_stride_x == 2 && conv_stride_y == 2)
- {
- kernel_name = "depthwise_convolution_3x3_stridex2_stridey2_f16";
- num_elems_read_per_iteration_x = 10;
- num_elems_written_per_iteration_x = 4;
- num_elems_read_per_iteration_y = 5;
- num_elems_written_per_iteration_y = 2;
- }
- }
- else if(input->data_type() == DataType::F32)
- {
- if(conv_stride_x == 1 && conv_stride_y == 1)
- {
- kernel_name = "depthwise_convolution_3x3_stridex1_stridey1_f32";
- num_elems_read_per_iteration_x = 4;
- num_elems_read_per_iteration_y = 6;
- num_elems_written_per_iteration_x = 2;
- num_elems_written_per_iteration_y = 4;
- }
- else if(conv_stride_x == 2 && conv_stride_y == 2)
- {
- kernel_name = "depthwise_convolution_3x3_stridex2_stridey2_f32";
- num_elems_read_per_iteration_x = 6;
- num_elems_read_per_iteration_y = 5;
- num_elems_written_per_iteration_x = 2;
- num_elems_written_per_iteration_y = 2;
- }
- else
- {
- kernel_name = "depthwise_convolution_3x3";
- num_elems_written_per_iteration_x = 8 / data_size_from_type(input->data_type());
- num_elems_written_per_iteration_y = 1;
- num_elems_read_per_iteration_x = 3 + (num_elems_written_per_iteration_x - 1) * conv_stride_x;
- num_elems_read_per_iteration_y = 3;
- }
- }
- else
- {
- const bool is_dot8_supported = dot8_supported(CLKernelLibrary::get().get_device()) && !is_data_type_quantized_per_channel(weights->data_type());
-
- kernel_name = is_qasymm ? "dwc_3x3_native_quantized8" : "depthwise_convolution_3x3";
- kernel_name += (is_qasymm && is_dot8_supported ? "_dot8" : "");
- kernel_name += (is_qasymm ? "_nchw" : "");
-
- num_elems_written_per_iteration_x = 8 / data_size_from_type(input->data_type());
- num_elems_written_per_iteration_y = (is_qasymm && conv_stride_y == 1 && dilation.y() == 1) ? 2 : 1;
- num_elems_read_per_iteration_x = 3 + (num_elems_written_per_iteration_x - 1) * conv_stride_x + (conv_stride_x > 1 ? 1 : 0);
- num_elems_read_per_iteration_y = num_elems_written_per_iteration_y + 2;
- }
- // The OpenCL routine convolution1x3 does loadn(addr), loadn(addr + dilation_x) and loadn(addr + 2 * dilation_x) on the input.
- // Each of the three convolution1x3 gets called by passing addr, (addr + dilation_y) and (addr + 2 * dilation_y)
- // Hence we must add 2 * dilation.x/y() to the number of elements read in those axes per thread
- num_elems_read_per_iteration_x += 2 * dilation.x();
- num_elems_read_per_iteration_y += 2 * dilation.y();
-
- // Create window and update padding
- Window win = calculate_max_window(*output, Steps(num_elems_written_per_iteration_x, num_elems_written_per_iteration_y));
-
- AccessWindowRectangle input_access(input, -conv_info.pad_left(), -conv_info.pad_top(),
- num_elems_read_per_iteration_x, num_elems_read_per_iteration_y,
- conv_stride_x, conv_stride_y);
- AccessWindowStatic weights_access(weights, 0, 0, 3, 3);
- AccessWindowRectangle output_access(output, 0, 0, num_elems_written_per_iteration_x, num_elems_written_per_iteration_y);
-
- bool window_changed = update_window_and_padding(win, input_access, weights_access, output_access);
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
-}
-} // namespace
-
-CLDepthwiseConvolutionLayer3x3NCHWKernel::CLDepthwiseConvolutionLayer3x3NCHWKernel()
- : _border_size(0), _input(), _output(), _weights(), _biases(), _conv_stride_y(1), _output_multipliers(), _output_shifts(), _is_quantized(false), _conv_stride_x(0), _conv_pad_top(0), _conv_pad_left(0)
-{
-}
-
-BorderSize CLDepthwiseConvolutionLayer3x3NCHWKernel::border_size() const
-{
- return _border_size;
-}
-
-void CLDepthwiseConvolutionLayer3x3NCHWKernel::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation,
- const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation, output_multipliers, output_shifts);
-}
-
-void CLDepthwiseConvolutionLayer3x3NCHWKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation,
- const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(),
- conv_info, depth_multiplier, act_info, dilation,
- (output_multipliers != nullptr) ? output_multipliers->info() : nullptr,
- (output_shifts != nullptr) ? output_shifts->info() : nullptr));
-
- _input = input;
- _output = output;
- _weights = weights;
- _biases = biases;
- _conv_stride_x = conv_info.stride().first;
- _conv_stride_y = conv_info.stride().second;
- _conv_pad_left = conv_info.pad_left();
- _conv_pad_top = conv_info.pad_top();
- _output_multipliers = output_multipliers;
- _output_shifts = output_shifts;
- _is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
-
- // Configure kernel window
- std::string kernel_name;
-
- auto win_config = validate_and_configure_window(input->info(), weights->info(), output->info(), conv_info, depth_multiplier, kernel_name, dilation);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
-
- _border_size = BorderSize(input->info()->padding());
-
- // Set build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation())));
- build_opts.add_option("-DDST_CHANNELS=" + support::cpp11::to_string(_output->info()->tensor_shape().z()));
- build_opts.add_option("-DDEPTH_MULTIPLIER=" + support::cpp11::to_string(depth_multiplier));
- build_opts.add_option("-DCONV_STRIDE_X=" + support::cpp11::to_string(_conv_stride_x));
- build_opts.add_option("-DDILATION_X=" + support::cpp11::to_string(dilation.x()));
- build_opts.add_option("-DDILATION_Y=" + support::cpp11::to_string(dilation.y()));
- build_opts.add_option_if(_biases != nullptr, "-DHAS_BIAS");
-
- if(_is_quantized)
- {
- const UniformQuantizationInfo iq_info = _input->info()->quantization_info().uniform();
- const UniformQuantizationInfo wq_info = _weights->info()->quantization_info().uniform();
- const UniformQuantizationInfo oq_info = _output->info()->quantization_info().uniform();
-
- const bool is_quantized_per_channel = is_data_type_quantized_per_channel(weights->info()->data_type());
- const bool is_dot8_supported = dot8_supported(CLKernelLibrary::get().get_device()) && !is_quantized_per_channel;
- build_opts.add_option("-DCONV_STRIDE_Y=" + support::cpp11::to_string(_conv_stride_y));
- build_opts.add_option("-DINPUT_OFFSET=" + support::cpp11::to_string(-iq_info.offset));
- build_opts.add_option("-DWEIGHTS_OFFSET=" + support::cpp11::to_string(-wq_info.offset));
- build_opts.add_option("-DOUTPUT_OFFSET=" + support::cpp11::to_string(oq_info.offset));
- build_opts.add_option("-DK_OFFSET=" + support::cpp11::to_string(9 * iq_info.offset * wq_info.offset));
- build_opts.add_option_if(is_quantized_per_channel, "-DPER_CHANNEL_QUANTIZATION");
- build_opts.add_option_if(is_dot8_supported, "-DIS_DOT8");
-
- // Compute non-per-channel multiplier and shift anyway to make OpenCL kernel simpler
- float multiplier = iq_info.scale * wq_info.scale / oq_info.scale;
- int output_multiplier = 0;
- int output_shift = 0;
- quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
- build_opts.add_option("-DOUTPUT_MULTIPLIER=" + support::cpp11::to_string(output_multiplier));
- build_opts.add_option("-DOUTPUT_SHIFT=" + support::cpp11::to_string(output_shift));
-
- if(act_info.enabled())
- {
- int a_val{};
- int b_val{};
- std::tie(b_val, a_val) = get_quantized_activation_min_max(act_info, input->info()->data_type(), oq_info);
-
- const int o1 = oq_info.offset;
-
- build_opts.add_option("-DA_VAL=" + support::cpp11::to_string(a_val));
- build_opts.add_option("-DB_VAL=" + support::cpp11::to_string(b_val));
- build_opts.add_option("-DCONST_0=" + support::cpp11::to_string(o1));
-
- const float s1 = iq_info.scale;
- build_opts.add_option("-DS1_VAL=" + float_to_string_with_full_precision(s1));
- build_opts.add_option("-DO1_VAL=" + support::cpp11::to_string(o1));
- }
-
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
- build_opts.add_option("-DWEIGHTS_TYPE=" + get_cl_type_from_data_type(weights->info()->data_type()));
- build_opts.add_option("-DWEIGHTS_PROMOTED_TYPE=" + get_cl_promoted_type_from_data_type(weights->info()->data_type()));
- }
- else
- {
- build_opts.add_option_if(act_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(act_info.a()));
- build_opts.add_option_if(act_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(act_info.b()));
- build_opts.add_option_if(act_info.enabled(), "-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(win_config.second.x().step()));
- }
-
- build_opts.add_option_if(input->info()->data_type() == DataType::F16, "-DIS_F16");
- build_opts.add_option_if(input->info()->data_type() == DataType::F32, "-DIS_F32");
-
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Set config_id for enabling LWS tuning
- _config_id = kernel_name;
- _config_id += "_";
- _config_id += lower_string(string_from_data_type(input->info()->data_type()));
- _config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(1));
-}
-
-Status CLDepthwiseConvolutionLayer3x3NCHWKernel::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info,
- const Size2D &dilation, const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
-{
- std::string kernel_name;
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation, output_multipliers, output_shifts));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), weights->clone().get(), output->clone().get(),
- conv_info, depth_multiplier, kernel_name, dilation)
- .first);
-
- return Status{};
-}
-
-void CLDepthwiseConvolutionLayer3x3NCHWKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
-
- Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
-
- // Create input window and adjust
- Window collapsed_in = collapsed;
- collapsed_in.adjust(Window::DimX, -_conv_pad_left, true);
- collapsed_in.adjust(Window::DimY, -_conv_pad_top, true);
- collapsed_in.set_dimension_step(Window::DimX, collapsed_in.x().step() * _conv_stride_x);
- collapsed_in.set_dimension_step(Window::DimY, collapsed_in.y().step() * _conv_stride_y);
-
- Window slice_in = collapsed_in.first_slice_window_3D();
- Window slice_out = collapsed.first_slice_window_3D();
- Window slice_weights = window.first_slice_window_3D();
- slice_weights.set_dimension_step(Window::DimX, 0);
- slice_weights.set_dimension_step(Window::DimY, 0);
-
- unsigned int idx = 3 * num_arguments_per_3D_tensor();
-
- // Set output multipliers in case of quantized data type
- if(_is_quantized)
- {
- Window slice;
- slice.use_tensor_dimensions(_output_multipliers->info()->tensor_shape());
- add_1D_tensor_argument(idx, _output_multipliers, slice);
- add_1D_tensor_argument(idx, _output_shifts, slice);
- }
-
- // Set biases
- if(_biases != nullptr)
- {
- Window slice_biases;
- slice_biases.use_tensor_dimensions(_biases->info()->tensor_shape());
- add_1D_tensor_argument(idx, _biases, slice_biases);
- }
-
- do
- {
- idx = 0;
- add_3D_tensor_argument(idx, _input, slice_in);
- add_3D_tensor_argument(idx, _output, slice_out);
- add_3D_tensor_argument(idx, _weights, slice_weights);
-
- enqueue(queue, *this, slice_out, lws_hint());
- }
- while(collapsed.slide_window_slice_3D(slice_out) && collapsed_in.slide_window_slice_3D(slice_in));
-}
-} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h
deleted file mode 100644
index c4e475f6f2..0000000000
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLDEPTHWISECONVOLUTIONNCHWKERNEL3x3_H
-#define ARM_COMPUTE_CLDEPTHWISECONVOLUTIONNCHWKERNEL3x3_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the kernel to run a 3x3 depthwise convolution on a tensor when the data layout is NCHW.
- */
-class CLDepthwiseConvolutionLayer3x3NCHWKernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLDepthwiseConvolutionLayer3x3NCHWKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLDepthwiseConvolutionLayer3x3NCHWKernel(const CLDepthwiseConvolutionLayer3x3NCHWKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLDepthwiseConvolutionLayer3x3NCHWKernel &operator=(const CLDepthwiseConvolutionLayer3x3NCHWKernel &) = delete;
- /** Default Move Constructor. */
- CLDepthwiseConvolutionLayer3x3NCHWKernel(CLDepthwiseConvolutionLayer3x3NCHWKernel &&) = default;
- /** Default move assignment operator */
- CLDepthwiseConvolutionLayer3x3NCHWKernel &operator=(CLDepthwiseConvolutionLayer3x3NCHWKernel &&) = default;
- /** Initialize the function's source, destination, conv and border_size.
- *
- * @param[in] input Source tensor. DataType supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
- * @param[in] weights Weights tensor. A 3D tensor with dimensions [3, 3, IFM].
- * Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8/QASYMM8_SIGNED.
- * @param[in] biases Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
- * @param[out] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU for QASYMM8 supported.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- * @param[in] output_multipliers (Optional) Output multipliers tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- * @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- */
- void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U),
- const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr);
- /** Initialize the function's source, destination, conv and border_size.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Source tensor. DataType supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
- * @param[in] weights Weights tensor. A 3D tensor with dimensions [3, 3, IFM].
- * Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8/QASYMM8_SIGNED.
- * @param[in] biases Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
- * @param[out] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU for QASYMM8 supported.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- * @param[in] output_multipliers (Optional) Output multipliers tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- * @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U),
- const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr);
- /** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayer3x3NCHWKernel
- *
- * @param[in] input Source tensor info. DataType supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
- * @param[in] weights Weights tensor info. A 3D tensor with dimensions [3, 3, IFM].
- * Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8/QASYMM8_SIGNED.
- * @param[in] biases Biases tensor info. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
- * @param[in] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU are supported.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- * @param[in] output_multipliers (Optional) Output multipliers tensor info for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- * @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(),
- const Size2D &dilation = Size2D(1U, 1U), const ITensorInfo *output_multipliers = nullptr, const ITensorInfo *output_shifts = nullptr);
-
- void run(const Window &window, cl::CommandQueue &queue) override;
- BorderSize border_size() const override;
-
-private:
- BorderSize _border_size;
- const ICLTensor *_input;
- ICLTensor *_output;
- const ICLTensor *_weights;
- const ICLTensor *_biases;
- unsigned int _conv_stride_y;
- const ICLTensor *_output_multipliers;
- const ICLTensor *_output_shifts;
- bool _is_quantized;
-
- unsigned int _conv_stride_x;
- unsigned int _conv_pad_top;
- unsigned int _conv_pad_left;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLDEPTHWISECONVOLUTIONNCHWKERNEL3x3_H */
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp
deleted file mode 100644
index 91a2f5745a..0000000000
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/CL/ICLKernel.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation)
-{
- ARM_COMPUTE_UNUSED(act_info);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON(depth_multiplier > 1);
-
- ARM_COMPUTE_RETURN_ERROR_ON(conv_info.stride().first < 1);
- ARM_COMPUTE_RETURN_ERROR_ON(std::max(conv_info.pad_top(), conv_info.pad_bottom()) > 4);
-
- ARM_COMPUTE_RETURN_ERROR_ON((dilation.x() < 1) || (dilation.y() < 1));
-
- const size_t weights_width = 3;
- const size_t weights_height = 3;
-
- const ConvolutionInfo info{ conv_info, depth_multiplier, ActivationLayerInfo(), dilation };
-
- const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_depthwise_convolution_shape(
- *input, TensorInfo(TensorShape(weights_width, weights_height), 1, weights->data_type()).set_data_layout(DataLayout::NCHW), info);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
- ARM_COMPUTE_RETURN_ERROR_ON((weights->dimension(1) != weights_width) || (weights->dimension(2) != weights_height));
-
- if(biases != nullptr)
- {
- ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != output_shape[0]);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
-
- ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
- }
-
- if(output->total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *bias, ITensorInfo *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation)
-{
- ARM_COMPUTE_UNUSED(weights, bias);
- ARM_COMPUTE_UNUSED(depth_multiplier);
-
- const bool is_stride_1_dilation_1 = ((conv_info.stride().first == conv_info.stride().second) && (conv_info.stride().first == 1) && dilation.x() == 1 && dilation.y() == 1);
- unsigned int num_rows_processed_per_iteration = is_stride_1_dilation_1 ? 2 : 1;
-
- Window win{};
- Status err{};
-
- unsigned int num_elems_accessed_per_iteration = adjust_vec_size(4 / input->element_size(), input->dimension(0));
- win = calculate_max_window(*output, Steps(num_elems_accessed_per_iteration, num_rows_processed_per_iteration));
-
- return std::make_pair(err, win);
-}
-} // namespace
-
-CLDepthwiseConvolutionLayer3x3NHWCKernel::CLDepthwiseConvolutionLayer3x3NHWCKernel()
- : _input(), _output(), _weights(), _biases(), _num_planes_processed_per_iteration(1)
-{
-}
-
-void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
-}
-
-void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(),
- conv_info, depth_multiplier, act_info, dilation));
-
- auto padding_info = get_padding_info({ input, weights, biases, output });
-
- auto win_config = validate_and_configure_window(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(),
- conv_info, depth_multiplier, dilation);
-
- const bool is_stride_1 = ((conv_info.stride().first == conv_info.stride().second) && (conv_info.stride().first == 1));
- const bool is_stride_1_dilation_1 = (is_stride_1 && dilation.x() == 1 && dilation.y() == 1);
-
- _input = input;
- _output = output;
- _weights = weights;
- _biases = biases;
- _num_planes_processed_per_iteration = is_stride_1_dilation_1 ? 2 : 1;
-
- unsigned int num_elems_accessed_per_iteration = adjust_vec_size(4 / input->info()->element_size(), input->info()->dimension(0));
- unsigned int num_rows_processed_per_iteration = is_stride_1_dilation_1 ? 2 : 1;
-
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(_input->info()->data_type()));
- build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation())));
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_accessed_per_iteration));
- build_opts.add_option("-DSRC_DIM_1=" + support::cpp11::to_string(_input->info()->dimension(1)));
- build_opts.add_option("-DSRC_DIM_2=" + support::cpp11::to_string(_input->info()->dimension(2)));
- build_opts.add_option("-DCONV_PAD_TOP=" + support::cpp11::to_string(conv_info.pad_top()));
- build_opts.add_option("-DCONV_PAD_LEFT=" + support::cpp11::to_string(conv_info.pad_left()));
- build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(input->info()->dimension(0) % num_elems_accessed_per_iteration));
- build_opts.add_option_if(_biases != nullptr, "-DHAS_BIAS");
- build_opts.add_option_if(_input->info()->tensor_shape().total_size_upper(3) > 1,
- "-DDST_DEPTH=" + support::cpp11::to_string(static_cast<int>(std::ceil(_output->info()->dimension(2) / static_cast<float>(_num_planes_processed_per_iteration)))));
- build_opts.add_option_if(act_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(act_info.a()));
- build_opts.add_option_if(act_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(act_info.b()));
-
- if(is_stride_1_dilation_1)
- {
- build_opts.add_option("-DNUM_ROWS_PROCESSED=" + support::cpp11::to_string(num_rows_processed_per_iteration));
- build_opts.add_option("-DNUM_PLANES_PROCESSED=" + support::cpp11::to_string(_num_planes_processed_per_iteration));
- build_opts.add_option("-DDST_DIM_1=" + support::cpp11::to_string(_output->info()->dimension(1)));
- build_opts.add_option("-DDST_DIM_2=" + support::cpp11::to_string(_output->info()->dimension(2)));
- build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string((input->info()->dimension(1) + conv_info.pad_left() + conv_info.pad_right()) % num_rows_processed_per_iteration));
- }
- else
- {
- build_opts.add_option("-DCONV_STRIDE_X=" + support::cpp11::to_string(conv_info.stride().first));
- build_opts.add_option("-DCONV_STRIDE_Y=" + support::cpp11::to_string(conv_info.stride().second));
- build_opts.add_option("-DDILATION_X=" + support::cpp11::to_string(dilation.x()));
- build_opts.add_option("-DDILATION_Y=" + support::cpp11::to_string(dilation.y()));
- }
-
- // Create kernel
- std::string kernel_name;
- kernel_name = std::string("depthwise_convolution_3x3_nhwc");
- kernel_name += (is_stride_1_dilation_1 ? "_stride1" : "");
-
- ICLKernel::configure_internal(win_config.second);
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-
- // Set config_id for enabling LWS tuning
- _config_id = kernel_name;
- _config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(1));
- _config_id += "_";
- _config_id += string_from_data_type(input->info()->data_type());
-}
-
-Status CLDepthwiseConvolutionLayer3x3NHWCKernel::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), weights->clone().get(),
- biases != nullptr ? biases->clone().get() : nullptr,
- output->clone().get(), conv_info, depth_multiplier, dilation)
- .first);
- return Status{};
-}
-
-void CLDepthwiseConvolutionLayer3x3NHWCKernel::run(const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
-
- const size_t total_batches = _input->info()->tensor_shape().total_size_upper(3);
-
- Window win = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
- win.set(Window::DimZ, Window::Dimension(0, std::ceil(_output->info()->dimension(2) / static_cast<float>(_num_planes_processed_per_iteration)) * total_batches, 1));
-
- unsigned int idx = 2 * num_arguments_per_4D_tensor() + num_arguments_per_3D_tensor();
-
- if(_biases != nullptr)
- {
- Window win_biases;
- win_biases.use_tensor_dimensions(_biases->info()->tensor_shape());
- win_biases.set_dimension_step(Window::DimX, window.x().step());
- add_1D_tensor_argument(idx, _biases, win_biases);
- }
-
- Window slice = win.first_slice_window_4D();
- do
- {
- unsigned int idx = 0;
- add_4D_tensor_argument(idx, _input, slice);
- add_4D_tensor_argument(idx, _output, slice);
- add_3D_tensor_argument(idx, _weights, slice);
-
- enqueue(queue, *this, slice, lws_hint());
- }
- while(win.slide_window_slice_4D(slice));
-}
-} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h
deleted file mode 100644
index ee47d98807..0000000000
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLDEPTHWISECONVOLUTIONNHWCKERNEL3x3_H
-#define ARM_COMPUTE_CLDEPTHWISECONVOLUTIONNHWCKERNEL3x3_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the kernel to run a 3x3 depthwise convolution on a tensor when the data layout is NHWC.
- */
-class CLDepthwiseConvolutionLayer3x3NHWCKernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLDepthwiseConvolutionLayer3x3NHWCKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLDepthwiseConvolutionLayer3x3NHWCKernel(const CLDepthwiseConvolutionLayer3x3NHWCKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLDepthwiseConvolutionLayer3x3NHWCKernel &operator=(const CLDepthwiseConvolutionLayer3x3NHWCKernel &) = delete;
- /** Default Move Constructor. */
- CLDepthwiseConvolutionLayer3x3NHWCKernel(CLDepthwiseConvolutionLayer3x3NHWCKernel &&) = default;
- /** Default move assignment operator */
- CLDepthwiseConvolutionLayer3x3NHWCKernel &operator=(CLDepthwiseConvolutionLayer3x3NHWCKernel &&) = default;
- /** Default move assignment operator. */
- /** Initialize the function's source, destination, conv and border_size.
- *
- * @param[in] input Source tensor. DataType supported: F16/F32.
- * @param[in] weights Weights tensor. A 3D tensor with dimensions [IFM, 3, 3].
- * Data type supported: Same as @p input.
- * @param[in] biases Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input.
- * @param[out] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU are supported.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- */
- void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U));
- /** Initialize the function's source, destination, conv and border_size.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Source tensor. DataType supported: F16/F32.
- * @param[in] weights Weights tensor. A 3D tensor with dimensions [IFM, 3, 3].
- * Data type supported: Same as @p input.
- * @param[in] biases Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input.
- * @param[out] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU are supported.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U));
- /** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayer3x3NHWCKernel
- *
- * @param[in] input Source tensor info. DataType supported: F16/F32.
- * @param[in] weights Weights tensor info. A 3D tensor with dimensions [IFM, 3, 3].
- * Data type supported: Same as @p input.
- * @param[in] biases Biases tensor info. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input.
- * @param[in] output Destination tensor info. Data type supported: Same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU are supported.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U));
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_input;
- ICLTensor *_output;
- const ICLTensor *_weights;
- const ICLTensor *_biases;
-
- unsigned int _num_planes_processed_per_iteration;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLDEPTHWISECONVOLUTIONNHWCKERNEL3x3_H */
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp
index eb1cf146af..4bde303f1e 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp
@@ -31,8 +31,10 @@
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
+#include "src/core/CL/CLUtils.h"
#include "src/core/CL/CLValidate.h"
#include "src/core/CL/ICLKernel.h"
+#include "src/core/gpu/cl/kernels/gemm/ClGemmHelpers.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/StringSupport.h"
@@ -41,9 +43,8 @@ namespace arm_compute
{
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const DWCWeightsKernelInfo &dwc_weights_info,
- const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation,
- const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const DWCComputeKernelInfo &dwc_info,
+ const ConvolutionInfo &conv_info, const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
{
ARM_COMPUTE_UNUSED(dwc_info);
bool in_place = false;
@@ -56,13 +57,18 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(input, DataLayout::NHWC);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON(depth_multiplier > 1 && dwc_weights_info.n0 != 1);
- ARM_COMPUTE_RETURN_ERROR_ON(conv_info.stride().first < 1);
- ARM_COMPUTE_RETURN_ERROR_ON(conv_info.stride().second < 1);
- ARM_COMPUTE_RETURN_ERROR_ON((dilation.x() < 1) || (dilation.y() < 1));
+ ARM_COMPUTE_RETURN_ERROR_ON(conv_info.depth_multiplier > 1 && dwc_info.n0 != 1);
+ ARM_COMPUTE_RETURN_ERROR_ON(conv_info.pad_stride_info.stride().first > 1 && dwc_info.m0 != 1);
+ ARM_COMPUTE_RETURN_ERROR_ON(conv_info.dilation.x() > 1 && dwc_info.m0 != 1);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((dwc_info.export_weights_to_cl_image == true) && (export_weights_to_cl_image(weights) == false), "Export to cl_image not supported!");
+ ARM_COMPUTE_RETURN_ERROR_ON((dwc_info.export_weights_to_cl_image == true) && (conv_info.depth_multiplier > 1));
+ ARM_COMPUTE_RETURN_ERROR_ON((dwc_info.export_weights_to_cl_image == true) && ((dwc_info.n0 % 4) != 0));
+ ARM_COMPUTE_RETURN_ERROR_ON(conv_info.pad_stride_info.stride().first < 1);
+ ARM_COMPUTE_RETURN_ERROR_ON(conv_info.pad_stride_info.stride().second < 1);
+ ARM_COMPUTE_RETURN_ERROR_ON((conv_info.dilation.x() < 1) || (conv_info.dilation.y() < 1));
const size_t idx_c = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL);
ARM_COMPUTE_UNUSED(idx_c);
- ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_c) != (input->dimension(idx_c) * depth_multiplier));
+ ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_c) != (input->dimension(idx_c) * conv_info.depth_multiplier));
// In place restrictions
if(in_place)
@@ -70,14 +76,14 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
const int weights_width_idx = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::WIDTH);
const int weights_height_idx = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::HEIGHT);
ARM_COMPUTE_RETURN_ERROR_ON(weights->tensor_shape()[weights_width_idx] != 1U || weights->tensor_shape()[weights_height_idx] != 1U);
- ARM_COMPUTE_RETURN_ERROR_ON(depth_multiplier != 1U);
- ARM_COMPUTE_RETURN_ERROR_ON(conv_info.stride() != std::make_pair(1U, 1U));
- ARM_COMPUTE_RETURN_ERROR_ON(dilation != Size2D(1U, 1U));
- ARM_COMPUTE_RETURN_ERROR_ON(conv_info.has_padding()); // Note that in princple padding can be supported with in_place but we choose not to support it
+ ARM_COMPUTE_RETURN_ERROR_ON(conv_info.depth_multiplier != 1U);
+ ARM_COMPUTE_RETURN_ERROR_ON(conv_info.pad_stride_info.stride() != std::make_pair(1U, 1U));
+ ARM_COMPUTE_RETURN_ERROR_ON(conv_info.dilation != Size2D(1U, 1U));
+ ARM_COMPUTE_RETURN_ERROR_ON(conv_info.pad_stride_info.has_padding()); // Note that in principle padding can be supported with in_place but we choose not to support it
}
- const ConvolutionInfo info{ conv_info, depth_multiplier, ActivationLayerInfo(), dilation };
- const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_depthwise_convolution_shape(*input, *weights, info);
+ const ConvolutionInfo info{ conv_info.pad_stride_info, conv_info.depth_multiplier, ActivationLayerInfo(), conv_info.dilation };
+ const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info);
const bool is_quantized = is_data_type_quantized(input->data_type());
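Taken together, the new checks above define when the weights can be exported to a cl_image. A minimal standalone sketch of the same predicate (the function name and the device_supports_export flag are illustrative, not library symbols; the latter stands in for export_weights_to_cl_image(weights)):

    // Illustrative only: mirrors the cl_image validation rules added above.
    static bool weights_exportable_to_cl_image(bool device_supports_export,
                                               unsigned int depth_multiplier,
                                               unsigned int n0)
    {
        if(!device_supports_export) return false; // device/shape cannot back an image with this buffer
        if(depth_multiplier > 1)    return false; // export path only covers depth_multiplier == 1
        if((n0 % 4) != 0)           return false; // each image texel packs 4 weight values
        return true;
    }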
@@ -152,21 +158,21 @@ CLDepthwiseConvolutionLayerNativeKernel::CLDepthwiseConvolutionLayerNativeKernel
_depth_multiplier(1),
_output_multipliers(nullptr),
_output_shifts(nullptr),
+ _export_to_cl_image(false),
_is_quantized(false)
{
_type = CLKernelType::DEPTHWISE;
}
void CLDepthwiseConvolutionLayerNativeKernel::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
- const DWCWeightsKernelInfo &dwc_weights_info, const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation,
+ const DWCComputeKernelInfo &dwc_info, const ConvolutionInfo &conv_info,
const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
{
- configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, dwc_weights_info, dwc_info, conv_info, depth_multiplier, dilation, output_multipliers, output_shifts);
+ configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, dwc_info, conv_info, output_multipliers, output_shifts);
}
void CLDepthwiseConvolutionLayerNativeKernel::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
- const DWCWeightsKernelInfo &dwc_weights_info,
- const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation,
+ const DWCComputeKernelInfo &dwc_info, const ConvolutionInfo &conv_info,
const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
@@ -176,93 +182,113 @@ void CLDepthwiseConvolutionLayerNativeKernel::configure(const CLCompileContext &
output = input;
}
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(),
- dwc_weights_info, dwc_info, conv_info, depth_multiplier, dilation,
- (output_multipliers != nullptr) ? output_multipliers->info() : nullptr, (output_shifts != nullptr) ? output_shifts->info() : nullptr));
+ dwc_info, conv_info, (output_multipliers != nullptr) ? output_multipliers->info() : nullptr, (output_shifts != nullptr) ? output_shifts->info() : nullptr));
auto padding_info = get_padding_info({ input, output });
- const ConvolutionInfo info{ conv_info, depth_multiplier, ActivationLayerInfo(), dilation };
- const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_depthwise_convolution_shape(*(input->info()), *(weights->info()), info);
+ const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_depthwise_convolution_shape(*(input->info()), *(weights->info()), conv_info);
auto_init_if_empty(*(output->info()), input->info()->clone()->set_tensor_shape(output_shape).set_quantization_info(output->info()->quantization_info()));
_input = input;
_output = output;
_weights = weights;
_biases = biases;
- _depth_multiplier = depth_multiplier;
+ _depth_multiplier = conv_info.depth_multiplier;
_output_multipliers = output_multipliers;
_output_shifts = output_shifts;
+ _export_to_cl_image = dwc_info.export_weights_to_cl_image;
_is_quantized = is_data_type_quantized(input->info()->data_type());
- const unsigned int n0 = adjust_vec_size(dwc_weights_info.n0, input->info()->dimension(0));
+ const unsigned int n0 = adjust_vec_size(dwc_info.n0, input->info()->dimension(0));
+ const unsigned int m0 = std::min(dwc_info.m0, (unsigned int)output->info()->dimension(1));
+ std::string kernel_name = "";
CLBuildOptions build_opts;
- build_opts.add_option_if(_biases != nullptr, "-DHAS_BIAS");
- build_opts.add_option_if(_input->info()->tensor_shape().total_size_upper(3) > 1, "-DDST_DEPTH=" + support::cpp11::to_string(static_cast<int>(_output->info()->dimension(2))));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(_input->info()->data_type()));
- build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(dwc_info.activation_info.activation())));
- build_opts.add_option("-DDEPTH_MULTIPLIER=" + support::cpp11::to_string(depth_multiplier));
+
+ // Update the padding for the weights tensor if we can export to cl_image
+ if(_export_to_cl_image)
+ {
+ arm_compute::opencl::kernels::gemm::update_padding_for_cl_image(weights->info());
+ }
+
+ build_opts.add_option("-cl-fast-relaxed-math");
+ build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(conv_info.act_info.activation())));
+ build_opts.add_option("-DDEPTH_MULTIPLIER=" + support::cpp11::to_string(conv_info.depth_multiplier));
+ build_opts.add_option("-DSRC_TENSOR_TYPE=BUFFER");
+ build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(_input->info()->dimension(1)));
+ build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(_input->info()->dimension(2)));
+ // Note: SRC_DATA_TYPE must be the same as WEI_DATA_TYPE. In the quantized case we could
+ // have activations and weights with different data types. However, since the implementation
+ // only works when both have the same data type, we adjust the offset to account for this
+ build_opts.add_option("-DSRC_DATA_TYPE=" + get_cl_type_from_data_type(_input->info()->data_type()));
+ build_opts.add_option("-DDST_TENSOR_TYPE=BUFFER");
+ build_opts.add_option("-DDST_WIDTH=" + support::cpp11::to_string(_output->info()->dimension(1)));
+ build_opts.add_option("-DDST_HEIGHT=" + support::cpp11::to_string(_output->info()->dimension(2)));
+ build_opts.add_option("-DDST_DATA_TYPE=" + get_cl_type_from_data_type(_output->info()->data_type()));
+ build_opts.add_option_if_else(_export_to_cl_image, "-DWEI_TENSOR_TYPE=IMAGE", "-DWEI_TENSOR_TYPE=BUFFER");
+ build_opts.add_option("-DWEI_WIDTH=" + support::cpp11::to_string(_weights->info()->dimension(1)));
+ build_opts.add_option("-DWEI_HEIGHT=" + support::cpp11::to_string(_weights->info()->dimension(2)));
+ build_opts.add_option("-DWEI_DATA_TYPE=" + get_cl_type_from_data_type(_weights->info()->data_type()));
+ build_opts.add_option("-DPAD_TOP=" + support::cpp11::to_string(conv_info.pad_stride_info.pad_top()));
+ build_opts.add_option("-DPAD_LEFT=" + support::cpp11::to_string(conv_info.pad_stride_info.pad_left()));
+ build_opts.add_option("-DSTRIDE_X=" + support::cpp11::to_string(conv_info.pad_stride_info.stride().first));
+ build_opts.add_option("-DSTRIDE_Y=" + support::cpp11::to_string(conv_info.pad_stride_info.stride().second));
+ build_opts.add_option("-DDILATION_X=" + support::cpp11::to_string(conv_info.dilation.x()));
+ build_opts.add_option("-DDILATION_Y=" + support::cpp11::to_string(conv_info.dilation.y()));
build_opts.add_option("-DN0=" + support::cpp11::to_string(n0));
- build_opts.add_option("-DSRC_DIM1=" + support::cpp11::to_string(_input->info()->dimension(1)));
- build_opts.add_option("-DSRC_DIM2=" + support::cpp11::to_string(_input->info()->dimension(2)));
- build_opts.add_option("-DKERNEL_WIDTH=" + support::cpp11::to_string(weights->info()->dimension(1)));
- build_opts.add_option("-DKERNEL_HEIGHT=" + support::cpp11::to_string(weights->info()->dimension(2)));
- build_opts.add_option("-DCONV_PAD_TOP=" + support::cpp11::to_string(conv_info.pad_top()));
- build_opts.add_option("-DCONV_PAD_LEFT=" + support::cpp11::to_string(conv_info.pad_left()));
- build_opts.add_option("-DCONV_STRIDE_X=" + support::cpp11::to_string(conv_info.stride().first));
- build_opts.add_option("-DCONV_STRIDE_Y=" + support::cpp11::to_string(conv_info.stride().second));
- build_opts.add_option("-DDILATION_X=" + support::cpp11::to_string(dilation.x()));
- build_opts.add_option("-DDILATION_Y=" + support::cpp11::to_string(dilation.y()));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(_input->info()->dimension(0) % n0));
-
- std::string kernel_name = (_is_quantized) ? "dwc_MxN_native_quantized8_nhwc" : "dwc_MxN_native_fp_nhwc";
+ build_opts.add_option("-DM0=" + support::cpp11::to_string(m0));
+ build_opts.add_option("-DM0_A=" + support::cpp11::to_string(weights->info()->dimension(1) + m0 - 1));
+ build_opts.add_option("-DPARTIAL_N0=" + support::cpp11::to_string(_input->info()->dimension(0) % n0));
+ build_opts.add_option_if(_input->info()->num_dimensions() > 3, "-DBATCHED_EXECUTION");
+ if(biases != nullptr)
+ {
+ build_opts.add_option(std::string("-DHAS_BIAS"));
+ build_opts.add_option(std::string("-DBIA_DATA_TYPE=" + get_cl_type_from_data_type(biases->info()->data_type())));
+ }
if(_is_quantized)
{
- const UniformQuantizationInfo iq_info = _input->info()->quantization_info().uniform();
- const UniformQuantizationInfo wq_info = _weights->info()->quantization_info().uniform();
- const UniformQuantizationInfo oq_info = _output->info()->quantization_info().uniform();
+ kernel_name = "dwc_native_quantized_nhwc";
+ const UniformQuantizationInfo iqinfo = input->info()->quantization_info().uniform();
+ const UniformQuantizationInfo wqinfo = weights->info()->quantization_info().uniform();
+ const UniformQuantizationInfo oqinfo = output->info()->quantization_info().uniform();
- build_opts.add_option("-DINPUT_OFFSET=" + support::cpp11::to_string(-iq_info.offset));
- build_opts.add_option("-DWEIGHTS_OFFSET=" + support::cpp11::to_string(-wq_info.offset));
- build_opts.add_option("-DOUTPUT_OFFSET=" + support::cpp11::to_string(oq_info.offset));
- build_opts.add_option_if(is_data_type_quantized_per_channel(weights->info()->data_type()), "-DPER_CHANNEL_QUANTIZATION");
+ PixelValue zero_value = PixelValue(0, input->info()->data_type(), input->info()->quantization_info());
+ int zero_value_s32;
+ zero_value.get(zero_value_s32);
- // Compute non-per-channel multiplier and shift anyway to make OpenCL kernel simpler
- float multiplier = iq_info.scale * wq_info.scale / oq_info.scale;
+ float multiplier = iqinfo.scale * wqinfo.scale / oqinfo.scale;
int output_multiplier = 0;
int output_shift = 0;
quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
- build_opts.add_option("-DOUTPUT_MULTIPLIER=" + support::cpp11::to_string(output_multiplier));
- build_opts.add_option("-DOUTPUT_SHIFT=" + support::cpp11::to_string(output_shift));
-
- if(dwc_info.activation_info.enabled())
- {
- int a_val{};
- int b_val{};
- std::tie(b_val, a_val) = get_quantized_activation_min_max(dwc_info.activation_info, input->info()->data_type(), oq_info);
-
- const int o1 = oq_info.offset;
-
- build_opts.add_option("-DA_VAL=" + support::cpp11::to_string(a_val));
- build_opts.add_option("-DB_VAL=" + support::cpp11::to_string(b_val));
- build_opts.add_option("-DCONST_0=" + support::cpp11::to_string(o1));
-
- const float s1 = iq_info.scale;
- build_opts.add_option("-DS1_VAL=" + float_to_string_with_full_precision(s1));
- build_opts.add_option("-DO1_VAL=" + support::cpp11::to_string(o1));
- }
-
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
- build_opts.add_option("-DWEIGHTS_TYPE=" + get_cl_type_from_data_type(weights->info()->data_type()));
+ build_opts.add_option("-DDST_MULTIPLIER=" + support::cpp11::to_string(output_multiplier));
+ build_opts.add_option("-DDST_SHIFT=" + support::cpp11::to_string(output_shift));
+ build_opts.add_option("-DSRC_OFFSET=" + support::cpp11::to_string(-iqinfo.offset));
+ build_opts.add_option("-DWEI_OFFSET=" + support::cpp11::to_string(-wqinfo.offset));
+ build_opts.add_option("-DDST_OFFSET=" + support::cpp11::to_string(oqinfo.offset));
+ build_opts.add_option("-DZERO_VALUE=" + support::cpp11::to_string(zero_value_s32));
+ build_opts.add_option("-DACC_DATA_TYPE=" + get_cl_type_from_data_type(DataType::S32));
+ build_opts.add_option("-DDST_MULTIPLIERS_DATA_TYPE=" + get_cl_type_from_data_type(_output_multipliers->info()->data_type()));
+ build_opts.add_option("-DDST_SHIFTS_DATA_TYPE=" + get_cl_type_from_data_type(_output_shifts->info()->data_type()));
+ build_opts.add_option_if_else(weights->info()->data_type() == DataType::QSYMM8_PER_CHANNEL, "-DQUANTIZATION_TYPE=PER_CHANNEL", "-DQUANTIZATION_TYPE=PER_TENSOR");
+ // Note: We expect the input and output tensors to always adopt a per-tensor quantization approach
+ int a_val{};
+ int b_val{};
+ std::tie(b_val, a_val) = get_quantized_activation_min_max(conv_info.act_info, input->info()->data_type(), oqinfo);
+
+ build_opts.add_option_if(conv_info.act_info.enabled(), "-DA_VAL=" + support::cpp11::to_string(a_val));
+ build_opts.add_option_if(conv_info.act_info.enabled(), "-DB_VAL=" + support::cpp11::to_string(b_val));
}
else
{
- build_opts.add_option_if(dwc_info.activation_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(dwc_info.activation_info.a()));
- build_opts.add_option_if(dwc_info.activation_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(dwc_info.activation_info.b()));
+ kernel_name = "dwc_native_fp_nhwc";
+ build_opts.add_option("-DACC_DATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+ build_opts.add_option("-DZERO_VALUE=" + support::cpp11::to_string(0));
+ build_opts.add_option_if(conv_info.act_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(conv_info.act_info.a()));
+ build_opts.add_option_if(conv_info.act_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(conv_info.act_info.b()));
}
- Window win = calculate_max_window(*(output->info()), Steps(n0));
+ Window win = calculate_max_window(*(output->info()), Steps(n0, m0));
ICLKernel::configure_internal(win);
_kernel = create_kernel(compile_context, kernel_name, build_opts.options());
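The N0/M0 build options assembled above fix the tile decomposition at compile time. A small self-contained sketch of the leftover arithmetic, using hypothetical shapes (none of these numbers come from this patch):

    #include <cstdio>

    int main()
    {
        // Hypothetical NHWC tensor: 10 channels, output width 7, 3-wide filter, stride 1.
        const unsigned int channels = 10, n0 = 4; // N0: channels per work-item
        const unsigned int width    = 7,  m0 = 2; // M0: output columns per work-item
        const unsigned int kernel_w = 3;

        std::printf("PARTIAL_N0 = %u\n", channels % n0);     // 2: last channel tile is store-masked
        std::printf("M0_A       = %u\n", kernel_w + m0 - 1); // 4: input columns spanned per tile
        std::printf("tiles      = %u x %u\n",
                    (channels + n0 - 1) / n0,                // 3 channel tiles
                    (width + m0 - 1) / m0);                  // 4 column tiles
        return 0;
    }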
@@ -288,10 +314,9 @@ void CLDepthwiseConvolutionLayerNativeKernel::configure(const CLCompileContext &
}
Status CLDepthwiseConvolutionLayerNativeKernel::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
- const DWCWeightsKernelInfo &dwc_weights_info, const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier, const Size2D &dilation, const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
+ const DWCComputeKernelInfo &dwc_info, const ConvolutionInfo &conv_info, const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, dwc_weights_info, dwc_info, conv_info, depth_multiplier, dilation, output_multipliers, output_shifts));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, dwc_info, conv_info, output_multipliers, output_shifts));
return Status{};
}
@@ -302,37 +327,46 @@ void CLDepthwiseConvolutionLayerNativeKernel::run(const Window &window, cl::Comm
// Collapse window
Window window_collapsed = window.collapse(ICLKernel::window(), Window::DimZ);
- Window slice_in = window.first_slice_window_4D();
- Window slice_out = window_collapsed.first_slice_window_4D();
+
+ Window slice = window_collapsed.first_slice_window_4D();
if(_depth_multiplier != 1)
{
- ARM_COMPUTE_ERROR_ON(slice_out.x().step() != 1);
- slice_out.set(Window::DimX, Window::Dimension(0, _input->info()->tensor_shape()[0], 1));
+ // If the depth multiplier > 1, we need to use the input channels rather than the output channels
+ ARM_COMPUTE_ERROR_ON(slice.x().step() != 1);
+ slice.set(Window::DimX, Window::Dimension(0, _input->info()->tensor_shape()[0], 1));
}
- unsigned int idx = 2 * num_arguments_per_4D_tensor() + num_arguments_per_3D_tensor();
+ cl::Image2D weights_cl_image;
- // Set output multipliers in case of quantized data type
- if(_is_quantized)
+ if(_export_to_cl_image)
{
- add_1D_tensor_argument(idx, _output_multipliers, slice_in);
- add_1D_tensor_argument(idx, _output_shifts, slice_in);
+ const size_t image_w = _weights->info()->dimension(0) / 4;
+ const size_t image_h = _weights->info()->dimension(1) * _weights->info()->dimension(2) * _weights->info()->dimension(3);
+ const TensorShape shape2d(image_w, image_h);
+ const size_t image_row_pitch = _weights->info()->strides_in_bytes()[1];
+
+ // Export cl_buffer to cl_image
+ weights_cl_image = create_image2d_from_buffer(CLKernelLibrary::get().context(), _weights->cl_buffer(), shape2d, _weights->info()->data_type(), image_row_pitch);
}
- if(_biases != nullptr)
+ unsigned int idx = 0;
+ add_4D_tensor_argument(idx, _input, slice);
+ add_4D_tensor_argument(idx, _output, slice);
+ if(_export_to_cl_image)
{
- add_1D_tensor_argument(idx, _biases, slice_in);
+ _kernel.setArg(idx++, weights_cl_image);
}
-
- do
+ add_4D_tensor_argument(idx, _weights, slice);
+ if(_is_quantized)
+ {
+ add_1D_tensor_argument(idx, _output_multipliers, slice);
+ add_1D_tensor_argument(idx, _output_shifts, slice);
+ }
+ if(_biases != nullptr)
{
- idx = 0;
- add_4D_tensor_argument(idx, _input, slice_in);
- add_4D_tensor_argument(idx, _output, slice_out);
- add_3D_tensor_argument(idx, _weights, slice_out);
- enqueue(queue, *this, slice_out, lws_hint());
+ add_1D_tensor_argument(idx, _biases, slice);
}
- while(window_collapsed.slide_window_slice_4D(slice_out) && window.slide_window_slice_4D(slice_in));
+ enqueue(queue, *this, slice, lws_hint());
}
} // namespace arm_compute
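For context, the create_image2d_from_buffer() call in run() wraps the existing weights buffer as a 2D image without copying. A hedged sketch of the equivalent raw OpenCL call, assuming cl_khr_image2d_from_buffer support and F32 weights (buffer, context and the dimension variables are placeholders, not library symbols):

    // Wrap an existing cl_mem buffer as a read-only 2D image (no copy).
    cl_image_format fmt{};
    fmt.image_channel_order     = CL_RGBA;  // 4 weights per texel, hence the n0 % 4 == 0 rule
    fmt.image_channel_data_type = CL_FLOAT; // CL_HALF_FLOAT for F16 weights

    cl_image_desc desc{};
    desc.image_type      = CL_MEM_OBJECT_IMAGE2D;
    desc.image_width     = image_w;         // channels / 4
    desc.image_height    = image_h;         // product of the remaining weight dimensions
    desc.image_row_pitch = image_row_pitch; // must match the buffer's row stride in bytes
    desc.buffer          = buffer;          // the cl_mem already holding the weights

    cl_int err = CL_SUCCESS;
    cl_mem weights_image = clCreateImage(context, CL_MEM_READ_ONLY, &fmt, &desc, nullptr, &err);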
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h b/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h
index 068131f434..eeed115832 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h
+++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h
@@ -47,23 +47,21 @@ public:
CLDepthwiseConvolutionLayerNativeKernel(CLDepthwiseConvolutionLayerNativeKernel &&) = default;
/** Allow instances of this class to be moved */
CLDepthwiseConvolutionLayerNativeKernel &operator=(CLDepthwiseConvolutionLayerNativeKernel &&) = default;
+
/** Initialize the function's source, destination and parameters
*
- * @param[in] compile_context The compile context to be used.
- * @param[in,out] input Source tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/FP32/FP16. Data layout supported: NHWC
- * @param[in] weights Weights tensor. A 3D tensor with dimensions [IFM, N, M].
- * Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8.
- * @param[in] biases Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
- * @param[out] output Destination tensor. Pass in nullptr or @p input for in-place operation. Data type supported: Same as @p input.
- * @param[in] dwc_weights_info Depthwise convolution layer weights info to retrieve the number of output elements processed by each thread
- * @param[in] dwc_info Depthwise convolution layer info
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- * @param[in] output_multipliers (Optional) Output multipliers tensor for quantized computations. In case of per-channel quantization,
- * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
- * @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
+ * @param[in] compile_context The compile context to be used.
+ * @param[in] input Source tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/FP32/FP16. Data layout supported: NHWC
+ * @param[in] weights Weights tensor. A 3D tensor with dimensions [IFM, N, M].
+ * Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8.
+ * @param[in] biases Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
+ * Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
+ * @param[out] output Destination tensor. Pass in nullptr or @p input for in-place operation. Data type supported: Same as @p input.
+ * @param[in] dwc_info Depthwise convolution layer info
+ * @param[in] conv_info Convolution info (padding, stride, dilation, ...)
+ * @param[in] output_multipliers (Optional) Output multipliers tensor for quantized computations. In case of per-channel quantization,
+ * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
+ * @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
* the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
*
* @note: In-place is only supported when
@@ -75,17 +73,15 @@ public:
* * no padding
* * no change of data layout after configure
*/
- void configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const DWCWeightsKernelInfo &dwc_weights_info,
- const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, const Size2D &dilation = Size2D(1U, 1U),
- const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr);
+ void configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const DWCComputeKernelInfo &dwc_info,
+ const ConvolutionInfo &conv_info, const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr);
- /** Initialize the function's source, destination and parameters
+ /** Initialize the function's source, destination and parameters
*
* Similar to @ref CLDepthwiseConvolutionLayerNativeKernel::configure()
*/
- void configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const DWCWeightsKernelInfo &dwc_weights_info,
- const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, const Size2D &dilation = Size2D(1U, 1U),
- const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr);
+ void configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const DWCComputeKernelInfo &dwc_info,
+ const ConvolutionInfo &conv_info, const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr);
/** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayerNativeKernel
*
@@ -93,9 +89,8 @@ public:
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const DWCWeightsKernelInfo &dwc_weights_info,
- const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, const Size2D &dilation = Size2D(1U, 1U),
- const ITensorInfo *output_multipliers = nullptr, const ITensorInfo *output_shifts = nullptr);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const DWCComputeKernelInfo &dwc_info,
+ const ConvolutionInfo &conv_info, const ITensorInfo *output_multipliers = nullptr, const ITensorInfo *output_shifts = nullptr);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
@@ -108,6 +103,7 @@ private:
unsigned int _depth_multiplier{ 0 };
const ICLTensor *_output_multipliers{};
const ICLTensor *_output_shifts{};
+ bool _export_to_cl_image{ true };
bool _is_quantized{ false };
};
} // namespace arm_compute
diff --git a/src/core/gpu/cl/ClKernelLibrary.cpp b/src/core/gpu/cl/ClKernelLibrary.cpp
index 9d516e54a7..73da93c1f5 100644
--- a/src/core/gpu/cl/ClKernelLibrary.cpp
+++ b/src/core/gpu/cl/ClKernelLibrary.cpp
@@ -223,23 +223,13 @@ const std::map<std::string, std::string> ClKernelLibrary::_kernel_program_map =
{ "crop_tensor", "crop_tensor.cl" },
{ "deconvolution_reshape", "deconvolution_layer.cl" },
{ "deconvolution_upsample", "deconvolution_layer.cl" },
- { "depthwise_convolution_3x3", "depthwise_convolution.cl" },
- { "depthwise_convolution_3x3_f16", "depthwise_convolution.cl" },
- { "depthwise_convolution_3x3_nhwc", "depthwise_convolution.cl" },
- { "depthwise_convolution_3x3_nhwc_stride1", "depthwise_convolution.cl" },
- { "dwc_MxN_native_fp_nhwc", "depthwise_convolution.cl" },
- { "dwc_MxN_native_quantized8_nhwc", "depthwise_convolution_quantized.cl" },
- { "dwc_3x3_native_quantized8_nchw", "depthwise_convolution_quantized.cl" },
- { "dwc_3x3_native_quantized8_dot8_nchw", "depthwise_convolution_quantized.cl" },
{ "depth_to_space_nchw", "depth_to_space.cl" },
{ "depth_to_space_nhwc", "depth_to_space.cl" },
- { "depthwise_convolution_3x3_stridex1_stridey1_f16", "depthwise_convolution.cl" },
- { "depthwise_convolution_3x3_stridex2_stridey2_f16", "depthwise_convolution.cl" },
- { "depthwise_convolution_3x3_stridex1_stridey1_f32", "depthwise_convolution.cl" },
- { "depthwise_convolution_3x3_stridex2_stridey2_f32", "depthwise_convolution.cl" },
{ "dequantization_layer", "dequantization_layer.cl" },
{ "dequantization_layer_per_channel_nhwc", "dequantization_layer.cl" },
{ "dequantization_layer_per_channel_nchw", "dequantization_layer.cl" },
+ { "dwc_native_fp_nhwc", "dwc_native_fp_nhwc.cl" },
+ { "dwc_native_quantized_nhwc", "dwc_native_quantized_nhwc.cl" },
{ "direct_convolution_nhwc", "direct_convolution.cl" },
{ "direct_convolution1x1", "direct_convolution1x1.cl" },
{ "direct_convolution1x1_f32_bifrost", "direct_convolution1x1.cl" },
@@ -575,14 +565,6 @@ const std::map<std::string, std::string> ClKernelLibrary::_program_source_map =
#include "./cl_kernels/depth_to_space.clembed"
},
{
- "depthwise_convolution.cl",
-#include "./cl_kernels/depthwise_convolution.clembed"
- },
- {
- "depthwise_convolution_quantized.cl",
-#include "./cl_kernels/depthwise_convolution_quantized.clembed"
- },
- {
"dequantization_layer.cl",
#include "./cl_kernels/dequantization_layer.clembed"
},
@@ -607,6 +589,14 @@ const std::map<std::string, std::string> ClKernelLibrary::_program_source_map =
#include "./cl_kernels/direct_convolution.clembed"
},
{
+ "dwc_native_fp_nhwc.cl",
+#include "./cl_kernels/dwc_native_fp_nhwc.clembed"
+ },
+ {
+ "dwc_native_quantized_nhwc.cl",
+#include "./cl_kernels/dwc_native_quantized_nhwc.clembed"
+ },
+ {
"elementwise_operation.cl",
#include "./cl_kernels/elementwise_operation.clembed"
},
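With the dedicated depthwise_convolution*.cl sources removed, kernel selection becomes a plain lookup: configure() picks dwc_native_fp_nhwc or dwc_native_quantized_nhwc by name, and this map resolves each name to its own standalone .cl program, which is compiled on first use.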
diff --git a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
index a826f85c5c..84798fa672 100644
--- a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
@@ -23,16 +23,15 @@
*/
#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"
+#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/PixelValue.h"
+#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h"
-#include "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h"
#include "src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h"
-#include "src/core/CL/kernels/CLFillBorderKernel.h"
namespace arm_compute
{
@@ -41,70 +40,97 @@ using namespace arm_compute::misc::shape_calculator;
namespace
{
-Status validate_arguments_3x3(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
+bool export_weights_to_cl_image_heuristic(const ITensorInfo *weights, unsigned int depth_multiplier, GPUTarget gpu_target)
{
- // This function should be removed and incorporated inside CLDepthwiseConvolutionLayerInternal3x3 once CLDepthwiseConvolutionLayer3x3 is properly removed
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() == DataLayout::UNKNOWN);
+ if(!export_weights_to_cl_image(weights))
+ {
+ return false;
+ }
- const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());
- const bool is_nhwc = input->data_layout() == DataLayout::NHWC;
- const bool needs_permute = is_nhwc && (depth_multiplier > 1);
+ const size_t idx_w = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::WIDTH);
+ const size_t idx_h = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::HEIGHT);
+ const size_t kernel_w = weights->tensor_shape()[idx_w];
+ const size_t kernel_h = weights->tensor_shape()[idx_h];
- ARM_COMPUTE_RETURN_ERROR_ON(is_quantized && is_nhwc && !needs_permute);
+ if((kernel_w == 1) && (kernel_h == 1))
+ {
+ return false;
+ }
- TensorInfo output_multipliers_shifts_info(TensorInfo(TensorShape(1U), 1, DataType::S32));
- if(is_quantized)
+ if(depth_multiplier > 1)
{
- if(is_data_type_quantized_per_channel(weights->data_type()))
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QSYMM8_PER_CHANNEL);
+ return false;
+ }
- const size_t idx_c = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::CHANNEL);
- output_multipliers_shifts_info.set_tensor_shape(TensorShape(weights->dimension(idx_c)));
+ if(gpu_target == GPUTarget::G71 || get_arch_from_target(gpu_target) == GPUTarget::MIDGARD)
+ {
+ return false;
+ }
+
+ return true;
+}
+
+void initialize_dwc_native_compute_info(DWCComputeKernelInfo &dwc_compute_info, const ITensorInfo *weights, const PadStrideInfo &conv_info, const Size2D &dilation, unsigned int depth_multiplier,
+ GPUTarget gpu_target)
+{
+ if(!is_data_type_float(weights->data_type()))
+ {
+ dwc_compute_info.export_weights_to_cl_image = false;
+ dwc_compute_info.n0 = (depth_multiplier == 1) ? 4 : 1;
+ if(conv_info.stride().first == 1 && dilation.x() == 1 && depth_multiplier == 1)
+ {
+ dwc_compute_info.m0 = 2;
}
else
{
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
+ dwc_compute_info.m0 = 1;
}
- }
- if(needs_permute)
- {
- TensorShape permuted_input_shape = input->tensor_shape();
- TensorShape permuted_weights_shape = weights->tensor_shape();
- const ConvolutionInfo info{ conv_info, depth_multiplier, ActivationLayerInfo(), dilation };
- TensorShape permuted_output_shape = shape_calculator::compute_depthwise_convolution_shape(*input, *weights, info);
+ return;
+ }
- permute(permuted_input_shape, PermutationVector(1U, 2U, 0U));
- permute(permuted_weights_shape, PermutationVector(1U, 2U, 0U));
- permute(permuted_output_shape, PermutationVector(1U, 2U, 0U));
+ // Floating point path
- const TensorInfo permuted_input = input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_input_shape).set_data_layout(DataLayout::NCHW);
- const TensorInfo permuted_weights = weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_weights_shape).set_data_layout(DataLayout::NCHW);
- const TensorInfo permuted_output = output->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_output_shape).set_data_layout(DataLayout::NCHW);
+ // First check if we can export to cl_image.
+ dwc_compute_info.export_weights_to_cl_image = export_weights_to_cl_image_heuristic(weights, depth_multiplier, gpu_target);
- ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayer3x3NCHWKernel::validate(&permuted_input, &permuted_weights, biases, &permuted_output,
- conv_info, depth_multiplier, act_info,
- dilation, &output_multipliers_shifts_info, &output_multipliers_shifts_info));
+ // Set n0
+ if(depth_multiplier == 1)
+ {
+ if(dwc_compute_info.export_weights_to_cl_image == false && weights->data_type() == DataType::F16)
+ {
+ dwc_compute_info.n0 = 8;
+ }
+ else
+ {
+ dwc_compute_info.n0 = 4;
+ }
}
- else if(is_nhwc)
+ else
{
- ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayer3x3NHWCKernel::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info,
- dilation));
+ dwc_compute_info.n0 = 1;
+ }
+
+ dwc_compute_info.n0 = adjust_vec_size(dwc_compute_info.n0, weights->dimension(0));
+
+ // Set m0 only if stride_x == 1 and dilation_x == 1
+ if(conv_info.stride().first == 1 && dilation.x() == 1)
+ {
+ const size_t idx_w = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::WIDTH);
+ const size_t kernel_w = weights->tensor_shape()[idx_w];
+
+ dwc_compute_info.m0 = (kernel_w >= 9) || (kernel_w == 1) ? 1 : 2;
}
else
{
- ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayer3x3NCHWKernel::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info,
- dilation, &output_multipliers_shifts_info, &output_multipliers_shifts_info));
+ dwc_compute_info.m0 = 1;
}
- return Status{};
+ return;
}
+
} // namespace
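A few worked examples of the heuristics above (the configurations are hypothetical; the resulting values follow directly from the code, before n0 is clamped by adjust_vec_size() to the actual channel count):

    F16,  3x3, stride 1, dilation 1, depth_multiplier 1, no cl_image -> n0 = 8, m0 = 2
    F32,  9x9, stride 1, dilation 1, depth_multiplier 1              -> n0 = 4, m0 = 1  (kernel_w >= 9)
    QASYMM8, 3x3, stride 2, depth_multiplier 1                       -> n0 = 4, m0 = 1
    F32,  3x3, stride 2, depth_multiplier 4                          -> n0 = 1, m0 = 1

cl_image export is additionally rejected for 1x1 kernels, for depth_multiplier > 1, and on G71 and other Midgard-based GPUs.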
-CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::CLDepthwiseConvolutionLayerGeneric(std::shared_ptr<IMemoryManager> memory_manager)
+CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)),
_dwc_native_kernel(std::make_unique<CLDepthwiseConvolutionLayerNativeKernel>()),
_permute_input_to_nhwc(),
@@ -126,15 +152,15 @@ CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::CLDepthwiseConv
CLDepthwiseConvolutionLayer::~CLDepthwiseConvolutionLayer() = default;
-void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation)
+void CLDepthwiseConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
+ unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
{
configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
}
-void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases,
- ICLTensor *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation)
+void CLDepthwiseConvolutionLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases,
+ ICLTensor *output, const PadStrideInfo &conv_info,
+ unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
ARM_COMPUTE_ERROR_THROW_ON(CLDepthwiseConvolutionLayer::validate(input->info(),
@@ -153,6 +179,8 @@ void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::configure(
_output = output;
_needs_permute = input->info()->data_layout() == DataLayout::NCHW;
+ const GPUTarget gpu_target = CLScheduler::get().target();
+
ICLTensor *input_to_use = input;
const ICLTensor *weights_to_use = weights;
ICLTensor *output_to_use = output;
@@ -191,13 +219,13 @@ void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::configure(
output_shifts_to_use = &_output_shifts;
}
- DWCWeightsKernelInfo dwc_weights_info;
- dwc_weights_info.n0 = (depth_multiplier == 1) ? 8 : 1;
- DWCKernelInfo dwc_info;
- dwc_info.activation_info = act_info;
+ DWCComputeKernelInfo dwc_native_compute_info;
+ initialize_dwc_native_compute_info(dwc_native_compute_info, weights_to_use->info(), conv_info, dilation, depth_multiplier, gpu_target);
+
+ const ConvolutionInfo conv_kernel_info{ conv_info, depth_multiplier, act_info, dilation };
+
_dwc_native_kernel->configure(compile_context, input_to_use, weights_to_use, biases, output_to_use,
- dwc_weights_info, dwc_info, conv_info, depth_multiplier, dilation,
- output_multipliers_to_use, output_shifts_to_use);
+ dwc_native_compute_info, conv_kernel_info, output_multipliers_to_use, output_shifts_to_use);
if(_needs_permute)
{
@@ -216,9 +244,9 @@ void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::configure(
}
}
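With the Generic/Internal3x3 split removed, every caller now goes through the single public entry point. A hedged usage sketch, assuming NHWC F32 tensors whose initialisation and allocation are elided (shapes and the activation choice are hypothetical):

    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"

    using namespace arm_compute;

    void run_dwc(CLTensor &src, CLTensor &weights, CLTensor &biases, CLTensor &dst)
    {
        CLScheduler::get().default_init(); // set up context and queue once per application

        CLDepthwiseConvolutionLayer dwc;
        dwc.configure(&src, &weights, &biases, &dst,
                      PadStrideInfo(1, 1, 1, 1), // stride (x, y) = 1, pad (x, y) = 1
                      1,                         // depth multiplier
                      ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                      Size2D(1U, 1U));           // dilation

        dwc.run(); // permutes NCHW tensors to NHWC internally before the native kernel
    }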
-Status CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
- const PadStrideInfo &conv_info,
- unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation)
+Status CLDepthwiseConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
+ const PadStrideInfo &conv_info,
+ unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
{
const bool in_place = input == output || output == nullptr;
if(in_place)
@@ -232,10 +260,9 @@ Status CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::validate
ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_w) + (weights->dimension(idx_w) - 1) * (dilation.x() - 1) > input->dimension(idx_w) + conv_info.pad_left() + conv_info.pad_right());
ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_h) + (weights->dimension(idx_h) - 1) * (dilation.y() - 1) > input->dimension(idx_h) + conv_info.pad_top() + conv_info.pad_bottom());
- DWCWeightsKernelInfo dwc_weights_info;
- dwc_weights_info.n0 = (depth_multiplier == 1) ? 8 : 1;
- DWCKernelInfo dwc_info;
- dwc_info.activation_info = act_info;
+ const GPUTarget gpu_target = CLScheduler::get().target();
+
+ const ConvolutionInfo conv_kernel_info{ conv_info, depth_multiplier, act_info, dilation };
const bool needs_permute = input->data_layout() == DataLayout::NCHW;
@@ -275,20 +302,25 @@ Status CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::validate
ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(input, &permuted_input, PermutationVector(2U, 0U, 1U)));
ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(weights, &permuted_weights, PermutationVector(2U, 0U, 1U)));
- ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayerNativeKernel::validate(&permuted_input, &permuted_weights, biases, &permuted_output, dwc_weights_info,
- dwc_info, conv_info, depth_multiplier, dilation,
- &output_multipliers_shifts_info, &output_multipliers_shifts_info));
+
+ DWCComputeKernelInfo dwc_native_compute_info;
+ initialize_dwc_native_compute_info(dwc_native_compute_info, &permuted_weights, conv_info, dilation, depth_multiplier, gpu_target);
+
+ ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayerNativeKernel::validate(&permuted_input, &permuted_weights, biases, &permuted_output,
+ dwc_native_compute_info, conv_kernel_info, &output_multipliers_shifts_info, &output_multipliers_shifts_info));
ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(&permuted_output, output, PermutationVector(1U, 2U, 0U)));
}
else
{
- ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayerNativeKernel::validate(input, weights, biases, output, dwc_weights_info, dwc_info, conv_info, depth_multiplier,
- dilation, &output_multipliers_shifts_info, &output_multipliers_shifts_info));
+ DWCComputeKernelInfo dwc_native_compute_info;
+ initialize_dwc_native_compute_info(dwc_native_compute_info, weights, conv_info, dilation, depth_multiplier, gpu_target);
+ ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayerNativeKernel::validate(input, weights, biases, output, dwc_native_compute_info, conv_kernel_info, &output_multipliers_shifts_info,
+ &output_multipliers_shifts_info));
}
return Status{};
}
-void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::run()
+void CLDepthwiseConvolutionLayer::run()
{
prepare();
@@ -305,7 +337,7 @@ void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::run()
}
}
-void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::prepare()
+void CLDepthwiseConvolutionLayer::prepare()
{
if(!_is_prepared)
{
@@ -333,308 +365,4 @@ void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::prepare()
_is_prepared = true;
}
}
-
-CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::CLDepthwiseConvolutionLayerInternal3x3(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(std::move(memory_manager)),
- _kernel_nchw(nullptr),
- _kernel_nhwc(nullptr),
- _border_handler(std::make_unique<CLFillBorderKernel>()),
- _permute_input_to_nchw(),
- _permute_weights_to_nchw(),
- _permute_output_to_nhwc(),
- _permuted_input(),
- _permuted_weights(),
- _permuted_output(),
- _output_multipliers(),
- _output_shifts(),
- _original_weights(nullptr),
- _input(nullptr),
- _output(nullptr),
- _needs_permute(false),
- _is_prepared(false),
- _is_quantized(false),
- _is_nhwc(false)
-{
-}
-
-void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
-}
-
-void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases,
- ICLTensor *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
-{
- // Perform validation step
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
- ARM_COMPUTE_ERROR_THROW_ON(CLDepthwiseConvolutionLayerInternal3x3::validate(input->info(),
- weights->info(),
- biases != nullptr ? biases->info() : nullptr,
- output->info(),
- conv_info,
- depth_multiplier,
- act_info,
- dilation));
-
- _is_nhwc = input->info()->data_layout() == DataLayout::NHWC;
- _is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
- _needs_permute = _is_nhwc && (depth_multiplier > 1);
-
- _is_prepared = false;
- _original_weights = weights;
- _input = input;
- _output = output;
-
- ICLTensor *input_to_use = input;
- const ICLTensor *weights_to_use = weights;
- ICLTensor *output_to_use = output;
-
- const bool is_quantized_per_channel = is_data_type_quantized_per_channel(weights->info()->data_type());
-
- if(_needs_permute)
- {
- _memory_group.manage(&_permuted_input);
- _memory_group.manage(&_permuted_output);
-
- // Configure the function to transform the input tensor from NHWC -> NCHW
- _permute_input_to_nchw.configure(compile_context, input, &_permuted_input, PermutationVector(1U, 2U, 0U));
- _permuted_input.info()->set_data_layout(DataLayout::NCHW);
-
- // Configure the function to transform the weights tensor from HWI -> IHW
- _permute_weights_to_nchw.configure(compile_context, weights, &_permuted_weights, PermutationVector(1U, 2U, 0U));
- _permuted_weights.info()->set_data_layout(DataLayout::NCHW);
- _permuted_output.info()->set_quantization_info(output->info()->quantization_info());
-
- input_to_use = &_permuted_input;
- weights_to_use = &_permuted_weights;
- output_to_use = &_permuted_output;
-
- _kernel_nchw = std::make_unique<CLDepthwiseConvolutionLayer3x3NCHWKernel>();
- }
- else if(_is_nhwc)
- {
- _kernel_nhwc = std::make_unique<CLDepthwiseConvolutionLayer3x3NHWCKernel>();
- }
- else
- {
- _kernel_nchw = std::make_unique<CLDepthwiseConvolutionLayer3x3NCHWKernel>();
- }
-
- CLTensor *output_multipliers_to_use = nullptr;
- CLTensor *output_shifts_to_use = nullptr;
- if(_is_quantized)
- {
- const size_t idx_c = get_data_layout_dimension_index(weights->info()->data_layout(), DataLayoutDimension::CHANNEL);
- const size_t num_filters = (is_quantized_per_channel) ? weights->info()->dimension(idx_c) : 1;
-
- _output_multipliers.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));
- _output_shifts.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));
-
- output_multipliers_to_use = &_output_multipliers;
- output_shifts_to_use = &_output_shifts;
- }
-
- // Configure kernel
- if(_is_nhwc && !_needs_permute)
- {
- _kernel_nhwc->configure(compile_context, input_to_use, weights_to_use, biases, output_to_use, conv_info, depth_multiplier,
- act_info, dilation);
- }
- else
- {
- _kernel_nchw->configure(compile_context, input_to_use, weights_to_use, biases, output_to_use, conv_info, depth_multiplier,
- act_info, dilation, output_multipliers_to_use, output_shifts_to_use);
- }
-
- if(_is_quantized)
- {
- _output_multipliers.allocator()->allocate();
- _output_shifts.allocator()->allocate();
- }
-
- // Permute output if needed
- if(_needs_permute)
- {
- // Configure the function to transform the convoluted output to ACL's native ordering format NCHW
- _permuted_output.info()->set_data_layout(DataLayout::NCHW);
- _permute_output_to_nhwc.configure(compile_context, &_permuted_output, output, PermutationVector(2U, 0U, 1U));
-
- // Allocate tensors
- _permuted_input.allocator()->allocate();
- _permuted_output.allocator()->allocate();
- }
- // Configure border handler
- PixelValue &&zero_value(0.f);
- if(is_data_type_quantized_asymmetric(input->info()->data_type()))
- {
- zero_value = PixelValue(static_cast<uint8_t>(input->info()->quantization_info().uniform().offset));
- }
- if(!_is_nhwc || _needs_permute)
- {
- _border_handler->configure(compile_context, input_to_use, _kernel_nchw->border_size(), BorderMode::CONSTANT, zero_value);
- }
-}
-
-Status CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
-{
- return validate_arguments_3x3(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
-}
-
-void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::run()
-{
- prepare();
-
- MemoryGroupResourceScope scope_mg(_memory_group);
-
- if(_needs_permute)
- {
- _permute_input_to_nchw.run();
- }
- CLScheduler::get().enqueue(*_border_handler);
- if(_is_nhwc && !_needs_permute)
- {
- CLScheduler::get().enqueue(*_kernel_nhwc);
- }
- else
- {
- CLScheduler::get().enqueue(*_kernel_nchw);
- }
-
- if(_needs_permute)
- {
- _permute_output_to_nhwc.run();
- }
-}
-
-void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::prepare()
-{
- if(!_is_prepared)
- {
- if(_is_quantized)
- {
- _output_multipliers.map();
- _output_shifts.map();
- quantization::compute_quantized_multipliers_and_shifts(_input->info(),
- _original_weights->info(),
- _output->info(),
- reinterpret_cast<int32_t *>(_output_multipliers.ptr_to_element(Coordinates(0))),
- reinterpret_cast<int32_t *>(_output_shifts.ptr_to_element(Coordinates(0))));
- _output_multipliers.unmap();
- _output_shifts.unmap();
- }
-
- if(_needs_permute)
- {
- ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());
-
- _permuted_weights.allocator()->allocate();
- _permute_weights_to_nchw.run();
- _original_weights->mark_as_unused();
- }
-
- _is_prepared = true;
- }
-}
-
-CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_manager(std::move(memory_manager)), _depth_conv_func(DepthwiseConvolutionFunction::GENERIC), _func_3x3(), _func_generic()
-{
-}
-
-void CLDepthwiseConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier,
- ActivationLayerInfo act_info, const Size2D &dilation)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
-}
-
-void CLDepthwiseConvolutionLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
- const PadStrideInfo &conv_info,
- unsigned int depth_multiplier,
- ActivationLayerInfo act_info, const Size2D &dilation)
-{
- if(output == nullptr)
- {
- // In-place
- output = input;
- }
- _depth_conv_func = get_depthwiseconvolution_function(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(), conv_info, depth_multiplier, act_info,
- dilation);
- switch(_depth_conv_func)
- {
- case DepthwiseConvolutionFunction::OPTIMIZED:
- _func_3x3.set_memory_group(_memory_manager);
- _func_3x3.configure(compile_context, input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
- break;
- case DepthwiseConvolutionFunction::GENERIC:
- {
- _func_generic.set_memory_group(_memory_manager);
- _func_generic.configure(compile_context, input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
- }
- break;
- default:
- ARM_COMPUTE_ERROR("Unsupported DepthwiseConvolutionFunction");
- }
-}
-
-Status CLDepthwiseConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
-{
- DepthwiseConvolutionFunction depth_conv_func = get_depthwiseconvolution_function(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
- switch(depth_conv_func)
- {
- case DepthwiseConvolutionFunction::OPTIMIZED:
- return CLDepthwiseConvolutionLayerInternal3x3::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
- case DepthwiseConvolutionFunction::GENERIC:
- return CLDepthwiseConvolutionLayerGeneric::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
- default:
- ARM_COMPUTE_ERROR("Unsupported DepthwiseConvolutionFunction");
- }
-}
-
-DepthwiseConvolutionFunction CLDepthwiseConvolutionLayer::get_depthwiseconvolution_function(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
- const PadStrideInfo &conv_info,
- unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
-{
- if(bool(CLDepthwiseConvolutionLayerInternal3x3::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation)))
- {
- return DepthwiseConvolutionFunction::OPTIMIZED;
- }
- else
- {
- return DepthwiseConvolutionFunction::GENERIC;
- }
-}
-
-void CLDepthwiseConvolutionLayer::run()
-{
- switch(_depth_conv_func)
- {
- case DepthwiseConvolutionFunction::OPTIMIZED:
- _func_3x3.run();
- break;
- case DepthwiseConvolutionFunction::GENERIC:
- _func_generic.run();
- break;
- default:
- ARM_COMPUTE_ERROR("DepthwiseConvolutionFunction not properly configured");
- }
-}
-
-void CLDepthwiseConvolutionLayer::prepare()
-{
- switch(_depth_conv_func)
- {
- case DepthwiseConvolutionFunction::OPTIMIZED:
- _func_3x3.prepare();
- break;
- case DepthwiseConvolutionFunction::GENERIC:
- _func_generic.prepare();
- break;
- default:
- ARM_COMPUTE_ERROR("DepthwiseConvolutionFunction not properly configured");
- }
-}
} // namespace arm_compute
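Note: with the specialized 3x3 paths removed above, every CLDepthwiseConvolutionLayer now goes through the native NHWC kernel, and NCHW inputs are permuted around it. A minimal usage sketch of the function after this rework; the configure() signature is the public one retained above, while the shapes, names and default_init() call are illustrative only (NHWC TensorShape uses ACL's C, W, H dimension order):

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"

using namespace arm_compute;

void run_depthwise_example()
{
    CLScheduler::get().default_init();

    CLTensor src, weights, biases, dst;
    src.allocator()->init(TensorInfo(TensorShape(16U, 32U, 32U), 1, DataType::F32, DataLayout::NHWC));   // C, W, H
    weights.allocator()->init(TensorInfo(TensorShape(16U, 3U, 3U), 1, DataType::F32, DataLayout::NHWC)); // C, Kw, Kh
    biases.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(16U, 30U, 30U), 1, DataType::F32, DataLayout::NHWC));   // stride 1, no padding

    CLDepthwiseConvolutionLayer dwc;
    dwc.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 0, 0),
                  1U /* depth_multiplier */, ActivationLayerInfo(), Size2D(1U, 1U));

    src.allocator()->allocate();
    weights.allocator()->allocate();
    biases.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src/weights/biases ...
    dwc.run(); // run() invokes prepare() internally on first use
}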
diff --git a/tests/datasets/DepthwiseConvolutionLayerDataset.h b/tests/datasets/DepthwiseConvolutionLayerDataset.h
index 53b5248374..3b17910eac 100644
--- a/tests/datasets/DepthwiseConvolutionLayerDataset.h
+++ b/tests/datasets/DepthwiseConvolutionLayerDataset.h
@@ -121,7 +121,7 @@ public:
SmallDepthwiseConvolutionLayerDataset()
{
add_config(TensorShape(7U, 7U, 1U), Size2D(1U, 1U), PadStrideInfo(1, 1, 0, 0));
- add_config(TensorShape(23U, 27U, 5U), Size2D(3U, 5U), PadStrideInfo(2, 1, 0, 0));
+ add_config(TensorShape(3U, 3U, 2U), Size2D(2U, 2U), PadStrideInfo(1, 1, 0, 0));
add_config(TensorShape(33U, 27U, 7U), Size2D(7U, 3U), PadStrideInfo(3, 2, 1, 0));
// Asymmetric padding
add_config(TensorShape(33U, 27U, 7U), Size2D(5U, 7U), PadStrideInfo(3, 2, 1, 1, 2, 0, DimensionRoundingType::FLOOR));
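For context: each add_config entry is (source shape as WxHxC, depthwise kernel size, pad/stride). The new entry above, with its fields labelled:

add_config(TensorShape(3U, 3U, 2U),    // source: 3x3 spatial, 2 channels (W, H, C)
           Size2D(2U, 2U),             // 2x2 depthwise kernel
           PadStrideInfo(1, 1, 0, 0)); // stride_x, stride_y, pad_x, pad_y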
diff --git a/tests/validation/CL/DepthwiseConvolutionLayer.cpp b/tests/validation/CL/DepthwiseConvolutionLayer.cpp
index 22922f41a2..79a2678b44 100644
--- a/tests/validation/CL/DepthwiseConvolutionLayer.cpp
+++ b/tests/validation/CL/DepthwiseConvolutionLayer.cpp
@@ -48,14 +48,13 @@ constexpr RelativeTolerance<float> tolerance_f32(0.01f); /**<
constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(0); /**< Tolerance value for comparing reference's output against implementation's output for DataType::QASYMM8 */
constexpr float tolerance_num = 0.05f; /**< Tolerance number */
-const auto depth_multipliers = framework::dataset::make("DepthMultiplier", { 1, 2, 5 });
+const auto depth_multipliers = framework::dataset::make("DepthMultiplier", { 1, 5 });
const auto large_depth_multipliers = framework::dataset::make("DepthMultiplier", { 1, 2, 5, 8 });
//Activation Functions
const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
ActivationLayerInfo(),
- ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f, 0.f)
});
} // namespace
@@ -481,8 +480,8 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall, CLDepthwiseConvolutionLayerQuantizedFixture
combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset(),
depth_multipliers),
framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.3f, 10), QuantizationInfo(2.2f, 10) })),
- framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.5f, 4) })),
+ framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.5f, 128), QuantizationInfo(2.2f, 10) })),
+ framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(1.f, 128) })),
framework::dataset::make("DataLayout", { DataLayout::NHWC })), // NCHW is tested with int8
ActivationFunctionsDataset))
{
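The retuned quantization pairs keep the QASYMM8 affine mapping, real = scale * (q - zero_point), roughly centred: with QuantizationInfo(0.5f, 128) the uint8 range covers about [-64.0f, 63.5f]. A small sketch of what the values above mean, using ACL's quantize/dequantize helpers:

#include "arm_compute/core/QuantizationInfo.h"
#include <cstdint>

const arm_compute::QuantizationInfo src_qinfo(0.5f, 128);          // scale, zero-point (SrcQuantizationInfo above)
const uint8_t q = arm_compute::quantize_qasymm8(1.0f, src_qinfo);  // round(1.0 / 0.5) + 128 = 130
const float   r = arm_compute::dequantize_qasymm8(q, src_qinfo);   // 0.5 * (130 - 128) = 1.0f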
diff --git a/tests/validation/CL/DepthwiseConvolutionLayerNative.cpp b/tests/validation/CL/DepthwiseConvolutionLayerNative.cpp
index f640ee2b18..f565255719 100644
--- a/tests/validation/CL/DepthwiseConvolutionLayerNative.cpp
+++ b/tests/validation/CL/DepthwiseConvolutionLayerNative.cpp
@@ -62,7 +62,7 @@ RelativeTolerance<half_float::half> rel_tolerance_f16(half_float::half(0.01f));
constexpr float abs_tolerance_f16(0.03f);
/** Width values to test - Precommit */
-const auto width_values_precommit = framework::dataset::make("width", { 1U, 17U, 32U } );
+const auto width_values_precommit = framework::dataset::make("width", { 1U, 33U } );
/** Width values to test - Nightly */
const auto width_values_nightly = framework::dataset::make("width", { 53U, 47U } );
@@ -79,6 +79,12 @@ const auto channel_values_precommit = framework::dataset::make("channels", { 15U
/** Channel values to test - Nightly */
const auto channel_values_nightly = framework::dataset::make("channels", { 33U, 19U });
+/** Channel values to test with cl_image support - Precommit */
+const auto channel_values_export_to_cl_image_precommit = framework::dataset::make("channels", { 16U });
+
+/** Channel values to test with cl_image support - Nightly */
+const auto channel_values_export_to_cl_image_nightly = framework::dataset::make("channels", { 32U });
+
/** Batch values to test - Precommit */
const auto batch_values_precommit = framework::dataset::make("batch", { 1U, 2U });
@@ -115,11 +121,17 @@ const auto n0_values_precommit = framework::dataset::make("N0", {2, 4});
/** N0 values to test - Nightly */
const auto n0_values_nightly = framework::dataset::make("N0", {3, 8});
+/** N0 values to test with cl_image support - Precommit */
+const auto n0_values_export_to_cl_image_precommit = framework::dataset::make("N0", {4});
+
+/** N0 values to test with cl_image support - Nightly */
+const auto n0_values_export_to_cl_image_nightly = framework::dataset::make("N0", {8});
+
/** Activation values to test */
const auto act_values = framework::dataset::make("Activation",
{
ActivationLayerInfo(),
- ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 8.f, 2.f),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.0f, 0.5f),
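+    // LU_BOUNDED_RELU(a = 6.0, b = 0.5) clamps to [b, a]: f(x) = min(6.0f, max(0.5f, x))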
});
} // namespace
@@ -129,7 +141,7 @@ TEST_SUITE(DepthwiseConvolutionLayerNative)
TEST_SUITE(Float)
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE_NEW(RunSmall, CLDepthwiseConvolutionLayerNativeFixture<float>, framework::DatasetMode::ALL,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
width_values_precommit,
height_values_precommit),
channel_values_precommit),
@@ -142,14 +154,15 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall, CLDepthwiseConvolutionLayerNativeFixture<fl
framework::dataset::make("DataType", DataType::F32)),
data_layout_values),
act_values),
- n0_values_precommit))
+ n0_values_precommit),
+ framework::dataset::make("ExportToCLImage", false)))
{
// Validate output
validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
}
FIXTURE_DATA_TEST_CASE_NEW(RunLarge, CLDepthwiseConvolutionLayerNativeFixture<float>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
width_values_nightly,
height_values_nightly),
channel_values_nightly),
@@ -162,16 +175,79 @@ FIXTURE_DATA_TEST_CASE_NEW(RunLarge, CLDepthwiseConvolutionLayerNativeFixture<fl
framework::dataset::make("DataType", DataType::F32)),
data_layout_values),
act_values),
- n0_values_nightly))
+ n0_values_nightly),
+ framework::dataset::make("ExportToCLImage", false)))
{
// Validate output
validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
}
+
+TEST_SUITE(ExportWeightsToCLImage)
+FIXTURE_DATA_TEST_CASE_NEW(RunSmall, CLDepthwiseConvolutionLayerNativeFixture<float>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ width_values_precommit,
+ height_values_precommit),
+ channel_values_export_to_cl_image_precommit),
+ batch_values_precommit),
+ kernel_sz_values_precommit),
+ framework::dataset::make("depth_multiplier", 1)),
+ dilation_values),
+ stride_values),
+ padding_valid_values),
+ framework::dataset::make("DataType", DataType::F32)),
+ data_layout_values),
+ act_values),
+ n0_values_export_to_cl_image_precommit),
+ framework::dataset::make("ExportToCLImage", true)))
+{
+ // Validate output
+ if(_validate_output)
+ {
+ validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
+ }
+ else
+ {
+ ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
+ framework::ARM_COMPUTE_PRINT_INFO();
+ }
+}
+
+FIXTURE_DATA_TEST_CASE_NEW(RunLarge, CLDepthwiseConvolutionLayerNativeFixture<float>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ width_values_nightly,
+ height_values_nightly),
+ channel_values_export_to_cl_image_nightly),
+ batch_values_nightly),
+ kernel_sz_values_nightly),
+ framework::dataset::make("depth_multiplier", 1)),
+ dilation_values),
+ stride_values),
+ padding_valid_values),
+ framework::dataset::make("DataType", DataType::F32)),
+ data_layout_values),
+ act_values),
+ n0_values_export_to_cl_image_nightly),
+ framework::dataset::make("ExportToCLImage", true)))
+{
+ // Validate output
+ if(_validate_output)
+ {
+ validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
+ }
+ else
+ {
+ ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
+ framework::ARM_COMPUTE_PRINT_INFO();
+ }
+}
+TEST_SUITE_END() // ExportWeightsToCLImage
TEST_SUITE_END() // FP32
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE_NEW(RunSmall, CLDepthwiseConvolutionLayerNativeFixture<half>, framework::DatasetMode::ALL,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
width_values_precommit,
height_values_precommit),
channel_values_precommit),
@@ -184,14 +260,15 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall, CLDepthwiseConvolutionLayerNativeFixture<ha
framework::dataset::make("DataType", DataType::F16)),
data_layout_values),
act_values),
- n0_values_precommit))
+ n0_values_precommit),
+ framework::dataset::make("ExportToCLImage", false)))
{
// Validate output
validate(CLAccessor(_target), _reference, rel_tolerance_f16);
}
FIXTURE_DATA_TEST_CASE_NEW(RunLarge, CLDepthwiseConvolutionLayerNativeFixture<half>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
width_values_nightly,
height_values_nightly),
channel_values_nightly),
@@ -204,18 +281,80 @@ FIXTURE_DATA_TEST_CASE_NEW(RunLarge, CLDepthwiseConvolutionLayerNativeFixture<ha
framework::dataset::make("DataType", DataType::F16)),
data_layout_values),
act_values),
- n0_values_nightly))
+ n0_values_nightly),
+ framework::dataset::make("ExportToCLImage", false)))
{
// Validate output
validate(CLAccessor(_target), _reference, rel_tolerance_f16, 0.f, abs_tolerance_f16);
}
+TEST_SUITE(ExportWeightsToCLImage)
+FIXTURE_DATA_TEST_CASE_NEW(RunSmall, CLDepthwiseConvolutionLayerNativeFixture<half>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ width_values_precommit,
+ height_values_precommit),
+ channel_values_export_to_cl_image_precommit),
+ batch_values_precommit),
+ kernel_sz_values_precommit),
+ framework::dataset::make("depth_multiplier", 1)),
+ dilation_values),
+ stride_values),
+ padding_valid_values),
+ framework::dataset::make("DataType", DataType::F16)),
+ data_layout_values),
+ act_values),
+ n0_values_export_to_cl_image_precommit),
+ framework::dataset::make("ExportToCLImage", true)))
+{
+ // Validate output
+ if(_validate_output)
+ {
+ validate(CLAccessor(_target), _reference, rel_tolerance_f16, 0.f, abs_tolerance_f16);
+ }
+ else
+ {
+ ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
+ framework::ARM_COMPUTE_PRINT_INFO();
+ }
+}
+
+FIXTURE_DATA_TEST_CASE_NEW(RunLarge, CLDepthwiseConvolutionLayerNativeFixture<half>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ width_values_nightly,
+ height_values_nightly),
+ channel_values_export_to_cl_image_nightly),
+ batch_values_nightly),
+ kernel_sz_values_nightly),
+ framework::dataset::make("depth_multiplier", 1)),
+ dilation_values),
+ stride_values),
+ padding_valid_values),
+ framework::dataset::make("DataType", DataType::F16)),
+ data_layout_values),
+ act_values),
+ n0_values_export_to_cl_image_nightly),
+ framework::dataset::make("ExportToCLImage", true)))
+{
+ // Validate output
+ if(_validate_output)
+ {
+ validate(CLAccessor(_target), _reference, rel_tolerance_f16, 0.f, abs_tolerance_f16);
+ }
+ else
+ {
+ ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
+ framework::ARM_COMPUTE_PRINT_INFO();
+ }
+}
+TEST_SUITE_END() // ExportWeightsToCLImage
TEST_SUITE_END() // FP16
TEST_SUITE_END() // Float
TEST_SUITE(DepthMultiplier)
TEST_SUITE(Float)
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE_NEW(RunSmall, CLDepthwiseConvolutionLayerNativeFixture<float>, framework::DatasetMode::ALL,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
width_values_precommit,
height_values_precommit),
channel_values_precommit),
@@ -228,14 +367,15 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall, CLDepthwiseConvolutionLayerNativeFixture<fl
framework::dataset::make("DataType", DataType::F32)),
data_layout_values),
act_values),
- framework::dataset::make("N0", 1)))
+ framework::dataset::make("N0", 1)),
+ framework::dataset::make("ExportToCLImage", false)))
{
// Validate output
validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
}
FIXTURE_DATA_TEST_CASE_NEW(RunLarge, CLDepthwiseConvolutionLayerNativeFixture<float>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
width_values_nightly,
height_values_nightly),
channel_values_nightly),
@@ -248,7 +388,8 @@ FIXTURE_DATA_TEST_CASE_NEW(RunLarge, CLDepthwiseConvolutionLayerNativeFixture<fl
framework::dataset::make("DataType", DataType::F32)),
data_layout_values),
act_values),
- framework::dataset::make("N0", 1)))
+ framework::dataset::make("N0", 1)),
+ framework::dataset::make("ExportToCLImage", false)))
{
// Validate output
validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
@@ -257,7 +398,7 @@ TEST_SUITE_END() // FP32
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE_NEW(RunSmall, CLDepthwiseConvolutionLayerNativeFixture<half>, framework::DatasetMode::ALL,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
width_values_precommit,
height_values_precommit),
channel_values_precommit),
@@ -270,14 +411,15 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall, CLDepthwiseConvolutionLayerNativeFixture<ha
framework::dataset::make("DataType", DataType::F16)),
data_layout_values),
act_values),
- framework::dataset::make("N0", 1)))
+ framework::dataset::make("N0", 1)),
+ framework::dataset::make("ExportToCLImage", false)))
{
// Validate output
validate(CLAccessor(_target), _reference, rel_tolerance_f16);
}
FIXTURE_DATA_TEST_CASE_NEW(RunLarge, CLDepthwiseConvolutionLayerNativeFixture<half>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
width_values_nightly,
height_values_nightly),
channel_values_nightly),
@@ -290,7 +432,8 @@ FIXTURE_DATA_TEST_CASE_NEW(RunLarge, CLDepthwiseConvolutionLayerNativeFixture<ha
framework::dataset::make("DataType", DataType::F16)),
data_layout_values),
act_values),
- framework::dataset::make("N0", 1)))
+ framework::dataset::make("N0", 1)),
+ framework::dataset::make("ExportToCLImage", false)))
{
// Validate output
validate(CLAccessor(_target), _reference, rel_tolerance_f16, 0.f, abs_tolerance_f16);
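All four ExportWeightsToCLImage suites skip rather than fail when the device lacks cl_khr_image2d_from_buffer. A minimal sketch of that gating, using the same CLHelpers entry points the fixture below calls (the wrapper function itself is illustrative):

#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"

bool can_export_weights_to_cl_image()
{
    const cl::Device &device = arm_compute::CLKernelLibrary::get().get_device();
    // Both must hold: images can be created from buffers, and the pitch alignment query is non-zero.
    return arm_compute::image2d_from_buffer_supported(device)
           && (arm_compute::get_cl_image_pitch_alignment(device) != 0);
}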
diff --git a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
index c255cc5c13..0e02ae28ca 100644
--- a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
@@ -198,7 +198,7 @@ protected:
{
case DataType::QASYMM8:
{
- std::uniform_int_distribution<uint8_t> distribution(0, 10);
+ std::uniform_int_distribution<uint8_t> distribution(0, 15);
library->fill(tensor, distribution, i);
break;
}
@@ -292,7 +292,7 @@ public:
if(padding_valid)
{
- _conv_info = PadStrideInfo();
+ _conv_info = PadStrideInfo(stride.width, stride.height);
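+            // Valid padding: strides only, no border pixels.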
}
else
{
@@ -333,6 +333,7 @@ public:
void allocate_and_run_target()
{
add_padding_x({ &_src, &_weights, &_biases, &_target }, _data_layout);
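+    // Also pad along y so the run covers tensors with padded rows.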
+ add_padding_y({ &_src, &_target }, _data_layout);
// Allocate tensors
_src.allocator()->allocate();
@@ -416,15 +417,16 @@ class DepthwiseConvolutionLayerNativeConfigurableValidationFixture : public Dept
public:
template <typename...>
void setup(size_t width, size_t height, size_t channel, size_t batch, Size2D kernel_size, size_t depth_multiplier, Size2D dilation, Size2D stride, bool padding_valid, DataType data_type,
- DataLayout data_layout, const ActivationLayerInfo &act_info, unsigned int n0)
+ DataLayout data_layout, const ActivationLayerInfo &act_info, unsigned int n0, bool export_to_cl_image)
{
- _dilation = dilation;
- _depth_multiplier = depth_multiplier;
- _data_type = data_type;
- _data_layout = data_layout;
- _act_info = act_info;
- _n0 = n0;
+ _dilation = dilation;
+ _depth_multiplier = depth_multiplier;
+ _data_type = data_type;
+ _data_layout = data_layout;
+ _act_info = act_info;
+ _n0 = n0;
+ _export_to_cl_image = export_to_cl_image;
_input_shape = TensorShape(width, height, channel, batch);
_weights_shape = TensorShape(kernel_size.width, kernel_size.height, channel * _depth_multiplier);
@@ -432,11 +434,11 @@ public:
if(padding_valid)
{
-            _conv_info = PadStrideInfo();
+            _conv_info = PadStrideInfo(stride.width, stride.height);
        }
        else
        {
            _conv_info = calculate_same_pad(_input_shape, _weights_shape, PadStrideInfo(stride.width, stride.height), DataLayout::NCHW, _dilation);
}
}
@@ -462,14 +464,26 @@ public:
target_to_use = &_target;
}
- DWCWeightsKernelInfo dwc_weights_info;
- dwc_weights_info.n0 = _n0;
+ DWCComputeKernelInfo dwc_info;
+ dwc_info.n0 = _n0;
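+    // Process 8 output values per work-item along the spatial x dimension only when
+    // stride_x == 1 and there is no x-dilation; otherwise fall back to m0 = 1.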
+ dwc_info.m0 = _conv_info.stride().first == 1 && _dilation.x() == 1 ? 8 : 1;
+ dwc_info.export_weights_to_cl_image = _export_to_cl_image;
- DWCKernelInfo dwc_info;
- dwc_info.activation_info = _act_info;
+#if defined(ARM_COMPUTE_OPENCL_ENABLED)
+ if(_export_to_cl_image)
+ {
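+        // Gate validation on device support: cl_image-from-buffer plus a non-zero pitch alignment.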
+        _validate_output &= image2d_from_buffer_supported(CLKernelLibrary::get().get_device());
+        _validate_output &= (get_cl_image_pitch_alignment(CLKernelLibrary::get().get_device()) != 0);
+ }
+#endif // ARM_COMPUTE_OPENCL_ENABLED
+
+ const ConvolutionInfo conv_kernel_info
+ {
+ _conv_info, _depth_multiplier, _act_info, _dilation
+ };
// Create Depthwise Convolution configure function
- _dwc.configure(&_src, &_weights, &_biases, target_to_use, dwc_weights_info, dwc_info, _conv_info, _depth_multiplier, _dilation);
+ _dwc.configure(&_src, &_weights, &_biases, target_to_use, dwc_info, conv_kernel_info);
ARM_COMPUTE_ASSERT(_src.info()->is_resizable());
ARM_COMPUTE_ASSERT(_weights.info()->is_resizable());
@@ -479,7 +493,8 @@ public:
void allocate_and_run_target()
{
- add_padding_x({ &_src, &_weights, &_biases, &_target }, _data_layout);
+ add_padding_x({ &_src, &_biases, &_target }, _data_layout);
+ add_padding_x({ &_weights }, _data_layout, _export_to_cl_image); // Don't add left padding if cl image will be used
// Allocate tensors
_src.allocator()->allocate();
@@ -508,7 +523,10 @@ public:
}
// Compute function
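+    // Skip execution when the requested cl_image export is unsupported on this device.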
- _dwc.run();
+ if(_validate_output)
+ {
+ _dwc.run();
+ }
// Reinstating original data layout for the test suite to properly check the values
if(!_in_place)
@@ -529,7 +547,10 @@ public:
const ConvolutionInfo info{ _conv_info, _depth_multiplier, _act_info, _dilation };
const TensorShape dst_shape = compute_depthwise_convolution_shape(TensorInfo(_input_shape, 1, _data_type), TensorInfo(_weights_shape, 1, _data_type), info);
- _reference = reference::activation_layer(reference::depthwise_convolution(src, weights, biases, dst_shape, _conv_info, _depth_multiplier, _dilation), _act_info);
+ if(_validate_output)
+ {
+ _reference = reference::activation_layer(reference::depthwise_convolution(src, weights, biases, dst_shape, _conv_info, _depth_multiplier, _dilation), _act_info);
+ }
}
protected:
@@ -573,6 +594,8 @@ protected:
Size2D _dilation{};
unsigned int _depth_multiplier{};
unsigned int _n0{};
+ bool _export_to_cl_image{};
+ bool _validate_output{ true };
bool _in_place{ false };
};