diff options
26 files changed, 698 insertions, 490 deletions
diff --git a/arm_compute/runtime/CL/functions/CLFFT1D.h b/arm_compute/runtime/CL/functions/CLFFT1D.h index e88ee7650d..731bad5c32 100644 --- a/arm_compute/runtime/CL/functions/CLFFT1D.h +++ b/arm_compute/runtime/CL/functions/CLFFT1D.h @@ -61,7 +61,7 @@ public: ~CLFFT1D(); /** Initialise the function's source, destinations and border mode. * - * @param[in] input Source tensor. Data types supported: F32. + * @param[in] input Source tensor. Data types supported: F16/F32. * @param[out] output Destination tensor. Data types and data layouts supported: Same as @p input. * @param[in] config FFT related configuration */ @@ -69,14 +69,14 @@ public: /** Initialise the function's source, destinations and border mode. * * @param[in] compile_context The compile context to be used. - * @param[in] input Source tensor. Data types supported: F32. + * @param[in] input Source tensor. Data types supported: F16/F32. * @param[out] output Destination tensor. Data types and data layouts supported: Same as @p input. * @param[in] config FFT related configuration */ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const FFT1DInfo &config); /** Static function to check if given info will lead to a valid configuration of @ref CLFFT1D. * - * @param[in] input Source tensor info. Data types supported: F32. + * @param[in] input Source tensor info. Data types supported: F16/F32. * @param[in] output Destination tensor info. Data types and data layouts supported: Same as @p input. * @param[in] config FFT related configuration * diff --git a/arm_compute/runtime/CL/functions/CLFFT2D.h b/arm_compute/runtime/CL/functions/CLFFT2D.h index c54127f209..adc8e46cb2 100644 --- a/arm_compute/runtime/CL/functions/CLFFT2D.h +++ b/arm_compute/runtime/CL/functions/CLFFT2D.h @@ -58,7 +58,7 @@ public: ~CLFFT2D(); /** Initialise the function's source, destinations and border mode. * - * @param[in] input Source tensor. Data types supported: F32. 
+ * @param[in] input Source tensor. Data types supported: F16/F32. * @param[out] output Destination tensor. Data types and data layouts supported: Same as @p input. * @param[in] config FFT related configuration */ @@ -66,14 +66,14 @@ public: /** Initialise the function's source, destinations and border mode. * * @param[in] compile_context The compile context to be used. - * @param[in] input Source tensor. Data types supported: F32. + * @param[in] input Source tensor. Data types supported: F16/F32. * @param[out] output Destination tensor. Data types and data layouts supported: Same as @p input. * @param[in] config FFT related configuration */ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const FFT2DInfo &config); /** Static function to check if given info will lead to a valid configuration of @ref CLFFT2D. * - * @param[in] input Source tensor info. Data types supported: F32. + * @param[in] input Source tensor info. Data types supported: F16/F32. * @param[in] output Destination tensor info. Data types and data layouts supported: Same as @p input. * @param[in] config FFT related configuration * diff --git a/arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h index 53ce63333b..5085f5a66c 100644 --- a/arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h +++ b/arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h @@ -73,53 +73,59 @@ public: * * @note: This function only works with any square kernel size and unit strides for both NCHW and NHWC data layout * - * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM], - * while every optional dimension from 4 and above represent a batch of inputs. - * Data types supported: F32. - * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported:Same as @p input. - * @param[in] biases Biases tensor. 
Shared biases supported. Biases are 1D tensor with dimensions [OFM].Data type supported: Same as @p input - * @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs. - * Data types supported: Same as @p input. - * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. - * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM], + * while every optional dimension from 4 and above represent a batch of inputs. + * Data types supported: F16/F32. + * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported:Same as @p input. + * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].Data type supported: Same as @p input + * @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs. + * Data types supported: Same as @p input. + * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. + * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + * @param[in] enable_fast_math (Optional) Enable fast math computation. In case this flag were set, the function could dispatch the fastest implementation + * available which may introduce a drop of accuracy as well. Default is false */ void configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, - const ActivationLayerInfo &act_info = ActivationLayerInfo()); + const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false); /** Set the input and output tensors. 
* * @note: This function only works with any square kernel size and unit strides for both NCHW and NHWC data layout * - * @param[in] compile_context The compile context to be used. - * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM], - * while every optional dimension from 4 and above represent a batch of inputs. - * Data types supported: F32. - * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported:Same as @p input. - * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].Data type supported: Same as @p input - * @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs. - * Data types supported: Same as @p input. - * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. - * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + * @param[in] compile_context The compile context to be used. + * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM], + * while every optional dimension from 4 and above represent a batch of inputs. + * Data types supported: F16/F32. + * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported:Same as @p input. + * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].Data type supported: Same as @p input + * @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs. + * Data types supported: Same as @p input. + * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. 
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + * @param[in] enable_fast_math (Optional) Enable fast math computation. In case this flag were set, the function could dispatch the fastest implementation + * available which may introduce a drop of accuracy as well. Default is false */ void configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, - const ActivationLayerInfo &act_info = ActivationLayerInfo()); + const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false); /** Static function to check if given info will lead to a valid configuration of @ref CLFFTConvolutionLayer * * @note: This function only works with any square kernel size and unit strides for both NCHW and NHWC data layout * - * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM], - * while every optional dimension from 4 and above represent a batch of inputs. - * Data types supported: F32. - * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported:Same as @p input. - * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].Data type supported: Same as @p input - * @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs. - * Data types supported: Same as @p input. - * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. - * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM], + * while every optional dimension from 4 and above represent a batch of inputs. 
+ * Data types supported: F16/F32. + * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported:Same as @p input. + * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].Data type supported: Same as @p input + * @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs. + * Data types supported: Same as @p input. + * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. + * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + * @param[in] enable_fast_math (Optional) Enable fast math computation. In case this flag were set, the function could dispatch the fastest implementation + * available which may introduce a drop of accuracy as well. Default is false * * @return a status */ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, - const ActivationLayerInfo &act_info = ActivationLayerInfo()); + const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false); // Inherited methods overridden: void run() override; diff --git a/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h b/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h index 6432cd040d..b48f6eb6cc 100644 --- a/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h +++ b/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h @@ -120,7 +120,7 @@ public: /** Initialise the kernel's inputs, output. * * @param[in] compile_context The compile context to be used. - * @param[in, out] input1 An input tensor. Data types supported: F32. Number of channels supported: 2. + * @param[in, out] input1 An input tensor. Data types supported: F16/F32. 
Number of channels supported: 2. * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. * @param[in, out] input2 An input tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1. * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. @@ -130,7 +130,7 @@ public: void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); /** Static function to check if given info will lead to a valid configuration of @ref CLComplexPixelWiseMultiplication * - * @param[in] input1 An input tensor info. Data types supported: F32. Number of channels supported: 2. + * @param[in] input1 An input tensor info. Data types supported: F16/F32. Number of channels supported: 2. * @param[in] input2 An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1. * @param[in] output The output tensor info, Data types supported: same as @p input1. Number of channels supported: same as @p input1. * @param[in] act_info (Optional) Activation layer information in case of a fused activation. @@ -277,7 +277,7 @@ public: CLComplexPixelWiseMultiplication &operator=(CLComplexPixelWiseMultiplication &&); /** Initialise the kernel's inputs, output. * - * @param[in, out] input1 An input tensor. Data types supported: F32. Number of channels supported: 2. + * @param[in, out] input1 An input tensor. Data types supported: F16/F32. Number of channels supported: 2. * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. * @param[in, out] input2 An input tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1. 
* The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. @@ -288,7 +288,7 @@ public: /** Initialise the kernel's inputs, output. * * @param[in] compile_context The compile context to be used. - * @param[in, out] input1 An input tensor. Data types supported: F32. Number of channels supported: 2. + * @param[in, out] input1 An input tensor. Data types supported: F16/F32. Number of channels supported: 2. * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. * @param[in, out] input2 An input tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1. * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. @@ -298,7 +298,7 @@ public: void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); /** Static function to check if given info will lead to a valid configuration of @ref CLComplexPixelWiseMultiplication * - * @param[in] input1 An input tensor info. Data types supported: F32. Number of channels supported: 2. + * @param[in] input1 An input tensor info. Data types supported: F16/F32. Number of channels supported: 2. * @param[in] input2 An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1. * @param[in] output The output tensor info, Data types supported: same as @p input1. Number of channels supported: same as @p input1. * @param[in] act_info (Optional) Activation layer information in case of a fused activation. 
diff --git a/arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h index 37750e243b..b181e05c1a 100644 --- a/arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h +++ b/arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h @@ -75,36 +75,38 @@ public: * * @note: This function only works with any square kernel size and unit strides for both NCHW and NHWC data layout * - * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM], - * while every optional dimension from 4 and above represent a batch of inputs. - * Data types supported: F32. - * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported:Same as @p input. - * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].Data type supported: Same as @p input - * @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs. - * Data types supported: Same as @p input. - * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. - * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM], + * while every optional dimension from 4 and above represent a batch of inputs. + * Data types supported: F32. + * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported:Same as @p input. + * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].Data type supported: Same as @p input + * @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs. 
+ * Data types supported: Same as @p input. + * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. + * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + * @param[in] enable_fast_math (Optional) Enable fast math computation. Unused for NEON backend. */ void configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, - const ActivationLayerInfo &act_info = ActivationLayerInfo()); + const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false); /** Static function to check if given info will lead to a valid configuration of @ref NEFFTConvolutionLayer * * @note: This function only works with any square kernel size and unit strides for both NCHW and NHWC data layout * - * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM], - * while every optional dimension from 4 and above represent a batch of inputs. - * Data types supported: F32. - * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported:Same as @p input. - * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].Data type supported: Same as @p input - * @param[in] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs. - * Data types supported: Same as @p input. - * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. - * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM], + * while every optional dimension from 4 and above represent a batch of inputs. + * Data types supported: F32. + * @param[in] weights Weights tensor. 
Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported:Same as @p input. + * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].Data type supported: Same as @p input + * @param[in] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs. + * Data types supported: Same as @p input. + * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. + * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + * @param[in] enable_fast_math (Optional) Enable fast math computation. Unused for NEON backend. * * @return a status */ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, - const ActivationLayerInfo &act_info = ActivationLayerInfo()); + const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false); // Inherited methods overridden: void run() override; diff --git a/src/core/CL/cl_kernels/fft.cl b/src/core/CL/cl_kernels/fft.cl index eb1eec56e7..b257451652 100644 --- a/src/core/CL/cl_kernels/fft.cl +++ b/src/core/CL/cl_kernels/fft.cl @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Arm Limited. + * Copyright (c) 2019-2020 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -23,6 +23,7 @@ */ #include "helpers.h" +#if defined(DATA_TYPE) /** Calculates and applies the twiddle factor to a given input. * * @param[in] phi The angle. @@ -30,7 +31,8 @@ */ #define TWIDDLE_FACTOR_MULTIPLICATION(phi, input) \ { \ - float2 w, tmp; \ + VEC_DATA_TYPE(DATA_TYPE, 2) \ + w, tmp; \ w.x = native_cos(phi); \ w.y = native_sin(phi); \ tmp.x = (w.x * input.x) - (w.y * input.y); \ @@ -43,12 +45,13 @@ * @param[in,out] c0 Complex input 0. * @param[in,out] c1 Complex input 1. 
*/ -#define DFT_2(c0, c1) \ - { \ - float2 v0; \ - v0 = c0; \ - c0 = v0 + c1; \ - c1 = v0 - c1; \ +#define DFT_2(c0, c1) \ + { \ + VEC_DATA_TYPE(DATA_TYPE, 2) \ + v0; \ + v0 = c0; \ + c0 = v0 + c1; \ + c1 = v0 - c1; \ } // radix-3 butterfly unit factors @@ -60,15 +63,17 @@ * @param[in,out] c1 Complex input 1. * @param[in,out] c2 Complex input 2. */ -#define DFT_3(c0, c1, c2) \ - { \ - float2 v0 = c1 + c2; \ - float2 v1 = c1 - c2; \ - c1.x = c0.x - 0.5f * v0.x + v1.y * SQRT3DIV2; \ - c1.y = c0.y - 0.5f * v0.y - v1.x * SQRT3DIV2; \ - c2.x = c0.x - 0.5f * v0.x - v1.y * SQRT3DIV2; \ - c2.y = c0.y - 0.5f * v0.y + v1.x * SQRT3DIV2; \ - c0 = c0 + v0; \ +#define DFT_3(c0, c1, c2) \ + { \ + VEC_DATA_TYPE(DATA_TYPE, 2) \ + v0 = c1 + c2; \ + VEC_DATA_TYPE(DATA_TYPE, 2) \ + v1 = c1 - c2; \ + c1.x = c0.x - 0.5f * v0.x + v1.y * SQRT3DIV2; \ + c1.y = c0.y - 0.5f * v0.y - v1.x * SQRT3DIV2; \ + c2.x = c0.x - 0.5f * v0.x - v1.y * SQRT3DIV2; \ + c2.y = c0.y - 0.5f * v0.y + v1.x * SQRT3DIV2; \ + c0 = c0 + v0; \ } /**Computes radix-4 butterfly unit. @@ -78,25 +83,26 @@ * @param[in,out] c2 Complex input 2. * @param[in,out] c3 Complex input 3. 
*/ -#define DFT_4(c0, c1, c2, c3) \ - { \ - float2 v0, v1, v2, v3; \ - v0 = c0 + c2; \ - v1 = c1 + c3; \ - v2 = c0 - c2; \ - v3.x = c1.y - c3.y; \ - v3.y = c3.x - c1.x; \ - c0 = v0 + v1; \ - c2 = v0 - v1; \ - c1 = v2 + v3; \ - c3 = v2 - v3; \ +#define DFT_4(c0, c1, c2, c3) \ + { \ + VEC_DATA_TYPE(DATA_TYPE, 2) \ + v0, v1, v2, v3; \ + v0 = c0 + c2; \ + v1 = c1 + c3; \ + v2 = c0 - c2; \ + v3.x = c1.y - c3.y; \ + v3.y = c3.x - c1.x; \ + c0 = v0 + v1; \ + c2 = v0 - v1; \ + c1 = v2 + v3; \ + c3 = v2 - v3; \ } // radix-5 butterfly unit factors -#define W5_A 0.30901699437494f -#define W5_B 0.95105651629515f -#define W5_C 0.80901699437494f -#define W5_D 0.58778525229247f +#define W5_A (DATA_TYPE)0.30901699437494f +#define W5_B (DATA_TYPE)0.95105651629515f +#define W5_C (DATA_TYPE)0.80901699437494f +#define W5_D (DATA_TYPE)0.58778525229247f /** Computes radix-5 butterfly unit. * @@ -106,28 +112,29 @@ * @param[in,out] c3 Complex input 3. * @param[in,out] c4 Complex input 4. */ -#define DFT_5(c0, c1, c2, c3, c4) \ - { \ - float2 v0, v1, v2, v3, v4; \ - v0 = c0; \ - v1 = W5_A * (c1 + c4) - W5_C * (c2 + c3); \ - v2 = W5_C * (c1 + c4) - W5_A * (c2 + c3); \ - v3 = W5_D * (c1 - c4) - W5_B * (c2 - c3); \ - v4 = W5_B * (c1 - c4) + W5_D * (c2 - c3); \ - c0 = v0 + c1 + c2 + c3 + c4; \ - c1 = v0 + v1 + (float2)(v4.y, -v4.x); \ - c2 = v0 - v2 + (float2)(v3.y, -v3.x); \ - c3 = v0 - v2 + (float2)(-v3.y, v3.x); \ - c4 = v0 + v1 + (float2)(-v4.y, v4.x); \ +#define DFT_5(c0, c1, c2, c3, c4) \ + { \ + VEC_DATA_TYPE(DATA_TYPE, 2) \ + v0, v1, v2, v3, v4; \ + v0 = c0; \ + v1 = W5_A * (c1 + c4) - W5_C * (c2 + c3); \ + v2 = W5_C * (c1 + c4) - W5_A * (c2 + c3); \ + v3 = W5_D * (c1 - c4) - W5_B * (c2 - c3); \ + v4 = W5_B * (c1 - c4) + W5_D * (c2 - c3); \ + c0 = v0 + c1 + c2 + c3 + c4; \ + c1 = v0 + v1 + (VEC_DATA_TYPE(DATA_TYPE, 2))(v4.y, -v4.x); \ + c2 = v0 - v2 + (VEC_DATA_TYPE(DATA_TYPE, 2))(v3.y, -v3.x); \ + c3 = v0 - v2 + (VEC_DATA_TYPE(DATA_TYPE, 2))(-v3.y, v3.x); \ + c4 = v0 + v1 + 
(VEC_DATA_TYPE(DATA_TYPE, 2))(-v4.y, v4.x); \ } // radix-7 butterfly unit factors -#define W7_A 0.62348980185873f -#define W7_B 0.78183148246802f -#define W7_C 0.22252093395631f -#define W7_D 0.97492791218182f -#define W7_E 0.90096886790241f -#define W7_F 0.43388373911755f +#define W7_A (DATA_TYPE)0.62348980185873f +#define W7_B (DATA_TYPE)0.78183148246802f +#define W7_C (DATA_TYPE)0.22252093395631f +#define W7_D (DATA_TYPE)0.97492791218182f +#define W7_E (DATA_TYPE)0.90096886790241f +#define W7_F (DATA_TYPE)0.43388373911755f /** Computes radix-7 butterfly unit. * @@ -141,7 +148,8 @@ */ #define DFT_7(c0, c1, c2, c3, c4, c5, c6) \ { \ - float2 v0, v1, v2, v3, v4, v5, v6; \ + VEC_DATA_TYPE(DATA_TYPE, 2) \ + v0, v1, v2, v3, v4, v5, v6; \ v0 = c0; \ v1 = W7_A * (c1 + c6) - W7_C * (c2 + c5) - W7_E * (c3 + c4); \ v2 = W7_C * (c1 + c6) + W7_E * (c2 + c5) - W7_A * (c3 + c4); \ @@ -150,12 +158,12 @@ v5 = W7_D * (c1 - c6) - W7_F * (c2 - c5) - W7_B * (c3 - c4); \ v6 = W7_F * (c1 - c6) - W7_B * (c2 - c5) + W7_D * (c3 - c4); \ c0 = v0 + c1 + c2 + c3 + c4 + c5 + c6; \ - c1 = v0 + v1 + (float2)(v4.y, -v4.x); \ - c2 = v0 - v2 + (float2)(v5.y, -v5.x); \ - c3 = v0 - v3 + (float2)(v6.y, -v6.x); \ - c4 = v0 - v3 + (float2)(-v6.y, v6.x); \ - c5 = v0 - v2 + (float2)(-v5.y, v5.x); \ - c6 = v0 + v1 + (float2)(-v4.y, v4.x); \ + c1 = v0 + v1 + (VEC_DATA_TYPE(DATA_TYPE, 2))(v4.y, -v4.x); \ + c2 = v0 - v2 + (VEC_DATA_TYPE(DATA_TYPE, 2))(v5.y, -v5.x); \ + c3 = v0 - v3 + (VEC_DATA_TYPE(DATA_TYPE, 2))(v6.y, -v6.x); \ + c4 = v0 - v3 + (VEC_DATA_TYPE(DATA_TYPE, 2))(-v6.y, v6.x); \ + c5 = v0 - v2 + (VEC_DATA_TYPE(DATA_TYPE, 2))(-v5.y, v5.x); \ + c6 = v0 + v1 + (VEC_DATA_TYPE(DATA_TYPE, 2))(-v4.y, v4.x); \ } /** Computes radix-8 butterfly unit. @@ -169,52 +177,55 @@ * @param[in,out] c6 Complex input 6. * @param[in,out] c7 Complex input 7. 
*/ -#define DFT_8(c0, c1, c2, c3, c4, c5, c6, c7) \ - { \ - float2 v0, v1, v2, v3, v4, v5, v6, v7; \ - float2 s0, s1, s2, s3, s4, s5, s6, s7; \ - float2 t0, t1, t2; \ - v0 = c0 + c4; \ - v1 = c1 + c5; \ - v2 = c2 + c6; \ - v3 = c3 + c7; \ - v4 = c0 - c4; \ - v5 = c1 - c5; \ - v6 = c2 - c6; \ - v7 = c3 - c7; \ - s0 = v0 + v2; \ - s1 = v1 + v3; \ - s2 = v0 - v2; \ - s3 = v1 - v3; \ - s4.x = v4.x - v6.y; \ - s4.y = v4.y + v6.x; \ - s5.x = v5.x - v7.y; \ - s5.y = v5.y + v7.x; \ - s6.x = v4.x + v6.y; \ - s6.y = v4.y - v6.x; \ - s7.x = v5.x + v7.y; \ - s7.y = v5.y - v7.x; \ - t0.x = -s3.y; \ - t0.y = s3.x; \ - t1.x = M_SQRT1_2_F * (s5.x - s5.y); \ - t1.y = M_SQRT1_2_F * (s5.x + s5.y); \ - t2.x = -M_SQRT1_2_F * (s7.x + s7.y); \ - t2.y = M_SQRT1_2_F * (s7.x - s7.y); \ - c0 = s0 + s1; \ - c1 = s6 - t2; \ - c2 = s2 - t0; \ - c3 = s4 - t1; \ - c4 = s0 - s1; \ - c5 = s6 + t2; \ - c6 = s2 + t0; \ - c7 = s4 + t1; \ +#define DFT_8(c0, c1, c2, c3, c4, c5, c6, c7) \ + { \ + VEC_DATA_TYPE(DATA_TYPE, 2) \ + v0, v1, v2, v3, v4, v5, v6, v7; \ + VEC_DATA_TYPE(DATA_TYPE, 2) \ + s0, s1, s2, s3, s4, s5, s6, s7; \ + VEC_DATA_TYPE(DATA_TYPE, 2) \ + t0, t1, t2; \ + v0 = c0 + c4; \ + v1 = c1 + c5; \ + v2 = c2 + c6; \ + v3 = c3 + c7; \ + v4 = c0 - c4; \ + v5 = c1 - c5; \ + v6 = c2 - c6; \ + v7 = c3 - c7; \ + s0 = v0 + v2; \ + s1 = v1 + v3; \ + s2 = v0 - v2; \ + s3 = v1 - v3; \ + s4.x = v4.x - v6.y; \ + s4.y = v4.y + v6.x; \ + s5.x = v5.x - v7.y; \ + s5.y = v5.y + v7.x; \ + s6.x = v4.x + v6.y; \ + s6.y = v4.y - v6.x; \ + s7.x = v5.x + v7.y; \ + s7.y = v5.y - v7.x; \ + t0.x = -s3.y; \ + t0.y = s3.x; \ + t1.x = M_SQRT1_2_F * (s5.x - s5.y); \ + t1.y = M_SQRT1_2_F * (s5.x + s5.y); \ + t2.x = -M_SQRT1_2_F * (s7.x + s7.y); \ + t2.y = M_SQRT1_2_F * (s7.x - s7.y); \ + c0 = s0 + s1; \ + c1 = s6 - t2; \ + c2 = s2 - t0; \ + c3 = s4 - t1; \ + c4 = s0 - s1; \ + c5 = s6 + t2; \ + c6 = s2 + t0; \ + c7 = s4 + t1; \ } /** Computes the first stage of a radix-2 DFT on axis 0. 
 * * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F16/F32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -231,7 +242,7 @@ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes) * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image */ -kernel void fft_radix_2_first_stage_axis_0( +__kernel void fft_radix_2_first_stage_axis_0( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -248,20 +259,21 @@ kernel void fft_radix_2_first_stage_axis_0( #endif /* IN_PLACE */ // Load two complex input values - float4 data = vload4(0, (__global float *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 4) + data = vload4(0, (__global DATA_TYPE *)input.ptr); // Compute DFT N = 2 DFT_2(data.s01, data.s23); // Store two complex output values - vstore4(data, 0, (__global float *)output.ptr); + vstore4(data, 0, (__global DATA_TYPE *)output.ptr); } /** Computes the first stage of a radix-2 DFT on axis 1. * * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor. 
Supported data types: F16/F32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -278,7 +290,7 @@ kernel void fft_radix_2_first_stage_axis_0( * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes) * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image */ -kernel void fft_radix_2_first_stage_axis_1( +__kernel void fft_radix_2_first_stage_axis_1( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -295,22 +307,24 @@ kernel void fft_radix_2_first_stage_axis_1( #endif /* IN_PLACE */ // Load two complex input values - float2 data1 = vload2(0, (__global float *)input.ptr); - float2 data2 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 1, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + data1 = vload2(0, (__global DATA_TYPE *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 2) + data2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0)); // Compute DFT N = 2 DFT_2(data1, data2); // Store two complex output values - vstore2(data1, 0, (__global float *)output.ptr); - vstore2(data2, 0, (__global float *)tensor3D_offset(&output, 0, 1, 0)); + vstore2(data1, 0, (__global DATA_TYPE *)output.ptr); + vstore2(data2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 1, 0)); } /** Computes the first stage of a radix-3 DFT on axis 0. * * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor. 
Supported data types: F16/f32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -327,7 +341,7 @@ kernel void fft_radix_2_first_stage_axis_1( * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes) * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image */ -kernel void fft_radix_3_first_stage_axis_0( +__kernel void fft_radix_3_first_stage_axis_0( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -344,22 +358,24 @@ kernel void fft_radix_3_first_stage_axis_0( #endif /* IN_PLACE */ // Load three complex input values - float4 data0 = vload4(0, (__global float *)input.ptr); - float2 data1 = vload2(0, (__global float *)tensor3D_offset(&input, 2, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 4) + data0 = vload4(0, (__global DATA_TYPE *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 2) + data1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 2, 0, 0)); // Compute DFT N = 3 DFT_3(data0.s01, data0.s23, data1.s01); // Store three complex output values - vstore4(data0, 0, (__global float *)output.ptr); - vstore2(data1, 0, (__global float *)tensor3D_offset(&output, 2, 0, 0)); + vstore4(data0, 0, (__global DATA_TYPE *)output.ptr); + vstore2(data1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 2, 0, 0)); } /** Computes the first stage of a radix-3 DFT on axis 1. * * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor. 
Supported data types: F16/f32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -376,7 +392,7 @@ kernel void fft_radix_3_first_stage_axis_0( * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes) * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image */ -kernel void fft_radix_3_first_stage_axis_1( +__kernel void fft_radix_3_first_stage_axis_1( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -393,24 +409,27 @@ kernel void fft_radix_3_first_stage_axis_1( #endif /* IN_PLACE */ // Load three complex input values - float2 data0 = vload2(0, (__global float *)input.ptr); - float2 data1 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 1, 0)); - float2 data2 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 2, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + data0 = vload2(0, (__global DATA_TYPE *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 2) + data1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + data2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 0)); // Compute DFT N = 3 DFT_3(data0, data1, data2); // Store three complex output values - vstore2(data0, 0, (__global float *)output.ptr); - vstore2(data1, 0, (__global float *)tensor3D_offset(&output, 0, 1, 0)); - vstore2(data2, 0, (__global float *)tensor3D_offset(&output, 0, 2, 0)); + vstore2(data0, 0, (__global DATA_TYPE *)output.ptr); + vstore2(data1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 1, 0)); + vstore2(data2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 2, 0)); } /** Computes the first stage of a radix-4 DFT on axis 0. 
* * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F16/F32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -427,7 +446,7 @@ kernel void fft_radix_3_first_stage_axis_1( * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes) * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image */ -kernel void fft_radix_4_first_stage_axis_0( +__kernel void fft_radix_4_first_stage_axis_0( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -444,20 +463,21 @@ kernel void fft_radix_4_first_stage_axis_0( #endif /* IN_PLACE */ // Load four complex input values - float8 data = vload8(0, (__global float *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 8) + data = vload8(0, (__global DATA_TYPE *)input.ptr); // Compute DFT N = 4 DFT_4(data.s01, data.s23, data.s45, data.s67); // Store four complex output values - vstore8(data, 0, (__global float *)output.ptr); + vstore8(data, 0, (__global DATA_TYPE *)output.ptr); } /** Computes the first stage of a radix-4 DFT on axis 1. * * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor.
Supported data types: F16/f32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -474,7 +494,7 @@ kernel void fft_radix_4_first_stage_axis_0( * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes) * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image */ -kernel void fft_radix_4_first_stage_axis_1( +__kernel void fft_radix_4_first_stage_axis_1( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -491,26 +511,30 @@ kernel void fft_radix_4_first_stage_axis_1( #endif /* IN_PLACE */ // Load four complex input values - float2 data0 = vload2(0, (__global float *)input.ptr); - float2 data1 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 1, 0)); - float2 data2 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 2, 0)); - float2 data3 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 3, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + data0 = vload2(0, (__global DATA_TYPE *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 2) + data1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + data2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + data3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 3, 0)); // Compute DFT N = 4 DFT_4(data0, data1, data2, data3); // Store four complex output values - vstore2(data0, 0, (__global float *)output.ptr); - vstore2(data1, 0, (__global float *)tensor3D_offset(&output, 0, 1, 0)); - vstore2(data2, 0, (__global float *)tensor3D_offset(&output, 0, 2, 0)); - vstore2(data3, 0, (__global float *)tensor3D_offset(&output, 0, 3, 0)); + vstore2(data0, 0, (__global DATA_TYPE *)output.ptr); + 
vstore2(data1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 1, 0)); + vstore2(data2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 2, 0)); + vstore2(data3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 3, 0)); } /** Computes the first stage of a radix-5 DFT on axis 0. * * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F16/f32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -527,7 +551,7 @@ kernel void fft_radix_4_first_stage_axis_1( * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes) * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image */ -kernel void fft_radix_5_first_stage_axis_0( +__kernel void fft_radix_5_first_stage_axis_0( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -544,22 +568,24 @@ kernel void fft_radix_5_first_stage_axis_0( #endif /* IN_PLACE */ // Load five complex input values - float8 data0 = vload8(0, (__global float *)input.ptr); - float2 data1 = vload2(0, (__global float *)tensor3D_offset(&input, 4, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 8) + data0 = vload8(0, (__global DATA_TYPE *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 2) + data1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 4, 0, 0)); // Compute DFT N = 5 DFT_5(data0.s01, data0.s23, data0.s45, data0.s67, data1.s01); // Store five complex output values - vstore8(data0, 0, (__global float *)output.ptr); - vstore2(data1, 0, (__global float *)tensor3D_offset(&output, 4, 0, 0)); 
+ vstore8(data0, 0, (__global DATA_TYPE *)output.ptr); + vstore2(data1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 4, 0, 0)); } /** Computes the first stage of a radix-5 DFT on axis 1. * * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F16/f32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -576,7 +602,7 @@ kernel void fft_radix_5_first_stage_axis_0( * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes) * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image */ -kernel void fft_radix_5_first_stage_axis_1( +__kernel void fft_radix_5_first_stage_axis_1( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -593,28 +619,33 @@ kernel void fft_radix_5_first_stage_axis_1( #endif /* IN_PLACE */ // Load five complex input values - float2 data0 = vload2(0, (__global float *)input.ptr); - float2 data1 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 1, 0)); - float2 data2 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 2, 0)); - float2 data3 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 3, 0)); - float2 data4 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 4, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + data0 = vload2(0, (__global DATA_TYPE *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 2) + data1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + data2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 
0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + data3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 3, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + data4 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 4, 0)); // Compute DFT N = 5 DFT_5(data0, data1, data2, data3, data4); // Store five complex output values - vstore2(data0, 0, (__global float *)output.ptr); - vstore2(data1, 0, (__global float *)tensor3D_offset(&output, 0, 1, 0)); - vstore2(data2, 0, (__global float *)tensor3D_offset(&output, 0, 2, 0)); - vstore2(data3, 0, (__global float *)tensor3D_offset(&output, 0, 3, 0)); - vstore2(data4, 0, (__global float *)tensor3D_offset(&output, 0, 4, 0)); + vstore2(data0, 0, (__global DATA_TYPE *)output.ptr); + vstore2(data1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 1, 0)); + vstore2(data2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 2, 0)); + vstore2(data3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 3, 0)); + vstore2(data4, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 4, 0)); } /** Computes the first stage of a radix-7 DFT on axis 0. * * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor. 
Supported data types: F16/f32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -631,7 +662,7 @@ kernel void fft_radix_5_first_stage_axis_1( * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes) * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image */ -kernel void fft_radix_7_first_stage_axis_0( +__kernel void fft_radix_7_first_stage_axis_0( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -648,24 +679,27 @@ kernel void fft_radix_7_first_stage_axis_0( #endif /* IN_PLACE */ // Load seven complex input values - float8 data0 = vload8(0, (__global float *)input.ptr); - float4 data1 = vload4(0, (__global float *)tensor3D_offset(&input, 4, 0, 0)); - float2 data2 = vload2(0, (__global float *)tensor3D_offset(&input, 6, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 8) + data0 = vload8(0, (__global DATA_TYPE *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 4) + data1 = vload4(0, (__global DATA_TYPE *)tensor3D_offset(&input, 4, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + data2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 6, 0, 0)); // Compute DFT N = 7 DFT_7(data0.s01, data0.s23, data0.s45, data0.s67, data1.s01, data1.s23, data2.s01); // Store seven complex output values - vstore8(data0, 0, (__global float *)output.ptr); - vstore4(data1, 0, (__global float *)tensor3D_offset(&output, 4, 0, 0)); - vstore2(data2, 0, (__global float *)tensor3D_offset(&output, 6, 0, 0)); + vstore8(data0, 0, (__global DATA_TYPE *)output.ptr); + vstore4(data1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 4, 0, 0)); + vstore2(data2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 6, 0, 0)); } /** Computes the first stage of a radix-7 DFT on axis 1. 
* * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F16/F32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -682,7 +716,7 @@ kernel void fft_radix_7_first_stage_axis_0( * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes) * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image */ -kernel void fft_radix_7_first_stage_axis_1( +__kernel void fft_radix_7_first_stage_axis_1( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -699,32 +733,39 @@ kernel void fft_radix_7_first_stage_axis_1( #endif /* IN_PLACE */ // Load seven complex input values - float2 data0 = vload2(0, (__global float *)input.ptr); - float2 data1 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 1, 0)); - float2 data2 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 2, 0)); - float2 data3 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 3, 0)); - float2 data4 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 4, 0)); - float2 data5 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 5, 0)); - float2 data6 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 6, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + data0 = vload2(0, (__global DATA_TYPE *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 2) + data1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + data2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 0)); + VEC_DATA_TYPE(DATA_TYPE,
2) + data3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 3, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + data4 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 4, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + data5 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 5, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + data6 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 6, 0)); // Compute DFT N = 7 DFT_7(data0, data1, data2, data3, data4, data5, data6); // Store seven complex output values - vstore2(data0, 0, (__global float *)output.ptr); - vstore2(data1, 0, (__global float *)tensor3D_offset(&output, 0, 1, 0)); - vstore2(data2, 0, (__global float *)tensor3D_offset(&output, 0, 2, 0)); - vstore2(data3, 0, (__global float *)tensor3D_offset(&output, 0, 3, 0)); - vstore2(data4, 0, (__global float *)tensor3D_offset(&output, 0, 4, 0)); - vstore2(data5, 0, (__global float *)tensor3D_offset(&output, 0, 5, 0)); - vstore2(data6, 0, (__global float *)tensor3D_offset(&output, 0, 6, 0)); + vstore2(data0, 0, (__global DATA_TYPE *)output.ptr); + vstore2(data1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 1, 0)); + vstore2(data2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 2, 0)); + vstore2(data3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 3, 0)); + vstore2(data4, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 4, 0)); + vstore2(data5, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 5, 0)); + vstore2(data6, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 6, 0)); } /** Computes the first stage of a radix-8 DFT on axis 0. * * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor. 
Supported data types: F16/f32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -741,7 +782,7 @@ kernel void fft_radix_7_first_stage_axis_1( * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes) * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image */ -kernel void fft_radix_8_first_stage_axis_0( +__kernel void fft_radix_8_first_stage_axis_0( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -758,20 +799,21 @@ kernel void fft_radix_8_first_stage_axis_0( #endif /* IN_PLACE */ // Load eight complex input values - float16 data = vload16(0, (__global float *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 16) + data = vload16(0, (__global DATA_TYPE *)input.ptr); // Compute DFT N = 8 DFT_8(data.s01, data.s23, data.s45, data.s67, data.s89, data.sAB, data.sCD, data.sEF); // Store eight complex output values - vstore16(data, 0, (__global float *)output.ptr); + vstore16(data, 0, (__global DATA_TYPE *)output.ptr); } /** Computes the first stage of a radix-8 DFT on axis 1. * * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor. 
Supported data types: F16/f32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -788,7 +830,7 @@ kernel void fft_radix_8_first_stage_axis_0( * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes) * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image */ -kernel void fft_radix_8_first_stage_axis_1( +__kernel void fft_radix_8_first_stage_axis_1( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -805,34 +847,42 @@ kernel void fft_radix_8_first_stage_axis_1( #endif /* IN_PLACE */ // Load eight complex input values - float2 data0 = vload2(0, (__global float *)input.ptr); - float2 data1 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 1, 0)); - float2 data2 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 2, 0)); - float2 data3 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 3, 0)); - float2 data4 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 4, 0)); - float2 data5 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 5, 0)); - float2 data6 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 6, 0)); - float2 data7 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 7, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + data0 = vload2(0, (__global DATA_TYPE *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 2) + data1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + data2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + data3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 3, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + data4 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 4, 
0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + data5 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 5, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + data6 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 6, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + data7 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 7, 0)); // Compute DFT N = 8 DFT_8(data0, data1, data2, data3, data4, data5, data6, data7); // Store eight complex output values - vstore2(data0, 0, (__global float *)output.ptr); - vstore2(data1, 0, (__global float *)tensor3D_offset(&output, 0, 1, 0)); - vstore2(data2, 0, (__global float *)tensor3D_offset(&output, 0, 2, 0)); - vstore2(data3, 0, (__global float *)tensor3D_offset(&output, 0, 3, 0)); - vstore2(data4, 0, (__global float *)tensor3D_offset(&output, 0, 4, 0)); - vstore2(data5, 0, (__global float *)tensor3D_offset(&output, 0, 5, 0)); - vstore2(data6, 0, (__global float *)tensor3D_offset(&output, 0, 6, 0)); - vstore2(data7, 0, (__global float *)tensor3D_offset(&output, 0, 7, 0)); + vstore2(data0, 0, (__global DATA_TYPE *)output.ptr); + vstore2(data1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 1, 0)); + vstore2(data2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 2, 0)); + vstore2(data3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 3, 0)); + vstore2(data4, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 4, 0)); + vstore2(data5, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 5, 0)); + vstore2(data6, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 6, 0)); + vstore2(data7, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 7, 0)); } /** Computes a stage of a radix-2 FFT on axis 0. * * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor. 
Supported data types: F16/f32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -852,7 +902,7 @@ kernel void fft_radix_8_first_stage_axis_1( * @param[in] Ni Nx * Ny. * @param[in] exp_const Exponent constant */ -kernel void fft_radix_2_axis_0( +__kernel void fft_radix_2_axis_0( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -881,11 +931,13 @@ kernel void fft_radix_2_axis_0( #endif /* IN_PLACE */ // Load two complex input values - float2 c0 = vload2(0, (__global float *)input.ptr); - float2 c1 = vload2(0, (__global float *)tensor3D_offset(&input, Nx, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c0 = vload2(0, (__global DATA_TYPE *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 2) + c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, Nx, 0, 0)); // Compute phi - float phi = (float)nx * exp_const; + DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; // Multiply by twiddle factor TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); @@ -894,15 +946,15 @@ kernel void fft_radix_2_axis_0( DFT_2(c0, c1); // Store two complex output values - vstore2(c0, 0, (__global float *)output.ptr); - vstore2(c1, 0, (__global float *)tensor3D_offset(&output, Nx, 0, 0)); + vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); + vstore2(c1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, Nx, 0, 0)); } /** Computes a stage of a radix-2 FFT on axis 1. * * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor. 
Supported data types: F16/f32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -922,7 +974,7 @@ kernel void fft_radix_2_axis_0( * @param[in] Ni Nx * Ny. * @param[in] exp_const Exponent constant */ -kernel void fft_radix_2_axis_1( +__kernel void fft_radix_2_axis_1( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -951,11 +1003,13 @@ kernel void fft_radix_2_axis_1( #endif /* IN_PLACE */ // Load two complex input values - float2 c0 = vload2(0, (__global float *)input.ptr); - float2 c1 = vload2(0, (__global float *)tensor3D_offset(&input, 0, Nx, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c0 = vload2(0, (__global DATA_TYPE *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 2) + c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, Nx, 0)); // Compute phi - float phi = (float)nx * exp_const; + DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; // Multiply by twiddle factor TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); @@ -964,15 +1018,15 @@ kernel void fft_radix_2_axis_1( DFT_2(c0, c1); // Store two complex output values - vstore2(c0, 0, (__global float *)output.ptr); - vstore2(c1, 0, (__global float *)tensor3D_offset(&output, 0, Nx, 0)); + vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); + vstore2(c1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, Nx, 0)); } /** Computes a stage of a radix-3 FFT on axis 0. * * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor. 
Supported data types: F16/f32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -992,7 +1046,7 @@ kernel void fft_radix_2_axis_1( * @param[in] Ni Nx * Ny. * @param[in] exp_const Exponent constant */ -kernel void fft_radix_3_axis_0( +__kernel void fft_radix_3_axis_0( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -1021,12 +1075,15 @@ kernel void fft_radix_3_axis_0( #endif /* IN_PLACE */ // Load three complex input values - float2 c0 = vload2(0, (__global float *)input.ptr); - float2 c1 = vload2(0, (__global float *)tensor3D_offset(&input, Nx, 0, 0)); - float2 c2 = vload2(0, (__global float *)tensor3D_offset(&input, 2 * Nx, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c0 = vload2(0, (__global DATA_TYPE *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 2) + c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, Nx, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 2 * Nx, 0, 0)); // Compute phi - float phi = (float)nx * exp_const; + DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; // Multiply by twiddle factor TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); @@ -1036,16 +1093,16 @@ kernel void fft_radix_3_axis_0( DFT_3(c0, c1, c2); // Store three complex output values - vstore2(c0, 0, (__global float *)output.ptr); - vstore2(c1, 0, (__global float *)tensor3D_offset(&output, Nx, 0, 0)); - vstore2(c2, 0, (__global float *)tensor3D_offset(&output, 2 * Nx, 0, 0)); + vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); + vstore2(c1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, Nx, 0, 0)); + vstore2(c2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 2 * Nx, 0, 0)); } /** Computes a stage of a radix-3 FFT on axis 1. 
* * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F16/f32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -1065,7 +1122,7 @@ kernel void fft_radix_3_axis_0( * @param[in] Ni Nx * Ny. * @param[in] exp_const Exponent constant */ -kernel void fft_radix_3_axis_1( +__kernel void fft_radix_3_axis_1( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -1094,12 +1151,15 @@ kernel void fft_radix_3_axis_1( #endif /* IN_PLACE */ // Load three complex input values - float2 c0 = vload2(0, (__global float *)input.ptr); - float2 c1 = vload2(0, (__global float *)tensor3D_offset(&input, 0, Nx, 0)); - float2 c2 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 2 * Nx, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c0 = vload2(0, (__global DATA_TYPE *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 2) + c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, Nx, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2 * Nx, 0)); // Compute phi - float phi = (float)nx * exp_const; + DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; // Multiply by twiddle factor TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); @@ -1109,16 +1169,16 @@ kernel void fft_radix_3_axis_1( DFT_3(c0, c1, c2); // Store three complex output values - vstore2(c0, 0, (__global float *)output.ptr); - vstore2(c1, 0, (__global float *)tensor3D_offset(&output, 0, Nx, 0)); - vstore2(c2, 0, (__global float *)tensor3D_offset(&output, 0, 2 * Nx, 0)); + vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); + vstore2(c1, 0, (__global 
DATA_TYPE *)tensor3D_offset(&output, 0, Nx, 0)); + vstore2(c2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 2 * Nx, 0)); } /** Computes a stage of a radix-4 FFT on axis 0. * * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F16/f32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -1138,7 +1198,7 @@ kernel void fft_radix_3_axis_1( * @param[in] Ni Nx * Ny. * @param[in] exp_const Exponent constant */ -kernel void fft_radix_4_axis_0( +__kernel void fft_radix_4_axis_0( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -1167,13 +1227,17 @@ kernel void fft_radix_4_axis_0( #endif /* IN_PLACE */ // Load four complex input values - float2 c0 = vload2(0, (__global float *)input.ptr); - float2 c1 = vload2(0, (__global float *)tensor3D_offset(&input, Nx, 0, 0)); - float2 c2 = vload2(0, (__global float *)tensor3D_offset(&input, 2 * Nx, 0, 0)); - float2 c3 = vload2(0, (__global float *)tensor3D_offset(&input, 3 * Nx, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c0 = vload2(0, (__global DATA_TYPE *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 2) + c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, Nx, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 2 * Nx, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 3 * Nx, 0, 0)); // Compute phi - float phi = (float)nx * exp_const; + DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; // Multiply by twiddle factor TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); @@ -1184,17 
+1248,17 @@ kernel void fft_radix_4_axis_0( DFT_4(c0, c1, c2, c3); // Store four complex output values - vstore2(c0, 0, (__global float *)output.ptr); - vstore2(c1, 0, (__global float *)tensor3D_offset(&output, Nx, 0, 0)); - vstore2(c2, 0, (__global float *)tensor3D_offset(&output, 2 * Nx, 0, 0)); - vstore2(c3, 0, (__global float *)tensor3D_offset(&output, 3 * Nx, 0, 0)); + vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); + vstore2(c1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, Nx, 0, 0)); + vstore2(c2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 2 * Nx, 0, 0)); + vstore2(c3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 3 * Nx, 0, 0)); } /** Computes a stage of a radix-4 FFT on axis 1. * * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F16/f32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -1214,7 +1278,7 @@ kernel void fft_radix_4_axis_0( * @param[in] Ni Nx * Ny. 
* @param[in] exp_const Exponent constant */ -kernel void fft_radix_4_axis_1( +__kernel void fft_radix_4_axis_1( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -1243,13 +1307,17 @@ kernel void fft_radix_4_axis_1( #endif /* IN_PLACE */ // Load four complex input values - float2 c0 = vload2(0, (__global float *)input.ptr); - float2 c1 = vload2(0, (__global float *)tensor3D_offset(&input, 0, Nx, 0)); - float2 c2 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 2 * Nx, 0)); - float2 c3 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 3 * Nx, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c0 = vload2(0, (__global DATA_TYPE *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 2) + c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, Nx, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2 * Nx, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 3 * Nx, 0)); // Compute phi - float phi = (float)nx * exp_const; + DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; // Multiply by twiddle factor TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); @@ -1260,17 +1328,17 @@ kernel void fft_radix_4_axis_1( DFT_4(c0, c1, c2, c3); // Store four complex output values - vstore2(c0, 0, (__global float *)output.ptr); - vstore2(c1, 0, (__global float *)tensor3D_offset(&output, 0, Nx, 0)); - vstore2(c2, 0, (__global float *)tensor3D_offset(&output, 0, 2 * Nx, 0)); - vstore2(c3, 0, (__global float *)tensor3D_offset(&output, 0, 3 * Nx, 0)); + vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); + vstore2(c1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, Nx, 0)); + vstore2(c2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 2 * Nx, 0)); + vstore2(c3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 3 * Nx, 0)); } /** Computes a stage of a radix-5 FFT on axis 0. 
* * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F16/f32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -1290,7 +1358,7 @@ kernel void fft_radix_4_axis_1( * @param[in] Ni Nx * Ny. * @param[in] exp_const Exponent constant */ -kernel void fft_radix_5_axis_0( +__kernel void fft_radix_5_axis_0( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -1319,14 +1387,19 @@ kernel void fft_radix_5_axis_0( #endif /* IN_PLACE */ // Load five complex input values - float2 c0 = vload2(0, (__global float *)input.ptr); - float2 c1 = vload2(0, (__global float *)tensor3D_offset(&input, Nx, 0, 0)); - float2 c2 = vload2(0, (__global float *)tensor3D_offset(&input, 2 * Nx, 0, 0)); - float2 c3 = vload2(0, (__global float *)tensor3D_offset(&input, 3 * Nx, 0, 0)); - float2 c4 = vload2(0, (__global float *)tensor3D_offset(&input, 4 * Nx, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c0 = vload2(0, (__global DATA_TYPE *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 2) + c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, Nx, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 2 * Nx, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 3 * Nx, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c4 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 4 * Nx, 0, 0)); // Compute phi - float phi = (float)nx * exp_const; + DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; // Multiply by twiddle factor TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); @@ 
-1338,18 +1411,18 @@ kernel void fft_radix_5_axis_0( DFT_5(c0, c1, c2, c3, c4); // Store five complex output values - vstore2(c0, 0, (__global float *)output.ptr); - vstore2(c1, 0, (__global float *)tensor3D_offset(&output, Nx, 0, 0)); - vstore2(c2, 0, (__global float *)tensor3D_offset(&output, 2 * Nx, 0, 0)); - vstore2(c3, 0, (__global float *)tensor3D_offset(&output, 3 * Nx, 0, 0)); - vstore2(c4, 0, (__global float *)tensor3D_offset(&output, 4 * Nx, 0, 0)); + vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); + vstore2(c1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, Nx, 0, 0)); + vstore2(c2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 2 * Nx, 0, 0)); + vstore2(c3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 3 * Nx, 0, 0)); + vstore2(c4, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 4 * Nx, 0, 0)); } /** Computes a stage of a radix-5 FFT on axis 1. * * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F16/f32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -1369,7 +1442,7 @@ kernel void fft_radix_5_axis_0( * @param[in] Ni Nx * Ny. 
* @param[in] exp_const Exponent constant */ -kernel void fft_radix_5_axis_1( +__kernel void fft_radix_5_axis_1( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -1398,14 +1471,19 @@ kernel void fft_radix_5_axis_1( #endif /* IN_PLACE */ // Load five complex input values - float2 c0 = vload2(0, (__global float *)input.ptr); - float2 c1 = vload2(0, (__global float *)tensor3D_offset(&input, 0, Nx, 0)); - float2 c2 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 2 * Nx, 0)); - float2 c3 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 3 * Nx, 0)); - float2 c4 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 4 * Nx, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c0 = vload2(0, (__global DATA_TYPE *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 2) + c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, Nx, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2 * Nx, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 3 * Nx, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c4 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 4 * Nx, 0)); // Compute phi - float phi = (float)nx * exp_const; + DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; // Multiply by twiddle factor TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); @@ -1417,18 +1495,18 @@ kernel void fft_radix_5_axis_1( DFT_5(c0, c1, c2, c3, c4); // Store five complex output values - vstore2(c0, 0, (__global float *)output.ptr); - vstore2(c1, 0, (__global float *)tensor3D_offset(&output, 0, Nx, 0)); - vstore2(c2, 0, (__global float *)tensor3D_offset(&output, 0, 2 * Nx, 0)); - vstore2(c3, 0, (__global float *)tensor3D_offset(&output, 0, 3 * Nx, 0)); - vstore2(c4, 0, (__global float *)tensor3D_offset(&output, 0, 4 * Nx, 0)); + vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); + vstore2(c1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, Nx, 0)); + vstore2(c2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 
0, 2 * Nx, 0)); + vstore2(c3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 3 * Nx, 0)); + vstore2(c4, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 4 * Nx, 0)); } /** Computes a stage of a radix-7 FFT on axis 0. * * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F16/f32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -1448,7 +1526,7 @@ kernel void fft_radix_5_axis_1( * @param[in] Ni Nx * Ny. * @param[in] exp_const Exponent constant */ -kernel void fft_radix_7_axis_0( +__kernel void fft_radix_7_axis_0( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -1477,16 +1555,23 @@ kernel void fft_radix_7_axis_0( #endif /* IN_PLACE */ // Load seven complex input values - float2 c0 = vload2(0, (__global float *)input.ptr); - float2 c1 = vload2(0, (__global float *)tensor3D_offset(&input, Nx, 0, 0)); - float2 c2 = vload2(0, (__global float *)tensor3D_offset(&input, 2 * Nx, 0, 0)); - float2 c3 = vload2(0, (__global float *)tensor3D_offset(&input, 3 * Nx, 0, 0)); - float2 c4 = vload2(0, (__global float *)tensor3D_offset(&input, 4 * Nx, 0, 0)); - float2 c5 = vload2(0, (__global float *)tensor3D_offset(&input, 5 * Nx, 0, 0)); - float2 c6 = vload2(0, (__global float *)tensor3D_offset(&input, 6 * Nx, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c0 = vload2(0, (__global DATA_TYPE *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 2) + c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, Nx, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 2 * Nx, 0, 0)); + 
VEC_DATA_TYPE(DATA_TYPE, 2) + c3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 3 * Nx, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c4 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 4 * Nx, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c5 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 5 * Nx, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c6 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 6 * Nx, 0, 0)); // Compute phi - float phi = (float)nx * exp_const; + DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; // Multiply by twiddle factor TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); @@ -1500,20 +1585,20 @@ kernel void fft_radix_7_axis_0( DFT_7(c0, c1, c2, c3, c4, c5, c6); // Store seven complex output values - vstore2(c0, 0, (__global float *)output.ptr); - vstore2(c1, 0, (__global float *)tensor3D_offset(&output, Nx, 0, 0)); - vstore2(c2, 0, (__global float *)tensor3D_offset(&output, 2 * Nx, 0, 0)); - vstore2(c3, 0, (__global float *)tensor3D_offset(&output, 3 * Nx, 0, 0)); - vstore2(c4, 0, (__global float *)tensor3D_offset(&output, 4 * Nx, 0, 0)); - vstore2(c5, 0, (__global float *)tensor3D_offset(&output, 5 * Nx, 0, 0)); - vstore2(c6, 0, (__global float *)tensor3D_offset(&output, 6 * Nx, 0, 0)); + vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); + vstore2(c1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, Nx, 0, 0)); + vstore2(c2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 2 * Nx, 0, 0)); + vstore2(c3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 3 * Nx, 0, 0)); + vstore2(c4, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 4 * Nx, 0, 0)); + vstore2(c5, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 5 * Nx, 0, 0)); + vstore2(c6, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 6 * Nx, 0, 0)); } /** Computes a stage of a radix-7 FFT on axis 1. * * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. 
Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F16/f32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -1533,7 +1618,7 @@ kernel void fft_radix_7_axis_0( * @param[in] Ni Nx * Ny. * @param[in] exp_const Exponent constant */ -kernel void fft_radix_7_axis_1( +__kernel void fft_radix_7_axis_1( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -1562,16 +1647,23 @@ kernel void fft_radix_7_axis_1( #endif /* IN_PLACE */ // Load seven complex input values - float2 c0 = vload2(0, (__global float *)input.ptr); - float2 c1 = vload2(0, (__global float *)tensor3D_offset(&input, 0, Nx, 0)); - float2 c2 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 2 * Nx, 0)); - float2 c3 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 3 * Nx, 0)); - float2 c4 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 4 * Nx, 0)); - float2 c5 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 5 * Nx, 0)); - float2 c6 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 6 * Nx, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c0 = vload2(0, (__global DATA_TYPE *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 2) + c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, Nx, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2 * Nx, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 3 * Nx, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c4 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 4 * Nx, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c5 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 5 * Nx, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c6 = vload2(0, (__global DATA_TYPE 
*)tensor3D_offset(&input, 0, 6 * Nx, 0)); // Compute phi - float phi = (float)nx * exp_const; + DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; // Multiply by twiddle factor TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); @@ -1585,20 +1677,20 @@ kernel void fft_radix_7_axis_1( DFT_7(c0, c1, c2, c3, c4, c5, c6); // Store seven complex output values - vstore2(c0, 0, (__global float *)output.ptr); - vstore2(c1, 0, (__global float *)tensor3D_offset(&output, 0, Nx, 0)); - vstore2(c2, 0, (__global float *)tensor3D_offset(&output, 0, 2 * Nx, 0)); - vstore2(c3, 0, (__global float *)tensor3D_offset(&output, 0, 3 * Nx, 0)); - vstore2(c4, 0, (__global float *)tensor3D_offset(&output, 0, 4 * Nx, 0)); - vstore2(c5, 0, (__global float *)tensor3D_offset(&output, 0, 5 * Nx, 0)); - vstore2(c6, 0, (__global float *)tensor3D_offset(&output, 0, 6 * Nx, 0)); + vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); + vstore2(c1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, Nx, 0)); + vstore2(c2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 2 * Nx, 0)); + vstore2(c3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 3 * Nx, 0)); + vstore2(c4, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 4 * Nx, 0)); + vstore2(c5, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 5 * Nx, 0)); + vstore2(c6, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 6 * Nx, 0)); } /** Computes a stage of a radix-8 FFT on axis 0. * * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor. 
Supported data types: F16/f32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -1618,7 +1710,7 @@ kernel void fft_radix_7_axis_1( * @param[in] Ni Nx * Ny. * @param[in] exp_const Exponent constant */ -kernel void fft_radix_8_axis_0( +__kernel void fft_radix_8_axis_0( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -1647,17 +1739,25 @@ kernel void fft_radix_8_axis_0( #endif /* IN_PLACE */ // Load eight complex input values - float2 c0 = vload2(0, (__global float *)input.ptr); - float2 c1 = vload2(0, (__global float *)tensor3D_offset(&input, Nx, 0, 0)); - float2 c2 = vload2(0, (__global float *)tensor3D_offset(&input, 2 * Nx, 0, 0)); - float2 c3 = vload2(0, (__global float *)tensor3D_offset(&input, 3 * Nx, 0, 0)); - float2 c4 = vload2(0, (__global float *)tensor3D_offset(&input, 4 * Nx, 0, 0)); - float2 c5 = vload2(0, (__global float *)tensor3D_offset(&input, 5 * Nx, 0, 0)); - float2 c6 = vload2(0, (__global float *)tensor3D_offset(&input, 6 * Nx, 0, 0)); - float2 c7 = vload2(0, (__global float *)tensor3D_offset(&input, 7 * Nx, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c0 = vload2(0, (__global DATA_TYPE *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 2) + c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, Nx, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 2 * Nx, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 3 * Nx, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c4 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 4 * Nx, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c5 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 5 * Nx, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c6 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 
6 * Nx, 0, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c7 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 7 * Nx, 0, 0)); // Compute phi - float phi = (float)nx * exp_const; + DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; // Multiply by twiddle factor TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); @@ -1672,21 +1772,21 @@ kernel void fft_radix_8_axis_0( DFT_8(c0, c1, c2, c3, c4, c5, c6, c7); // Store eight complex output values - vstore2(c0, 0, (__global float *)output.ptr); - vstore2(c1, 0, (__global float *)tensor3D_offset(&output, Nx, 0, 0)); - vstore2(c2, 0, (__global float *)tensor3D_offset(&output, 2 * Nx, 0, 0)); - vstore2(c3, 0, (__global float *)tensor3D_offset(&output, 3 * Nx, 0, 0)); - vstore2(c4, 0, (__global float *)tensor3D_offset(&output, 4 * Nx, 0, 0)); - vstore2(c5, 0, (__global float *)tensor3D_offset(&output, 5 * Nx, 0, 0)); - vstore2(c6, 0, (__global float *)tensor3D_offset(&output, 6 * Nx, 0, 0)); - vstore2(c7, 0, (__global float *)tensor3D_offset(&output, 7 * Nx, 0, 0)); + vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); + vstore2(c1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, Nx, 0, 0)); + vstore2(c2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 2 * Nx, 0, 0)); + vstore2(c3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 3 * Nx, 0, 0)); + vstore2(c4, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 4 * Nx, 0, 0)); + vstore2(c5, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 5 * Nx, 0, 0)); + vstore2(c6, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 6 * Nx, 0, 0)); + vstore2(c7, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 7 * Nx, 0, 0)); } /** Computes a stage of a radix-8 FFT on axis 1. * * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * - * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in,out] input_ptr Pointer to the source tensor. 
Supported data types: F16/f32 * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -1706,7 +1806,7 @@ kernel void fft_radix_8_axis_0( * @param[in] Ni Nx * Ny. * @param[in] exp_const Exponent constant */ -kernel void fft_radix_8_axis_1( +__kernel void fft_radix_8_axis_1( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -1735,17 +1835,25 @@ kernel void fft_radix_8_axis_1( #endif /* IN_PLACE */ // Load eight complex input values - float2 c0 = vload2(0, (__global float *)input.ptr); - float2 c1 = vload2(0, (__global float *)tensor3D_offset(&input, 0, Nx, 0)); - float2 c2 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 2 * Nx, 0)); - float2 c3 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 3 * Nx, 0)); - float2 c4 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 4 * Nx, 0)); - float2 c5 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 5 * Nx, 0)); - float2 c6 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 6 * Nx, 0)); - float2 c7 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 7 * Nx, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c0 = vload2(0, (__global DATA_TYPE *)input.ptr); + VEC_DATA_TYPE(DATA_TYPE, 2) + c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, Nx, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2 * Nx, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 3 * Nx, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c4 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 4 * Nx, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c5 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 5 * Nx, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c6 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 
0, 6 * Nx, 0)); + VEC_DATA_TYPE(DATA_TYPE, 2) + c7 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 7 * Nx, 0)); // Compute phi - float phi = (float)nx * exp_const; + DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; // Multiply by twiddle factor TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); @@ -1760,12 +1868,13 @@ kernel void fft_radix_8_axis_1( DFT_8(c0, c1, c2, c3, c4, c5, c6, c7); // Store eight complex output values - vstore2(c0, 0, (__global float *)output.ptr); - vstore2(c1, 0, (__global float *)tensor3D_offset(&output, 0, Nx, 0)); - vstore2(c2, 0, (__global float *)tensor3D_offset(&output, 0, 2 * Nx, 0)); - vstore2(c3, 0, (__global float *)tensor3D_offset(&output, 0, 3 * Nx, 0)); - vstore2(c4, 0, (__global float *)tensor3D_offset(&output, 0, 4 * Nx, 0)); - vstore2(c5, 0, (__global float *)tensor3D_offset(&output, 0, 5 * Nx, 0)); - vstore2(c6, 0, (__global float *)tensor3D_offset(&output, 0, 6 * Nx, 0)); - vstore2(c7, 0, (__global float *)tensor3D_offset(&output, 0, 7 * Nx, 0)); -}
\ No newline at end of file + vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); + vstore2(c1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, Nx, 0)); + vstore2(c2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 2 * Nx, 0)); + vstore2(c3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 3 * Nx, 0)); + vstore2(c4, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 4 * Nx, 0)); + vstore2(c5, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 5 * Nx, 0)); + vstore2(c6, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 6 * Nx, 0)); + vstore2(c7, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 7 * Nx, 0)); +} +#endif // defined(DATA_TYPE)
\ No newline at end of file diff --git a/src/core/CL/cl_kernels/fft_digit_reverse.cl b/src/core/CL/cl_kernels/fft_digit_reverse.cl index 200ab91f49..de566212c6 100644 --- a/src/core/CL/cl_kernels/fft_digit_reverse.cl +++ b/src/core/CL/cl_kernels/fft_digit_reverse.cl @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Arm Limited. + * Copyright (c) 2019-2020 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -23,10 +23,10 @@ */ #include "helpers.h" -#if defined(VEC_SIZE) +#if defined(VEC_SIZE) && defined(DATA_TYPE) /** Computes the digit reverse stage on axis X * - * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -61,33 +61,36 @@ __kernel void fft_digit_reverse_axis_0( // Load data #if VEC_SIZE == 1 - float data = *((__global float *)tensor3D_offset(&src, iidx, get_global_id(1), get_global_id(2))); + DATA_TYPE data = *((__global DATA_TYPE *)tensor3D_offset(&src, iidx, get_global_id(1), get_global_id(2))); #elif VEC_SIZE == 2 - float2 data = vload2(0, (__global float *)tensor3D_offset(&src, iidx, get_global_id(1), get_global_id(2))); + VEC_DATA_TYPE(DATA_TYPE, 2) + data = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&src, iidx, get_global_id(1), get_global_id(2))); #else // VEC_SIZE == 1 #error "vec_size of 1 and 2 are supported" #endif // VEC_SIZE == 1 // Create result #if VEC_SIZE == 1 - float2 res = { data, 0 }; + VEC_DATA_TYPE(DATA_TYPE, 2) + res = { data, 0 }; #elif VEC_SIZE == 2 - float2 res = data; + VEC_DATA_TYPE(DATA_TYPE, 2) + res = data; #else // VEC_SIZE == 1 #error "vec_size of 1 and 2 are supported" #endif // VEC_SIZE == 1 // Store result #if defined(CONJ) - vstore2((float2)(res.s0, -res.s1), 
0, (__global float *)dst.ptr); + vstore2((VEC_DATA_TYPE(DATA_TYPE, 2))(res.s0, -res.s1), 0, (__global DATA_TYPE *)dst.ptr); #else // defined(CONJ) - vstore2(res, 0, (__global float *)dst.ptr); + vstore2(res, 0, (__global DATA_TYPE *)dst.ptr); #endif // defined(CONJ) } /** Computes the digit reverse stage on axis Y * - * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -122,27 +125,30 @@ __kernel void fft_digit_reverse_axis_1( // Load data #if VEC_SIZE == 1 - float data = *((__global float *)tensor3D_offset(&src, get_global_id(0), iidx, get_global_id(2))); + DATA_TYPE data = *((__global DATA_TYPE *)tensor3D_offset(&src, get_global_id(0), iidx, get_global_id(2))); #elif VEC_SIZE == 2 - float2 data = vload2(0, (__global float *)tensor3D_offset(&src, get_global_id(0), iidx, get_global_id(2))); + VEC_DATA_TYPE(DATA_TYPE, 2) + data = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&src, get_global_id(0), iidx, get_global_id(2))); #else // VEC_SIZE == 1 #error "vec_size of 1 and 2 are supported" #endif // VEC_SIZE == 1 // Create result #if VEC_SIZE == 1 - float2 res = { data, 0 }; + VEC_DATA_TYPE(DATA_TYPE, 2) + res = { data, 0 }; #elif VEC_SIZE == 2 - float2 res = data; + VEC_DATA_TYPE(DATA_TYPE, 2) + res = data; #else // VEC_SIZE == 1 #error "vec_size of 1 and 2 are supported" #endif // VEC_SIZE == 1 // Store result #if defined(CONJ) - vstore2((float2)(res.s0, -res.s1), 0, (__global float *)dst.ptr); + vstore2((VEC_DATA_TYPE(DATA_TYPE, 2))(res.s0, -res.s1), 0, (__global DATA_TYPE *)dst.ptr); #else // defined(CONJ) - vstore2(res, 0, (__global float *)dst.ptr); + vstore2(res, 0, (__global DATA_TYPE 
*)dst.ptr); #endif // defined(CONJ) } -#endif // defined(VEC_SIZE)
\ No newline at end of file +#endif // defined(VEC_SIZE) && defined(DATA_TYPE)
\ No newline at end of file diff --git a/src/core/CL/cl_kernels/fft_scale.cl b/src/core/CL/cl_kernels/fft_scale.cl index 270fb78ae2..57e25ef504 100644 --- a/src/core/CL/cl_kernels/fft_scale.cl +++ b/src/core/CL/cl_kernels/fft_scale.cl @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Arm Limited. + * Copyright (c) 2019-2020 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -23,9 +23,10 @@ */ #include "helpers.h" +#if defined(VEC_SIZE) && defined(DATA_TYPE) /** Computes the fft scale stage * - * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) @@ -62,17 +63,19 @@ __kernel void fft_scale_conj( // Store result #if VEC_SIZE == 1 - *((__global float *)dst.ptr) = (*(__global float *)src.ptr) / scale; + *((__global DATA_TYPE *)dst.ptr) = (*(__global DATA_TYPE *)src.ptr) / (DATA_TYPE)scale; #elif VEC_SIZE == 2 // Load data - float2 data = vload2(0, (__global float *)src.ptr); - data /= scale; + VEC_DATA_TYPE(DATA_TYPE, 2) + data = vload2(0, (__global DATA_TYPE *)src.ptr); + data /= (DATA_TYPE)scale; #if defined(CONJ) - vstore2((float2)(data.s0, -data.s1), 0, (__global float *)dst.ptr); + vstore2((VEC_DATA_TYPE(DATA_TYPE, 2))(data.s0, -data.s1), 0, (__global DATA_TYPE *)dst.ptr); #else // defined(CONJ) - vstore2(data, 0, (__global float *)dst.ptr); + vstore2(data, 0, (__global DATA_TYPE *)dst.ptr); #endif // defined(CONJ) #else // VEC_SIZE == 1 #error "vec_size of 1 and 2 are supported" #endif // VEC_SIZE == 1 -}
\ No newline at end of file +} +#endif // defined(VEC_SIZE) && defined(DATA_TYPE)
\ No newline at end of file diff --git a/src/core/CL/cl_kernels/pixelwise_mul_float.cl b/src/core/CL/cl_kernels/pixelwise_mul_float.cl index 4fa1551b54..845e1c9860 100644 --- a/src/core/CL/cl_kernels/pixelwise_mul_float.cl +++ b/src/core/CL/cl_kernels/pixelwise_mul_float.cl @@ -105,9 +105,11 @@ __kernel void pixelwise_mul_float( } #endif /* defined(DATA_TYPE_IN1) && defined(DATA_TYPE_IN2) && defined(ACC_DATA_TYPE) && defined(DATA_TYPE_OUT) */ +#if defined(DATA_TYPE) + /** Performs a pixelwise multiplication of complex float values * - * @param[in] in1_ptr Pointer to the source image. Supported data types: F32 + * @param[in] in1_ptr Pointer to the source image. Supported data types: F16/F32 * @param[in] in1_stride_x Stride of the source image in X dimension (in bytes) * @param[in] in1_step_x in1_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] in1_stride_y Stride of the source image in Y dimension (in bytes) @@ -143,16 +145,21 @@ __kernel void pixelwise_mul_complex( Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(out); // Load data - float2 vin1 = vload2(0, (__global float *)in1.ptr); - float2 vin2 = vload2(0, (__global float *)in2.ptr); + VEC_DATA_TYPE(DATA_TYPE, 2) + vin1 = vload2(0, (__global DATA_TYPE *)in1.ptr); + VEC_DATA_TYPE(DATA_TYPE, 2) + vin2 = vload2(0, (__global DATA_TYPE *)in2.ptr); // Perform complex multiplication - float2 res = { vin1.x *vin2.x - vin1.y * vin2.y, vin1.x *vin2.y + vin2.x * vin1.y }; + VEC_DATA_TYPE(DATA_TYPE, 2) + res = { vin1.x *vin2.x - vin1.y * vin2.y, vin1.x *vin2.y + vin2.x * vin1.y }; #if defined(ACTIVATION_TYPE) - vstore2(ACTIVATION(ACTIVATION_TYPE, float, VEC_SIZE, res, A_VAL, B_VAL), 0, (__global float *)out.ptr); + vstore2(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, res, A_VAL, B_VAL), 0, (__global DATA_TYPE *)out.ptr); #else // defined(ACTIVATION_TYPE) // Store result - vstore2(res, 0, (__global float *)out.ptr); + vstore2(res, 0, (__global DATA_TYPE *)out.ptr); #endif // 
defined(ACTIVATION_TYPE) } + +#endif // defined(DATA_TYPE)
\ No newline at end of file diff --git a/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp b/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp index 922e50aa73..448f5a9c1e 100644 --- a/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp +++ b/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp @@ -38,7 +38,7 @@ namespace Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *idx, const FFTDigitReverseKernelInfo &config) { ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); - ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() != DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON(input->num_channels() != 1 && input->num_channels() != 2); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(idx, 1, DataType::U32); ARM_COMPUTE_RETURN_ERROR_ON(std::set<unsigned int>({ 0, 1 }).count(config.axis) == 0); @@ -90,6 +90,7 @@ void CLFFTDigitReverseKernel::configure(const CLCompileContext &compile_context, // Create kernel CLBuildOptions build_opts; build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(input->info()->num_channels())); + build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())); build_opts.add_option_if(config.conjugate, "-DCONJ"); std::string kernel_name = "fft_digit_reverse_axis_" + support::cpp11::to_string(config.axis); _kernel = create_kernel(compile_context, kernel_name, build_opts.options()); diff --git a/src/core/CL/kernels/CLFFTDigitReverseKernel.h b/src/core/CL/kernels/CLFFTDigitReverseKernel.h index 2e2f1bdff4..e5583a4c22 100644 --- a/src/core/CL/kernels/CLFFTDigitReverseKernel.h +++ b/src/core/CL/kernels/CLFFTDigitReverseKernel.h @@ -51,7 +51,7 @@ public: ~CLFFTDigitReverseKernel() = default; /** Set the input and output tensors. * - * @param[in] input Source tensor. Data types supported: F32. + * @param[in] input Source tensor. Data types supported: F16/F32. * @param[out] output Destination tensor. 
Data type supported: same as @p input * @param[in] idx Digit reverse index tensor. Data type supported: U32 * @param[in] config Kernel configuration. @@ -60,7 +60,7 @@ public: /** Set the input and output tensors. * * @param[in] compile_context The compile context to be used. - * @param[in] input Source tensor. Data types supported: F32. + * @param[in] input Source tensor. Data types supported: F16/F32. * @param[out] output Destination tensor. Data type supported: same as @p input * @param[in] idx Digit reverse index tensor. Data type supported: U32 * @param[in] config Kernel configuration. @@ -68,7 +68,7 @@ public: void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *idx, const FFTDigitReverseKernelInfo &config); /** Static function to check if given info will lead to a valid configuration of @ref CLFFTDigitReverseKernel * - * @param[in] input Source tensor info. Data types supported: F32. + * @param[in] input Source tensor info. Data types supported: F16/F32. * @param[in] output Destination tensor info. Data type supported: same as @p input * @param[in] idx Digit reverse index tensor info. Data type supported: U32 * @param[in] config Kernel configuration. 
diff --git a/src/core/CL/kernels/CLFFTRadixStageKernel.cpp b/src/core/CL/kernels/CLFFTRadixStageKernel.cpp index 0f06640b64..68ccb5e8e6 100644 --- a/src/core/CL/kernels/CLFFTRadixStageKernel.cpp +++ b/src/core/CL/kernels/CLFFTRadixStageKernel.cpp @@ -42,7 +42,7 @@ namespace Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const FFTRadixStageKernelInfo &config) { ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 2, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 2, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON(CLFFTRadixStageKernel::supported_radix().count(config.radix) == 0); ARM_COMPUTE_RETURN_ERROR_ON(std::set<unsigned int>({ 0, 1 }).count(config.axis) == 0); ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[config.axis] % config.radix); @@ -99,6 +99,7 @@ void CLFFTRadixStageKernel::configure(const CLCompileContext &compile_context, I // Create build options CLBuildOptions build_opts; + build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())); build_opts.add_option_if(_run_in_place, "-DIN_PLACE"); // Create kernel diff --git a/src/core/CL/kernels/CLFFTRadixStageKernel.h b/src/core/CL/kernels/CLFFTRadixStageKernel.h index c3cc510bdd..9bb310db83 100644 --- a/src/core/CL/kernels/CLFFTRadixStageKernel.h +++ b/src/core/CL/kernels/CLFFTRadixStageKernel.h @@ -55,7 +55,7 @@ public: * * @note If the output tensor is nullptr, the FFT will be performed in-place * - * @param[in,out] input Source tensor. Data types supported: F32. + * @param[in,out] input Source tensor. Data types supported: F16/F32. * @param[out] output Destination tensor. Can be nullptr. Data type supported: same as @p input * @param[in] config FFT descriptor metadata. */ @@ -65,14 +65,14 @@ public: * @note If the output tensor is nullptr, the FFT will be performed in-place * * @param[in] compile_context The compile context to be used. 
- * @param[in,out] input Source tensor. Data types supported: F32. + * @param[in,out] input Source tensor. Data types supported: F16/F32. * @param[out] output Destination tensor. Can be nullptr. Data type supported: same as @p input * @param[in] config FFT descriptor metadata. */ void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const FFTRadixStageKernelInfo &config); /** Static function to check if given info will lead to a valid configuration of @ref CLFFTRadixStageKernel * - * @param[in] input Source tensor info. Data types supported: F32. + * @param[in] input Source tensor info. Data types supported: F16/F32. * @param[in] output Destination tensor info. Can be nullptr. Data type supported: same as @p input * @param[in] config FFT descriptor metadata. * diff --git a/src/core/CL/kernels/CLFFTScaleKernel.cpp b/src/core/CL/kernels/CLFFTScaleKernel.cpp index 4dbe8d2e86..f82aeca34b 100644 --- a/src/core/CL/kernels/CLFFTScaleKernel.cpp +++ b/src/core/CL/kernels/CLFFTScaleKernel.cpp @@ -38,7 +38,7 @@ namespace Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output) { ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 2, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 2, DataType::F16, DataType::F32); // Checks performed when output is configured if((output != nullptr) && (output->total_size() != 0)) @@ -94,6 +94,7 @@ void CLFFTScaleKernel::configure(const CLCompileContext &compile_context, ICLTen CLBuildOptions build_opts; build_opts.add_option_if(_run_in_place, "-DIN_PLACE"); build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(output != nullptr ? 
output->info()->num_channels() : input->info()->num_channels())); + build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())); build_opts.add_option_if(config.conjugate, "-DCONJ"); std::string kernel_name = "fft_scale_conj"; _kernel = create_kernel(compile_context, kernel_name, build_opts.options()); diff --git a/src/core/CL/kernels/CLFFTScaleKernel.h b/src/core/CL/kernels/CLFFTScaleKernel.h index cb007e5307..cc518be193 100644 --- a/src/core/CL/kernels/CLFFTScaleKernel.h +++ b/src/core/CL/kernels/CLFFTScaleKernel.h @@ -51,7 +51,7 @@ public: ~CLFFTScaleKernel() = default; /** Set the input and output tensors. * - * @param[in,out] input Source tensor. Data types supported: F32. + * @param[in,out] input Source tensor. Data types supported: F16/F32. * @param[out] output Destination tensor. Data type supported: same as @p input * @param[in] config Kernel configuration */ @@ -59,14 +59,14 @@ public: /** Set the input and output tensors. * * @param[in] compile_context The compile context to be used. - * @param[in,out] input Source tensor. Data types supported: F32. + * @param[in,out] input Source tensor. Data types supported: F16/F32. * @param[out] output Destination tensor. Data type supported: same as @p input * @param[in] config Kernel configuration */ void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const FFTScaleKernelInfo &config); /** Static function to check if given info will lead to a valid configuration of @ref CLFFTScaleKernel * - * @param[in] input Source tensor info. Data types supported: F32. + * @param[in] input Source tensor info. Data types supported: F16/F32. * @param[in] output Destination tensor info. 
Data type supported: same as @p input * @param[in] config Kernel configuration * diff --git a/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp b/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp index a6255f8018..c68c526ec9 100644 --- a/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp +++ b/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp @@ -329,8 +329,9 @@ constexpr unsigned int num_elems_processed_per_iteration_complex = 1; Status validate_arguments_complex(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 2, DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 2, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 2, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 2, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, input2); const TensorShape &out_shape = TensorShape::broadcast_shape(input1->tensor_shape(), input2->tensor_shape()); @@ -340,7 +341,8 @@ Status validate_arguments_complex(const ITensorInfo *input1, const ITensorInfo * // Validate in case of configured output if(output->total_size() > 0) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 2, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 2, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, output); ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output->tensor_shape(), 0), "Wrong shape for output"); } @@ -400,6 +402,7 @@ void CLComplexPixelWiseMultiplicationKernel::configure(const CLCompileContext &c _output = output; CLBuildOptions build_opts; + build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(_output->data_type())); if(act_info.enabled()) { 
build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation()))); diff --git a/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.h b/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.h index 0cc4005875..74102fd397 100644 --- a/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.h +++ b/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.h @@ -157,7 +157,7 @@ public: CLComplexPixelWiseMultiplicationKernel &operator=(CLComplexPixelWiseMultiplicationKernel &&) = default; /** Initialise the kernel's input, output and border mode. * - * @param[in] input1 An input tensor info. Data types supported: F32. Number of channels supported: 2. + * @param[in] input1 An input tensor info. Data types supported: F16/F32. Number of channels supported: 2. * @param[in] input2 An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1. * @param[out] output The output tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1. * @param[in] act_info (Optional) Activation layer information in case of a fused activation. 
diff --git a/src/core/CL/kernels/CLReductionOperationKernel.cpp b/src/core/CL/kernels/CLReductionOperationKernel.cpp index 9d49a2193a..2697a0df98 100644 --- a/src/core/CL/kernels/CLReductionOperationKernel.cpp +++ b/src/core/CL/kernels/CLReductionOperationKernel.cpp @@ -55,7 +55,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, u } else { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 2, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 2, DataType::F16, DataType::F32); } ARM_COMPUTE_RETURN_ERROR_ON_MSG(op == ReductionOperation::SUM_SQUARE && input->data_type() == DataType::QASYMM8, "Not supported reduction operation for QASYMM8"); ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions"); diff --git a/src/runtime/CL/functions/CLConvolutionLayer.cpp b/src/runtime/CL/functions/CLConvolutionLayer.cpp index edd9298d26..5bfbc7ce57 100644 --- a/src/runtime/CL/functions/CLConvolutionLayer.cpp +++ b/src/runtime/CL/functions/CLConvolutionLayer.cpp @@ -88,7 +88,7 @@ void CLConvolutionLayer::configure(const CLCompileContext &compile_context, ICLT case ConvolutionMethod::FFT: { auto f = std::make_unique<CLFFTConvolutionLayer>(_memory_manager); - f->configure(compile_context, input, weights, biases, output, conv_info, act_info); + f->configure(compile_context, input, weights, biases, output, conv_info, act_info, enable_fast_math); _function = std::move(f); break; } @@ -131,7 +131,7 @@ Status CLConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo case ConvolutionMethod::FFT: { // Validate FFT-based convolution layer - ARM_COMPUTE_RETURN_ON_ERROR(CLFFTConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info)); + ARM_COMPUTE_RETURN_ON_ERROR(CLFFTConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info, enable_fast_math)); break; } default: @@ -204,7 +204,7 @@ 
ConvolutionMethod CLConvolutionLayer::get_convolution_method(const ITensorInfo * { return ConvolutionMethod::DIRECT; } - if((weights->dimension(idx_h) > 7) && (input->dimension(idx_c) > output->dimension(idx_c)) && (CLFFTConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info))) + if((weights->dimension(idx_h) > 7) && (input->dimension(idx_c) > output->dimension(idx_c)) && (CLFFTConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info, enable_fast_math))) { return ConvolutionMethod::FFT; } diff --git a/src/runtime/CL/functions/CLFFT1D.cpp b/src/runtime/CL/functions/CLFFT1D.cpp index c434b4e570..cf136dc75e 100644 --- a/src/runtime/CL/functions/CLFFT1D.cpp +++ b/src/runtime/CL/functions/CLFFT1D.cpp @@ -118,7 +118,7 @@ void CLFFT1D::configure(const CLCompileContext &compile_context, const ICLTensor Status CLFFT1D::validate(const ITensorInfo *input, const ITensorInfo *output, const FFT1DInfo &config) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); - ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() != DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON(input->num_channels() != 1 && input->num_channels() != 2); ARM_COMPUTE_RETURN_ERROR_ON(std::set<unsigned int>({ 0, 1 }).count(config.axis) == 0); diff --git a/src/runtime/CL/functions/CLFFT2D.cpp b/src/runtime/CL/functions/CLFFT2D.cpp index 1d444bb15d..e0497ca6dc 100644 --- a/src/runtime/CL/functions/CLFFT2D.cpp +++ b/src/runtime/CL/functions/CLFFT2D.cpp @@ -67,6 +67,7 @@ void CLFFT2D::configure(const CLCompileContext &compile_context, const ICLTensor Status CLFFT2D::validate(const ITensorInfo *input, const ITensorInfo *output, const FFT2DInfo &config) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F16, DataType::F32); // Create intermediate tensor info TensorInfo 
first_pass_tensor(input->clone()->set_is_resizable(true).reset_padding().set_num_channels(2)); diff --git a/src/runtime/CL/functions/CLFFTConvolutionLayer.cpp b/src/runtime/CL/functions/CLFFTConvolutionLayer.cpp index 97b64b24f3..45e74df703 100644 --- a/src/runtime/CL/functions/CLFFTConvolutionLayer.cpp +++ b/src/runtime/CL/functions/CLFFTConvolutionLayer.cpp @@ -104,14 +104,17 @@ CLFFTConvolutionLayer::CLFFTConvolutionLayer(std::shared_ptr<IMemoryManager> mem } void CLFFTConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, - const ActivationLayerInfo &act_info) + const ActivationLayerInfo &act_info, bool enable_fast_math) { - configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, act_info); + configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, act_info, enable_fast_math); } void CLFFTConvolutionLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, - const ActivationLayerInfo &act_info) + const ActivationLayerInfo &act_info, bool enable_fast_math) { + ARM_COMPUTE_UNUSED(enable_fast_math); + ARM_COMPUTE_ERROR_THROW_ON(CLFFTConvolutionLayer::validate(input->info(), weights->info(), biases != nullptr ? 
biases->info() : nullptr, output->info(), conv_info, act_info, enable_fast_math)); + _original_weights = weights; _original_bias = biases; @@ -265,9 +268,10 @@ void CLFFTConvolutionLayer::configure(const CLCompileContext &compile_context, I } Status CLFFTConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, - const ActivationLayerInfo &act_info) + const ActivationLayerInfo &act_info, bool enable_fast_math) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON((input->data_type() == DataType::F16) && !enable_fast_math); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights); // Get indices for the width and height @@ -287,9 +291,8 @@ Status CLFFTConvolutionLayer::validate(const ITensorInfo *input, const ITensorIn // Validate biases if(biases != nullptr) { - const size_t idx_channels = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases); - ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_channels] != biases->tensor_shape().x()); + ARM_COMPUTE_RETURN_ERROR_ON(weights->tensor_shape()[3] != biases->tensor_shape().x()); } // Checks performed when output is configured diff --git a/src/runtime/NEON/functions/NEFFTConvolutionLayer.cpp b/src/runtime/NEON/functions/NEFFTConvolutionLayer.cpp index bb6b5ed6b4..60a747daa3 100644 --- a/src/runtime/NEON/functions/NEFFTConvolutionLayer.cpp +++ b/src/runtime/NEON/functions/NEFFTConvolutionLayer.cpp @@ -103,8 +103,10 @@ NEFFTConvolutionLayer::NEFFTConvolutionLayer(std::shared_ptr<IMemoryManager> mem NEFFTConvolutionLayer::~NEFFTConvolutionLayer() = default; void NEFFTConvolutionLayer::configure(ITensor *input, const ITensor *weights, const ITensor *biases, 
ITensor *output, const PadStrideInfo &conv_info, - const ActivationLayerInfo &act_info) + const ActivationLayerInfo &act_info, bool enable_fast_math) { + ARM_COMPUTE_UNUSED(enable_fast_math); + _original_weights = weights; _original_bias = biases; @@ -258,8 +260,10 @@ void NEFFTConvolutionLayer::configure(ITensor *input, const ITensor *weights, co } Status NEFFTConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, - const ActivationLayerInfo &act_info) + const ActivationLayerInfo &act_info, bool enable_fast_math) { + ARM_COMPUTE_UNUSED(enable_fast_math); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights); diff --git a/tests/validation/CL/FFT.cpp b/tests/validation/CL/FFT.cpp index 1115ddcd8b..fb2f1f53e2 100644 --- a/tests/validation/CL/FFT.cpp +++ b/tests/validation/CL/FFT.cpp @@ -64,8 +64,10 @@ const auto ActivationFunctionsSmallDataset = framework::dataset::make("Activatio ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f) }); -RelativeTolerance<float> tolerance_f32(0.1f); /**< Relative tolerance value for FP32 */ -constexpr float tolerance_num = 0.07f; /**< Tolerance number */ +RelativeTolerance<float> tolerance_f32(0.1f); /**< Relative tolerance value for FP32 */ +RelativeTolerance<half> tolerance_f16(half(0.1f)); /**< Relative tolerance value for FP16 */ +constexpr float tolerance_num_f32 = 0.07f; /**< Tolerance number for FP32*/ +constexpr float tolerance_num_f16 = 0.15f; /**< Tolerance number for FP16*/ } // namespace TEST_SUITE(CL) @@ -108,9 +110,16 @@ TEST_SUITE(FP32) FIXTURE_DATA_TEST_CASE(RunSmall, CLFFT1DFixture<float>, framework::DatasetMode::ALL, combine(shapes_1d, framework::dataset::make("DataType", DataType::F32))) { // Validate output - validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
+ validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num_f32); } TEST_SUITE_END() // FP32 +TEST_SUITE(FP16) +FIXTURE_DATA_TEST_CASE(RunSmall, CLFFT1DFixture<half>, framework::DatasetMode::ALL, combine(shapes_1d, framework::dataset::make("DataType", DataType::F16))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num_f16); +} +TEST_SUITE_END() // FP16 TEST_SUITE_END() // Float TEST_SUITE_END() // FFT1D @@ -149,9 +158,16 @@ TEST_SUITE(FP32) FIXTURE_DATA_TEST_CASE(RunSmall, CLFFT2DFixture<float>, framework::DatasetMode::ALL, combine(shapes_2d, framework::dataset::make("DataType", DataType::F32))) { // Validate output - validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num); + validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num_f32); } TEST_SUITE_END() // FP32 +TEST_SUITE(FP16) +FIXTURE_DATA_TEST_CASE(RunSmall, CLFFT2DFixture<half>, framework::DatasetMode::ALL, combine(shapes_2d, framework::dataset::make("DataType", DataType::F16))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num_f16); +} +TEST_SUITE_END() // FP16 TEST_SUITE_END() // Float TEST_SUITE_END() // FFT2D @@ -168,9 +184,19 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLFFTConvolutionLayerFixture<float>, framework: ActivationFunctionsSmallDataset)) { // Validate output - validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num); + validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num_f32); } TEST_SUITE_END() // FP32 +TEST_SUITE(FP16) +FIXTURE_DATA_TEST_CASE(RunSmall, CLFFTConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallFFTConvolutionLayerDataset(), + framework::dataset::make("DataType", DataType::F16)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + ActivationFunctionsSmallDataset)) +{ + // Validate output + validate(CLAccessor(_target), _reference, 
tolerance_f16, tolerance_num_f16); +} +TEST_SUITE_END() // FP16 TEST_SUITE_END() // Float TEST_SUITE_END() // FFTConvolutionLayer diff --git a/tests/validation/fixtures/FFTFixture.h b/tests/validation/fixtures/FFTFixture.h index dad774ce51..564098497b 100644 --- a/tests/validation/fixtures/FFTFixture.h +++ b/tests/validation/fixtures/FFTFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Arm Limited. + * Copyright (c) 2019-2020 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -35,6 +35,8 @@ #include "tests/validation/reference/ConvolutionLayer.h" #include "tests/validation/reference/DFT.h" +#include "utils/Utils.h" + #include <random> namespace arm_compute @@ -59,8 +61,23 @@ protected: template <typename U> void fill(U &&tensor) { - std::uniform_real_distribution<float> distribution(-5.f, 5.f); - library->fill(tensor, distribution, 0); + switch(tensor.data_type()) + { + case DataType::F16: + { + arm_compute::utils::uniform_real_distribution_fp16 distribution(half(-5.0f), half(5.0f)); + library->fill(tensor, distribution, 0); + break; + } + case DataType::F32: + { + std::uniform_real_distribution<float> distribution(-5.0f, 5.0f); + library->fill(tensor, distribution, 0); + break; + } + default: + library->fill_tensor_uniform(tensor, 0); + } } TensorType compute_target(const TensorShape &shape, DataType data_type) @@ -134,9 +151,15 @@ protected: { switch(tensor.data_type()) { + case DataType::F16: + { + arm_compute::utils::uniform_real_distribution_fp16 distribution(half(-1.0f), half(1.0f)); + library->fill(tensor, distribution, i); + break; + } case DataType::F32: { - std::uniform_real_distribution<> distribution(-1.0f, 1.0f); + std::uniform_real_distribution<float> distribution(-1.0f, 1.0f); library->fill(tensor, distribution, i); break; } @@ -166,7 +189,7 @@ protected: // Create and configure function FunctionType conv; - conv.configure(&src, &weights, &bias, &dst, info, act_info); + conv.configure(&src, &weights, &bias, &dst, info, act_info, _data_type == 
DataType::F16); ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS); diff --git a/tests/validation/reference/DFT.cpp b/tests/validation/reference/DFT.cpp index 1f746eaeb7..b98bc77b1d 100644 --- a/tests/validation/reference/DFT.cpp +++ b/tests/validation/reference/DFT.cpp @@ -318,7 +318,7 @@ SimpleTensor<T> ridft_1d(const SimpleTensor<T> &src, bool is_odd) { auto dst = rdft_1d_core(src, FFTDirection::Inverse, is_odd); - const T scaling_factor = dst.shape()[0]; + const T scaling_factor = T(dst.shape()[0]); scale(dst, scaling_factor); return dst; @@ -330,7 +330,7 @@ SimpleTensor<T> dft_1d(const SimpleTensor<T> &src, FFTDirection direction) auto dst = dft_1d_core(src, direction); if(direction == FFTDirection::Inverse) { - const T scaling_factor = dst.shape()[0]; + const T scaling_factor = T(dst.shape()[0]); scale(dst, scaling_factor); } return dst; @@ -359,7 +359,7 @@ SimpleTensor<T> ridft_2d(const SimpleTensor<T> &src, bool is_odd) auto transposed_2 = permute(first_pass, PermutationVector(1U, 0U)); auto dst = rdft_1d_core(transposed_2, direction, is_odd); - const T scaling_factor = dst.shape()[0] * dst.shape()[1]; + const T scaling_factor = T(dst.shape()[0] * dst.shape()[1]); scale(dst, scaling_factor); return dst; } @@ -383,7 +383,7 @@ SimpleTensor<T> dft_2d(const SimpleTensor<T> &src, FFTDirection direction) auto transposed_2 = permute(first_pass, PermutationVector(1U, 0U)); auto dst = dft_1d_core(transposed_2, direction); - const T scaling_factor = dst.shape()[0] * dst.shape()[1]; + const T scaling_factor = T(dst.shape()[0] * dst.shape()[1]); scale(dst, scaling_factor); return dst; @@ -425,6 +425,7 @@ SimpleTensor<T> conv2d_dft(const SimpleTensor<T> &src, const SimpleTensor<T> &w, return slice(conv_res, Coordinates(start_left, start_top), Coordinates(end_right, end_botton)); } +// FP32 template SimpleTensor<float> rdft_1d(const SimpleTensor<float> &src); 
template SimpleTensor<float> ridft_1d(const SimpleTensor<float> &src, bool is_odd); template SimpleTensor<float> dft_1d(const SimpleTensor<float> &src, FFTDirection direction); @@ -434,6 +435,17 @@ template SimpleTensor<float> ridft_2d(const SimpleTensor<float> &src, bool is_od template SimpleTensor<float> dft_2d(const SimpleTensor<float> &src, FFTDirection direction); template SimpleTensor<float> conv2d_dft(const SimpleTensor<float> &src, const SimpleTensor<float> &w, const PadStrideInfo &conv_info); + +// FP16 +template SimpleTensor<half> rdft_1d(const SimpleTensor<half> &src); +template SimpleTensor<half> ridft_1d(const SimpleTensor<half> &src, bool is_odd); +template SimpleTensor<half> dft_1d(const SimpleTensor<half> &src, FFTDirection direction); + +template SimpleTensor<half> rdft_2d(const SimpleTensor<half> &src); +template SimpleTensor<half> ridft_2d(const SimpleTensor<half> &src, bool is_odd); +template SimpleTensor<half> dft_2d(const SimpleTensor<half> &src, FFTDirection direction); + +template SimpleTensor<half> conv2d_dft(const SimpleTensor<half> &src, const SimpleTensor<half> &w, const PadStrideInfo &conv_info); } // namespace reference } // namespace validation } // namespace test |