author     Georgios Pinitas <georgios.pinitas@arm.com>    2019-03-26 17:23:28 +0000
committer  Gian Marco Iodice <gianmarco.iodice@arm.com>   2019-04-11 09:34:26 +0000
commit     8be9148814b88e5b0cabd5a4d2b1f4ff470a8c1c (patch)
tree       760658b8c7b8917379467bd3fc119a5502faa850
parent     a50e702289af66944e860eafc7f3b32f6c5f30be (diff)
download   ComputeLibrary-8be9148814b88e5b0cabd5a4d2b1f4ff470a8c1c.tar.gz
COMPMID-1959: Implements 2D FFT on OpenCL
Change-Id: I73cf3984a5463acc854c8a59dc2bd9a5234cd99c
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/936
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
-rw-r--r-- arm_compute/core/CL/CLKernels.h | 1
-rw-r--r-- arm_compute/core/CL/kernels/CLFFTDigitReverseKernel.h | 10
-rw-r--r-- arm_compute/core/CL/kernels/CLFFTRadixStageKernel.h | 4
-rw-r--r-- arm_compute/core/CL/kernels/CLFFTScaleKernel.h | 78
-rw-r--r-- arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h | 48
-rw-r--r-- arm_compute/core/KernelDescriptors.h | 18
-rw-r--r-- arm_compute/runtime/CL/CLFunctions.h | 2
-rw-r--r-- arm_compute/runtime/CL/functions/CLFFT1D.h | 12
-rw-r--r-- arm_compute/runtime/CL/functions/CLFFT2D.h | 76
-rw-r--r-- arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h | 154
-rw-r--r-- arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h | 27
-rw-r--r-- arm_compute/runtime/FunctionDescriptors.h | 21
-rw-r--r-- src/core/CL/CLKernelLibrary.cpp | 25
-rw-r--r-- src/core/CL/cl_kernels/fft.cl | 1077
-rw-r--r-- src/core/CL/cl_kernels/fft_digit_reverse.cl | 148
-rw-r--r-- src/core/CL/cl_kernels/fft_scale.cl | 78
-rw-r--r-- src/core/CL/cl_kernels/pixelwise_mul_float.cl | 52
-rw-r--r-- src/core/CL/cl_kernels/reduction_operation.cl | 17
-rw-r--r-- src/core/CL/kernels/CLFFTDigitReverseKernel.cpp | 36
-rw-r--r-- src/core/CL/kernels/CLFFTRadixStageKernel.cpp | 19
-rw-r--r-- src/core/CL/kernels/CLFFTScaleKernel.cpp | 143
-rw-r--r-- src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp | 142
-rw-r--r-- src/core/CL/kernels/CLReductionOperationKernel.cpp | 12
-rw-r--r-- src/runtime/CL/functions/CLFFT1D.cpp | 49
-rw-r--r-- src/runtime/CL/functions/CLFFT2D.cpp | 95
-rw-r--r-- src/runtime/CL/functions/CLFFTConvolutionLayer.cpp | 380
-rw-r--r-- src/runtime/CL/functions/CLPixelWiseMultiplication.cpp | 29
-rw-r--r-- tests/benchmark/CL/ConvolutionLayer.cpp | 18
-rw-r--r-- tests/benchmark/CL/FFT.cpp | 7
-rw-r--r-- tests/benchmark/fixtures/FFTConvolutionLayerFixture.h | 100
-rw-r--r-- tests/benchmark/fixtures/FFTFixture.h | 6
-rw-r--r-- tests/datasets/SmallConvolutionLayerDataset.h | 13
-rw-r--r-- tests/datasets/system_tests/resnet12/ResNet12ConvolutionLayerDataset.h | 51
-rw-r--r-- tests/validation/CL/ConvolutionLayer.cpp | 2
-rw-r--r-- tests/validation/CL/FFT.cpp | 119
-rw-r--r-- tests/validation/CL/ReductionOperation.cpp | 2
-rw-r--r-- tests/validation/fixtures/ConvolutionLayerFixture.h | 2
-rw-r--r-- tests/validation/fixtures/FFTFixture.h | 138
38 files changed, 2962 insertions, 249 deletions
diff --git a/arm_compute/core/CL/CLKernels.h b/arm_compute/core/CL/CLKernels.h
index e3ffcd0704..57498715c8 100644
--- a/arm_compute/core/CL/CLKernels.h
+++ b/arm_compute/core/CL/CLKernels.h
@@ -66,6 +66,7 @@
#include "arm_compute/core/CL/kernels/CLErodeKernel.h"
#include "arm_compute/core/CL/kernels/CLFFTDigitReverseKernel.h"
#include "arm_compute/core/CL/kernels/CLFFTRadixStageKernel.h"
+#include "arm_compute/core/CL/kernels/CLFFTScaleKernel.h"
#include "arm_compute/core/CL/kernels/CLFastCornersKernel.h"
#include "arm_compute/core/CL/kernels/CLFillBorderKernel.h"
#include "arm_compute/core/CL/kernels/CLFlattenLayerKernel.h"
diff --git a/arm_compute/core/CL/kernels/CLFFTDigitReverseKernel.h b/arm_compute/core/CL/kernels/CLFFTDigitReverseKernel.h
index 10652cdb4d..3082cb186f 100644
--- a/arm_compute/core/CL/kernels/CLFFTDigitReverseKernel.h
+++ b/arm_compute/core/CL/kernels/CLFFTDigitReverseKernel.h
@@ -26,6 +26,8 @@
#include "arm_compute/core/CL/ICLKernel.h"
+#include "arm_compute/core/KernelDescriptors.h"
+
namespace arm_compute
{
// Forward declarations
@@ -52,19 +54,19 @@ public:
* @param[in] input Source tensor. Data types supported: F32.
* @param[out] output Destination tensor. Data type supported: same as @p input
* @param[in] idx Digit reverse index tensor. Data type supported: U32
- * @param[in] axis Axis to perform digit reverse on.
+ * @param[in] config Kernel configuration.
*/
- void configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *idx, unsigned int axis);
+ void configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *idx, const FFTDigitReverseKernelInfo &config);
/** Static function to check if given info will lead to a valid configuration of @ref CLFFTDigitReverseKernel
*
* @param[in] input Source tensor info. Data types supported: F32.
* @param[in] output Destination tensor info. Data type supported: same as @p input
* @param[in] idx Digit reverse index tensor info. Data type supported: U32
- * @param[in] axis Axis to perform digit reverse on.
+ * @param[in] config Kernel configuration.
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *idx, unsigned int axis);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *idx, const FFTDigitReverseKernelInfo &config);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
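For illustration, a minimal sketch of driving the descriptor-based interface above (tensor objects and names are hypothetical, assumed to be initialised and allocated elsewhere):

    // Hypothetical usage of the new descriptor-based configure().
    FFTDigitReverseKernelInfo config{};
    config.axis      = 0;     // reorder along the innermost axis
    config.conjugate = false; // leave the reordered values unconjugated

    CLFFTDigitReverseKernel digit_reverse;
    digit_reverse.configure(&input, &output, &idx, config); // input/output: F32, idx: U32
    CLScheduler::get().enqueue(digit_reverse);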
diff --git a/arm_compute/core/CL/kernels/CLFFTRadixStageKernel.h b/arm_compute/core/CL/kernels/CLFFTRadixStageKernel.h
index 9de775eafa..16fa390e5d 100644
--- a/arm_compute/core/CL/kernels/CLFFTRadixStageKernel.h
+++ b/arm_compute/core/CL/kernels/CLFFTRadixStageKernel.h
@@ -59,7 +59,7 @@ public:
* @param[out] output Destination tensor. Can be nullptr. Data type supported: same as @p input
* @param[in] config FFT descriptor metadata.
*/
- void configure(ICLTensor *input, ICLTensor *output, const FFTRadixStageKernelDescriptor &config);
+ void configure(ICLTensor *input, ICLTensor *output, const FFTRadixStageKernelInfo &config);
/** Static function to check if given info will lead to a valid configuration of @ref CLFFTRadixStageKernel
*
* @param[in] input Source tensor info. Data types supported: F32.
@@ -68,7 +68,7 @@ public:
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const FFTRadixStageKernelDescriptor &config);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const FFTRadixStageKernelInfo &config);
/** Returns the radixes that are supported by the FFT kernel
*
* @return A set of supported radix
diff --git a/arm_compute/core/CL/kernels/CLFFTScaleKernel.h b/arm_compute/core/CL/kernels/CLFFTScaleKernel.h
new file mode 100644
index 0000000000..39ecac42af
--- /dev/null
+++ b/arm_compute/core/CL/kernels/CLFFTScaleKernel.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLFFTSCALEKERNEL_H__
+#define __ARM_COMPUTE_CLFFTSCALEKERNEL_H__
+
+#include "arm_compute/core/CL/ICLKernel.h"
+
+#include "arm_compute/core/KernelDescriptors.h"
+
+namespace arm_compute
+{
+// Forward declarations
+class ICLTensor;
+
+/** Interface for the inverse FFT scale kernel. */
+class CLFFTScaleKernel : public ICLKernel
+{
+public:
+ /** Constructor */
+ CLFFTScaleKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLFFTScaleKernel(const CLFFTScaleKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLFFTScaleKernel &operator=(const CLFFTScaleKernel &) = delete;
+ /** Default Move Constructor. */
+ CLFFTScaleKernel(CLFFTScaleKernel &&) = default;
+ /** Default move assignment operator */
+ CLFFTScaleKernel &operator=(CLFFTScaleKernel &&) = default;
+ /** Default destructor */
+ ~CLFFTScaleKernel() = default;
+ /** Set the input and output tensors.
+ *
+ * @param[in,out] input Source tensor. Data types supported: F32.
+ * @param[out] output Destination tensor. Data type supported: same as @p input
+ * @param[in] config Kernel configuration
+ */
+ void configure(ICLTensor *input, ICLTensor *output, const FFTScaleKernelInfo &config);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLFFTScaleKernel
+ *
+ * @param[in] input Source tensor info. Data types supported: F32.
+ * @param[in] output Destination tensor info. Data type supported: same as @p input
+ * @param[in] config Kernel configuration
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const FFTScaleKernelInfo &config);
+
+ // Inherited methods overridden:
+ void run(const Window &window, cl::CommandQueue &queue) override;
+
+private:
+ ICLTensor *_input;
+ ICLTensor *_output;
+ bool _run_in_place;
+};
+} // namespace arm_compute
+#endif /*__ARM_COMPUTE_CLFFTSCALEKERNEL_H__ */
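Based on how CLFFT1D uses this kernel later in the patch, a plausible configuration for an N-point inverse FFT might look as follows (names are hypothetical, and in-place operation is assumed when no distinct output tensor is given):

    FFTScaleKernelInfo scale_config{};
    scale_config.scale     = static_cast<float>(N); // divide every element by N
    scale_config.conjugate = true;                  // undo the input-side conjugation

    CLFFTScaleKernel scale;
    scale.configure(&tensor, nullptr, scale_config); // assumption: nullptr output => in-place
    CLScheduler::get().enqueue(scale);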
diff --git a/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h b/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
index b835aa701b..804182b187 100644
--- a/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
+++ b/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,11 +29,10 @@
namespace arm_compute
{
+// Forward declarations
class ICLTensor;
-/** Interface for the pixelwise multiplication kernel.
- *
- */
+/** Interface for the pixelwise multiplication kernel. */
class CLPixelWiseMultiplicationKernel : public ICLKernel
{
public:
@@ -83,5 +82,46 @@ private:
const ICLTensor *_input2;
ICLTensor *_output;
};
+
+/** Interface for the complex pixelwise multiplication kernel. */
+class CLComplexPixelWiseMultiplicationKernel : public ICLKernel
+{
+public:
+ /** Default constructor.*/
+ CLComplexPixelWiseMultiplicationKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLComplexPixelWiseMultiplicationKernel(const CLComplexPixelWiseMultiplicationKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLComplexPixelWiseMultiplicationKernel &operator=(const CLComplexPixelWiseMultiplicationKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ CLComplexPixelWiseMultiplicationKernel(CLComplexPixelWiseMultiplicationKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ CLComplexPixelWiseMultiplicationKernel &operator=(CLComplexPixelWiseMultiplicationKernel &&) = default;
+ /** Initialise the kernel's inputs and output.
+ *
+ * @param[in] input1 An input tensor. Data types supported: F32. Number of channels supported: 2.
+ * @param[in] input2 An input tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ * @param[out] output The output tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ */
+ void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLComplexPixelWiseMultiplicationKernel
+ *
+ * @param[in] input1 An input tensor info. Data types supported: F32. Number of channels supported: 2.
+ * @param[in] input2 An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ * @param[in] output The output tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
+
+ // Inherited methods overridden:
+ void run(const Window &window, cl::CommandQueue &queue) override;
+ BorderSize border_size() const override;
+
+private:
+ const ICLTensor *_input1;
+ const ICLTensor *_input2;
+ ICLTensor *_output;
+};
} // namespace arm_compute
#endif /*__ARM_COMPUTE_CLPIXELWISEMULTIPLICATIONKERNEL_H__ */
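Each 2-channel element here represents a complex number as (real, imaginary), so for inputs (a, b) and (c, d) the complex kernel is expected to produce the standard complex product:

    (a + bi) * (c + di) = (ac - bd) + (ad + bc)i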
diff --git a/arm_compute/core/KernelDescriptors.h b/arm_compute/core/KernelDescriptors.h
index 186dbfb6d8..83131f4296 100644
--- a/arm_compute/core/KernelDescriptors.h
+++ b/arm_compute/core/KernelDescriptors.h
@@ -26,10 +26,24 @@
namespace arm_compute
{
+/** Descriptor for FFT scale kernels */
+struct FFTScaleKernelInfo
+{
+ float scale{ 0.f }; /**< Scale factor to apply. */
+ bool conjugate{ true }; /**< Flag to conjugate the output. */
+};
+
+/** Descriptor for FFT digit reverse kernels */
+struct FFTDigitReverseKernelInfo
+{
+ unsigned int axis{ 0 }; /**< Axis to perform the kernel on. */
+ bool conjugate{ false }; /**< Flag to conjugate the output. */
+};
+
/** Descriptor used by the FFT core kernels */
-struct FFTRadixStageKernelDescriptor
+struct FFTRadixStageKernelInfo
{
- unsigned int axis{ 0 }; /**< Axis to run the FFT on. */
+ unsigned int axis{ 0 }; /**< Axis to run the kernel on. */
unsigned int radix{ 0 }; /**< Radix to use. */
unsigned int Nx{ 0 }; /**< Nx coefficient. */
bool is_first_stage{ false }; /**< Flags if the FFT kernel is the first stage of a decomposed FFT. */
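To make the Nx field concrete, here is a hypothetical pair of stage descriptors for a 40-point FFT decomposed as 8 x 5, assuming the usual Cooley-Tukey convention that Nx holds the product of the radices already processed:

    FFTRadixStageKernelInfo first_stage{};
    first_stage.axis           = 0;
    first_stage.radix          = 8;
    first_stage.Nx             = 1;    // nothing processed yet
    first_stage.is_first_stage = true;

    FFTRadixStageKernelInfo second_stage{};
    second_stage.axis           = 0;
    second_stage.radix          = 5;
    second_stage.Nx             = 8;   // butterflies now span the radix-8 outputs
    second_stage.is_first_stage = false;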
diff --git a/arm_compute/runtime/CL/CLFunctions.h b/arm_compute/runtime/CL/CLFunctions.h
index f1021843a0..a4fcdc27ac 100644
--- a/arm_compute/runtime/CL/CLFunctions.h
+++ b/arm_compute/runtime/CL/CLFunctions.h
@@ -67,6 +67,8 @@
#include "arm_compute/runtime/CL/functions/CLEqualizeHistogram.h"
#include "arm_compute/runtime/CL/functions/CLErode.h"
#include "arm_compute/runtime/CL/functions/CLFFT1D.h"
+#include "arm_compute/runtime/CL/functions/CLFFT2D.h"
+#include "arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLFastCorners.h"
#include "arm_compute/runtime/CL/functions/CLFillBorder.h"
#include "arm_compute/runtime/CL/functions/CLFlattenLayer.h"
diff --git a/arm_compute/runtime/CL/functions/CLFFT1D.h b/arm_compute/runtime/CL/functions/CLFFT1D.h
index 1612cf7f50..029023c524 100644
--- a/arm_compute/runtime/CL/functions/CLFFT1D.h
+++ b/arm_compute/runtime/CL/functions/CLFFT1D.h
@@ -28,6 +28,7 @@
#include "arm_compute/core/CL/kernels/CLFFTDigitReverseKernel.h"
#include "arm_compute/core/CL/kernels/CLFFTRadixStageKernel.h"
+#include "arm_compute/core/CL/kernels/CLFFTScaleKernel.h"
#include "arm_compute/runtime/CL/CLMemoryGroup.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/FunctionDescriptors.h"
@@ -39,8 +40,9 @@ class ICLTensor;
/** Basic function to execute one dimensional FFT. This function calls the following OpenCL kernels:
*
- * -# @ref CLFFTDigitReverseKernel Performs digit reverse
- * -# @ref CLFFTRadixStageKernel A list of FFT kernels depending on the radix decomposition
+ * -# @ref CLFFTDigitReverseKernel Performs digit reverse.
+ * -# @ref CLFFTRadixStageKernel A list of FFT kernels depending on the radix decomposition.
+ * -# @ref CLFFTScaleKernel Performs output scaling in case of inverse FFT.
*/
class CLFFT1D : public IFunction
{
@@ -69,11 +71,13 @@ public:
protected:
CLMemoryGroup _memory_group;
- CLTensor _digit_reversed_input;
- CLTensor _digit_reverse_indices;
CLFFTDigitReverseKernel _digit_reverse_kernel;
std::unique_ptr<CLFFTRadixStageKernel[]> _fft_kernels;
+ CLFFTScaleKernel _scale_kernel;
+ CLTensor _digit_reversed_input;
+ CLTensor _digit_reverse_indices;
unsigned int _num_ffts;
+ bool _run_scale;
};
} // namespace arm_compute
#endif /*__ARM_COMPUTE_CLFFT1D_H__ */
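A minimal usage sketch of the function above (tensors are assumed to be 2-channel F32, initialised and allocated beforehand; names are illustrative only):

    FFT1DInfo info{};
    info.axis      = 0;
    info.direction = FFTDirection::Forward;

    CLFFT1D fft;
    fft.configure(&src, &dst, info);
    fft.run();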
diff --git a/arm_compute/runtime/CL/functions/CLFFT2D.h b/arm_compute/runtime/CL/functions/CLFFT2D.h
new file mode 100644
index 0000000000..a0673ecc96
--- /dev/null
+++ b/arm_compute/runtime/CL/functions/CLFFT2D.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLFFT2D_H__
+#define __ARM_COMPUTE_CLFFT2D_H__
+
+#include "arm_compute/runtime/IFunction.h"
+
+#include "arm_compute/runtime/CL/CLMemoryGroup.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/functions/CLFFT1D.h"
+#include "arm_compute/runtime/FunctionDescriptors.h"
+
+namespace arm_compute
+{
+// Forward declaration
+class ICLTensor;
+
+/** Basic function to execute two dimensional FFT. This function calls the following OpenCL kernels:
+ *
+ * -# @ref CLFFT1D 1D FFT is performed on the first given axis
+ * -# @ref CLFFT1D 1D FFT is performed on the second given axis
+ */
+class CLFFT2D : public IFunction
+{
+public:
+ /** Default Constructor */
+ CLFFT2D(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+ /** Initialise the function's source and destination tensors.
+ *
+ * @param[in] input Source tensor. Data types supported: F32.
+ * @param[out] output Destination tensor. Data types and data layouts supported: Same as @p input.
+ * @param[in] config FFT related configuration
+ */
+ void configure(const ICLTensor *input, ICLTensor *output, const FFT2DInfo &config);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLFFT2D.
+ *
+ * @param[in] input Source tensor info. Data types supported: F32.
+ * @param[in] output Destination tensor info. Data types and data layouts supported: Same as @p input.
+ * @param[in] config FFT related configuration
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const FFT2DInfo &config);
+
+ // Inherited methods overridden:
+ void run() override;
+
+protected:
+ CLMemoryGroup _memory_group;
+ CLFFT1D _first_pass_func;
+ CLFFT1D _second_pass_func;
+ CLTensor _first_pass_tensor;
+};
+} // namespace arm_compute
+#endif /*__ARM_COMPUTE_CLFFT2D_H__ */
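For orientation, a self-contained sketch of a forward 2D transform with the new function (the shape and tensor names are illustrative only):

    CLScheduler::get().default_init();

    const TensorShape shape(128U, 128U);
    CLTensor src, dst;
    src.allocator()->init(TensorInfo(shape, 2, DataType::F32)); // 2 channels: (real, imag)
    dst.allocator()->init(TensorInfo(shape, 2, DataType::F32));

    FFT2DInfo config{};
    config.axes      = std::make_pair(0U, 1U); // rows first, then columns
    config.direction = FFTDirection::Forward;

    CLFFT2D fft2d;
    fft2d.configure(&src, &dst, config);

    src.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill src with complex input data, then:
    fft2d.run();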
diff --git a/arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h
new file mode 100644
index 0000000000..0fd2cf3cb1
--- /dev/null
+++ b/arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLFFTCONVOLUTIONLAYER_H__
+#define __ARM_COMPUTE_CLFFTCONVOLUTIONLAYER_H__
+
+#include "arm_compute/runtime/IFunction.h"
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
+#include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"
+#include "arm_compute/runtime/CL/functions/CLFFT2D.h"
+#include "arm_compute/runtime/CL/functions/CLPadLayer.h"
+#include "arm_compute/runtime/CL/functions/CLPermute.h"
+#include "arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h"
+#include "arm_compute/runtime/CL/functions/CLReductionOperation.h"
+#include "arm_compute/runtime/CL/functions/CLReshapeLayer.h"
+#include "arm_compute/runtime/CL/functions/CLReverse.h"
+#include "arm_compute/runtime/CL/functions/CLSlice.h"
+
+namespace arm_compute
+{
+// Forward declarations
+class ICLTensor;
+
+/** Basic function to execute FFT-based convolution on OpenCL. This function calls the following OpenCL functions/kernels:
+ *
+ * -# @ref CLPermute Permute input if NHWC (only NCHW is supported internally).
+ * -# @ref CLPadLayer Pad input.
+ * -# @ref CLFFT2D Forward transform to the frequency domain.
+ * -# @ref CLComplexPixelWiseMultiplication Complex element-wise product of input and the weights.
+ * -# @ref CLReductionOperation Reduction across channels.
+ * -# @ref CLFFT2D Inverse transform back to the spatial domain.
+ * -# @ref CLSlice Extract valid output.
+ * -# @ref CLArithmeticAddition Add bias.
+ * -# @ref CLActivationLayer Perform activation.
+ * -# @ref CLPermute Permute output if NHWC (only NCHW is supported internally).
+ */
+class CLFFTConvolutionLayer : public IFunction
+{
+public:
+ /** Default constructor */
+ CLFFTConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLFFTConvolutionLayer(const CLFFTConvolutionLayer &) = delete;
+ /** Default move constructor */
+ CLFFTConvolutionLayer(CLFFTConvolutionLayer &&) = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLFFTConvolutionLayer &operator=(const CLFFTConvolutionLayer &) = delete;
+ /** Default move assignment operator */
+ CLFFTConvolutionLayer &operator=(CLFFTConvolutionLayer &&) = default;
+ /** Set the input and output tensors.
+ *
+ * @note This function supports only square kernels (of any size) and unit strides, for both NCHW and NHWC data layouts
+ *
+ * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
+ * while every optional dimension from 4 and above represent a batch of inputs.
+ * Data types supported: F32.
+ * @param[in] weights Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
+ * @param[in] biases Biases tensor. Shared biases supported. Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+ * @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
+ * Data types supported: Same as @p input.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ */
+ void configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo());
+ /** Static function to check if given info will lead to a valid configuration of @ref CLFFTConvolutionLayer
+ *
+ * @note This function supports only square kernels (of any size) and unit strides, for both NCHW and NHWC data layouts
+ *
+ * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
+ * while every optional dimension from 4 and above represent a batch of inputs.
+ * Data types supported: F32.
+ * @param[in] weights Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
+ * @param[in] biases Biases tensor. Shared biases supported. Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+ * @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
+ * Data types supported: Same as @p input.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+ // Inherited methods overridden:
+ void run() override;
+ void prepare() override;
+
+private:
+ CLMemoryGroup _memory_group;
+ CLReverse _flip_weights_func;
+ CLPermute _permute_input_func;
+ CLPermute _permute_output_func;
+ CLPermute _permute_weights_func;
+ CLPermute _permute_bias_func;
+ CLPadLayer _pad_input_func;
+ CLPadLayer _pad_weights_func;
+ CLFFT2D _transform_input_func;
+ CLFFT2D _transform_weights_func;
+ CLFFT2D _itransform_output_func;
+ CLComplexPixelWiseMultiplication _prod_func;
+ CLReductionOperation _reduce_func;
+ CLSlice _extract_output_func;
+ CLArithmeticAddition _bias_add_func;
+ CLActivationLayer _activation_layer_func;
+
+ CLTensor _permuted_input;
+ CLTensor _permuted_weights;
+ CLTensor _permuted_bias;
+ CLTensor _permuted_output;
+ CLTensor _padded_input;
+ CLTensor _padded_weights;
+ CLTensor _flip_axis;
+ CLTensor _flipped_weights;
+ CLTensor _transformed_input;
+ CLTensor _transformed_weights;
+ CLTensor _input_weights_product;
+ CLTensor _output_product;
+ CLTensor _output_reduced;
+ CLTensor _itransformed_output;
+ CLTensor _reshaped_output;
+ CLTensor _bias_output;
+
+ const ICLTensor *_original_weights;
+ const ICLTensor *_original_bias;
+ bool _is_activationlayer_enabled;
+ bool _needs_permute;
+ bool _has_bias;
+ bool _is_prepared;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_CLFFTCONVOLUTIONLAYER_H__ */
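A hypothetical call sequence for an FFT-based 9x9 convolution with unit strides (tensors initialised and allocated elsewhere; all names illustrative):

    const PadStrideInfo conv_info(1, 1, 4, 4); // unit strides, pad 4 to preserve spatial size

    CLFFTConvolutionLayer fft_conv;
    fft_conv.configure(&input, &weights, &biases, &output, conv_info,
                       ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
    fft_conv.run(); // prepare() is typically invoked implicitly on the first run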
diff --git a/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h b/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h
index a59fb4aba8..0fa40a77f2 100644
--- a/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h
+++ b/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,6 +29,7 @@
namespace arm_compute
{
+// Forward declaration
class ICLTensor;
/** Basic function to run @ref CLPixelWiseMultiplicationKernel. */
@@ -64,5 +65,27 @@ public:
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
};
-}
+
+/** Basic function to run @ref CLComplexPixelWiseMultiplicationKernel. */
+class CLComplexPixelWiseMultiplication : public ICLSimpleFunction
+{
+public:
+ /** Initialise the kernel's inputs and output.
+ *
+ * @param[in, out] input1 An input tensor. Data types supported: F32. Number of channels supported: 2.
+ * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+ * @param[in, out] input2 An input tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+ * @param[out] output The output tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ */
+ void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLComplexPixelWiseMultiplication
+ *
+ * @param[in] input1 An input tensor info. Data types supported: F32. Number of channels supported: 2.
+ * @param[in] input2 An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ * @param[in] output The output tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
+};
+} // namespace arm_compute
#endif /*__ARM_COMPUTE_CLPIXELWISEMULTIPLICATION_H__ */
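A short sketch of the function-level wrapper above (all tensors F32 with 2 channels; names hypothetical):

    CLComplexPixelWiseMultiplication cmul;
    // Optional dry-run check before configuring:
    const Status s = CLComplexPixelWiseMultiplication::validate(lhs.info(), rhs.info(), prod.info());

    cmul.configure(&lhs, &rhs, &prod);
    cmul.run();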
diff --git a/arm_compute/runtime/FunctionDescriptors.h b/arm_compute/runtime/FunctionDescriptors.h
index 7ff25019e6..f9b16e4218 100644
--- a/arm_compute/runtime/FunctionDescriptors.h
+++ b/arm_compute/runtime/FunctionDescriptors.h
@@ -24,12 +24,29 @@
#ifndef __ARM_COMPUTE_RUNTIME_FUNCTION_DESCRIPTORS_H__
#define __ARM_COMPUTE_RUNTIME_FUNCTION_DESCRIPTORS_H__
+#include <utility>
+
namespace arm_compute
{
-/** Descriptor used by the FFT1d function */
+/** FFT direction to use */
+enum class FFTDirection
+{
+ Forward,
+ Inverse
+};
+
+/** Descriptor used by the FFT1D function */
struct FFT1DInfo
{
- unsigned int axis{ 0 }; /**< Axis to run the FFT on. */
+ unsigned int axis{ 0 }; /**< Axis to run the FFT on. */
+ FFTDirection direction{ FFTDirection::Forward }; /**< Direction of the FFT. */
+};
+
+/** Descriptor used by the FFT2D function */
+struct FFT2DInfo
+{
+ std::pair<unsigned int, unsigned int> axes{ 0, 1 }; /**< Axes to run on. If the same, multiple transforms are performed on a single axis. */
+ FFTDirection direction{ FFTDirection::Forward }; /**< Direction of the FFT. */
};
} // namespace arm_compute
#endif /* __ARM_COMPUTE_RUNTIME_FUNCTION_DESCRIPTORS_H__ */
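A note on the Inverse direction: the conjugate flags added to the digit-reverse and scale descriptors earlier in this patch suggest the inverse transform is realised through the standard forward-FFT identity

    IDFT(X) = (1/N) * conj( DFT( conj(X) ) )

which would explain why FFTDigitReverseKernelInfo::conjugate is set on the way in and FFTScaleKernelInfo (with scale = N and conjugate = true) is applied on the way out.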
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 4fa8ac4142..322ff517d9 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -219,7 +219,6 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "depthwise_convolution_3x3_f16", "depthwise_convolution.cl" },
{ "depthwise_convolution_3x3_nhwc", "depthwise_convolution.cl" },
{ "depthwise_convolution_3x3_nhwc_stride1", "depthwise_convolution.cl" },
- { "digit_reverse", "fft.cl" },
{ "dwc_3x3_native_qasymm8_nchw", "depthwise_convolution_quantized.cl" },
{ "dwc_3x3_native_qasymm8_dot8_nchw", "depthwise_convolution_quantized.cl" },
{ "dwc_3x3_reshaped_qasymm8_nhwc", "depthwise_convolution_quantized.cl" },
@@ -261,18 +260,33 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "elementwise_unary", "elementwise_unary.cl" },
{ "erode", "erode.cl" },
{ "fast_corners", "fast_corners.cl" },
+ { "fft_digit_reverse_axis_0", "fft_digit_reverse.cl" },
+ { "fft_digit_reverse_axis_1", "fft_digit_reverse.cl" },
{ "fft_radix_2_first_stage_axis_0", "fft.cl" },
+ { "fft_radix_2_first_stage_axis_1", "fft.cl" },
{ "fft_radix_2_axis_0", "fft.cl" },
+ { "fft_radix_2_axis_1", "fft.cl" },
{ "fft_radix_3_first_stage_axis_0", "fft.cl" },
+ { "fft_radix_3_first_stage_axis_1", "fft.cl" },
{ "fft_radix_3_axis_0", "fft.cl" },
+ { "fft_radix_3_axis_1", "fft.cl" },
{ "fft_radix_4_first_stage_axis_0", "fft.cl" },
+ { "fft_radix_4_first_stage_axis_1", "fft.cl" },
{ "fft_radix_4_axis_0", "fft.cl" },
+ { "fft_radix_4_axis_1", "fft.cl" },
{ "fft_radix_5_first_stage_axis_0", "fft.cl" },
+ { "fft_radix_5_first_stage_axis_1", "fft.cl" },
{ "fft_radix_5_axis_0", "fft.cl" },
+ { "fft_radix_5_axis_1", "fft.cl" },
{ "fft_radix_7_first_stage_axis_0", "fft.cl" },
+ { "fft_radix_7_first_stage_axis_1", "fft.cl" },
{ "fft_radix_7_axis_0", "fft.cl" },
+ { "fft_radix_7_axis_1", "fft.cl" },
{ "fft_radix_8_first_stage_axis_0", "fft.cl" },
+ { "fft_radix_8_first_stage_axis_1", "fft.cl" },
{ "fft_radix_8_axis_0", "fft.cl" },
+ { "fft_radix_8_axis_1", "fft.cl" },
+ { "fft_scale_conj", "fft_scale.cl" },
{ "fill_image_borders_constant", "fill_border.cl" },
{ "fill_image_borders_replicate", "fill_border.cl" },
{ "finalize", "optical_flow_pyramid_lk.cl" },
@@ -391,6 +405,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "NV21_to_YUV444_bt709", "color_convert.cl" },
{ "output_stage_quantized", "direct_convolution_1x1_3x3_5x5_quantized.cl" },
{ "permute", "permute.cl" },
+ { "pixelwise_mul_complex", "pixelwise_mul_float.cl" },
{ "pixelwise_mul_float", "pixelwise_mul_float.cl" },
{ "pixelwise_mul_int", "pixelwise_mul_int.cl" },
{ "pixelwise_mul_quantized", "pixelwise_mul_int.cl" },
@@ -710,6 +725,14 @@ const std::map<std::string, std::string> CLKernelLibrary::_program_source_map =
#include "./cl_kernels/fft.clembed"
},
{
+ "fft_digit_reverse.cl",
+#include "./cl_kernels/fft_digit_reverse.clembed"
+ },
+ {
+ "fft_scale.cl",
+#include "./cl_kernels/fft_scale.clembed"
+ },
+ {
"fill_border.cl",
#include "./cl_kernels/fill_border.clembed"
},
diff --git a/src/core/CL/cl_kernels/fft.cl b/src/core/CL/cl_kernels/fft.cl
index 5f1ef2483b..0027fd5b66 100644
--- a/src/core/CL/cl_kernels/fft.cl
+++ b/src/core/CL/cl_kernels/fft.cl
@@ -23,48 +23,6 @@
*/
#include "helpers.h"
-/** Computes the digit reverse stage
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] idx_ptr Pointer to the index tensor. Supported data types: U32
- * @param[in] idx_stride_x Stride of the index tensor in X dimension (in bytes)
- * @param[in] idx_step_x idx_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] idx_offset_first_element_in_bytes The offset of the first element in the index tensor
- */
-__kernel void digit_reverse(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- VECTOR_DECLARATION(idx))
-{
- // Get tensor pointers
- Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(src);
- Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
- Vector idx = CONVERT_TO_VECTOR_STRUCT(idx);
-
- const unsigned int iidx = *((__global uint *)(idx.ptr));
-
- // Load data
- float2 data = vload2(0, (__global float *)tensor3D_offset(&src, iidx, get_global_id(1), get_global_id(2)));
-
- // Store result
- vstore2(data, 0, (__global float *)dst.ptr);
-}
-
/** Calculates and applies the twiddle factor to a given input.
*
* @param[in] phi The angle.
@@ -252,7 +210,7 @@ __kernel void digit_reverse(
c7 = s4 + t1; \
}
-/** Computes the first stage of a radix-2 DFT.
+/** Computes the first stage of a radix-2 DFT on axis 0.
*
* @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
*
@@ -264,14 +222,14 @@ __kernel void digit_reverse(
* @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination image. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
*/
kernel void fft_radix_2_first_stage_axis_0(
TENSOR3D_DECLARATION(input)
@@ -289,17 +247,17 @@ kernel void fft_radix_2_first_stage_axis_0(
Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
#endif /* IN_PLACE */
- // Load eight complex input values
+ // Load two complex input values
float4 data = vload4(0, (__global float *)input.ptr);
// Compute DFT N = 2
DFT_2(data.s01, data.s23);
- // Store eight complex output values
+ // Store two complex output values
vstore4(data, 0, (__global float *)output.ptr);
}
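For reference, each radix-N stage evaluates the N-point discrete Fourier transform

    X_k = sum_{n=0}^{N-1} x_n * e^(-2*pi*i*k*n/N),    k = 0, ..., N-1

so for N = 2 the DFT_2 macro used above reduces to the classic butterfly, X_0 = x_0 + x_1 and X_1 = x_0 - x_1, presumably computed in place on its two complex arguments.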
-/** Computes the first stage of a radix-3 DFT.
+/** Computes the first stage of a radix-2 DFT on axis 1.
*
* @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
*
@@ -311,14 +269,63 @@ kernel void fft_radix_2_first_stage_axis_0(
* @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination image. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
+ */
+kernel void fft_radix_2_first_stage_axis_1(
+ TENSOR3D_DECLARATION(input)
+#ifndef IN_PLACE
+ ,
+ TENSOR3D_DECLARATION(output)
+#endif /* not IN_PLACE */
+)
+{
+ // Get tensor pointers
+ Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
+#ifdef IN_PLACE
+ Tensor3D output = input;
+#else /* IN_PLACE */
+ Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
+#endif /* IN_PLACE */
+
+ // Load two complex input values
+ float2 data1 = vload2(0, (__global float *)input.ptr);
+ float2 data2 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 1, 0));
+
+ // Compute DFT N = 2
+ DFT_2(data1, data2);
+
+ // Store two complex output values
+ vstore2(data1, 0, (__global float *)output.ptr);
+ vstore2(data2, 0, (__global float *)tensor3D_offset(&output, 0, 1, 0));
+}
+
+/** Computes the first stage of a radix-3 DFT on axis 0.
+ *
+ * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
+ *
+ * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32
+ * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in,out] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
*/
kernel void fft_radix_3_first_stage_axis_0(
TENSOR3D_DECLARATION(input)
@@ -336,19 +343,19 @@ kernel void fft_radix_3_first_stage_axis_0(
Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
#endif /* IN_PLACE */
- // Load eight complex input values
+ // Load three complex input values
float4 data0 = vload4(0, (__global float *)input.ptr);
float2 data1 = vload2(0, (__global float *)tensor3D_offset(&input, 2, 0, 0));
// Compute DFT N = 3
DFT_3(data0.s01, data0.s23, data1.s01);
- // Store eight complex output values
+ // Store three complex output values
vstore4(data0, 0, (__global float *)output.ptr);
vstore2(data1, 0, (__global float *)tensor3D_offset(&output, 2, 0, 0));
}
-/** Computes the first stage of a radix-4 DFT.
+/** Computes the first stage of a radix-3 DFT on axis 1.
*
* @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
*
@@ -360,14 +367,65 @@ kernel void fft_radix_3_first_stage_axis_0(
* @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination image. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
+ */
+kernel void fft_radix_3_first_stage_axis_1(
+ TENSOR3D_DECLARATION(input)
+#ifndef IN_PLACE
+ ,
+ TENSOR3D_DECLARATION(output)
+#endif /* not IN_PLACE */
+)
+{
+ // Get tensor pointers
+ Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
+#ifdef IN_PLACE
+ Tensor3D output = input;
+#else /* IN_PLACE */
+ Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
+#endif /* IN_PLACE */
+
+ // Load three complex input values
+ float2 data0 = vload2(0, (__global float *)input.ptr);
+ float2 data1 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 1, 0));
+ float2 data2 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 2, 0));
+
+ // Compute DFT N = 3
+ DFT_3(data0, data1, data2);
+
+ // Store three complex output values
+ vstore2(data0, 0, (__global float *)output.ptr);
+ vstore2(data1, 0, (__global float *)tensor3D_offset(&output, 0, 1, 0));
+ vstore2(data2, 0, (__global float *)tensor3D_offset(&output, 0, 2, 0));
+}
+
+/** Computes the first stage of a radix-4 DFT on axis 0.
+ *
+ * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
+ *
+ * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32
+ * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in,out] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
*/
kernel void fft_radix_4_first_stage_axis_0(
TENSOR3D_DECLARATION(input)
@@ -385,17 +443,70 @@ kernel void fft_radix_4_first_stage_axis_0(
Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
#endif /* IN_PLACE */
- // Load eight complex input values
+ // Load four complex input values
float8 data = vload8(0, (__global float *)input.ptr);
// Compute DFT N = 4
DFT_4(data.s01, data.s23, data.s45, data.s67);
- // Store eight complex output values
+ // Store four complex output values
vstore8(data, 0, (__global float *)output.ptr);
}
-/** Computes the first stage of a radix-5 DFT.
+/** Computes the first stage of a radix-4 DFT on axis 1.
+ *
+ * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
+ *
+ * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32
+ * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in,out] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
+ */
+kernel void fft_radix_4_first_stage_axis_1(
+ TENSOR3D_DECLARATION(input)
+#ifndef IN_PLACE
+ ,
+ TENSOR3D_DECLARATION(output)
+#endif /* not IN_PLACE */
+)
+{
+ // Get tensor pointers
+ Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
+#ifdef IN_PLACE
+ Tensor3D output = input;
+#else /* IN_PLACE */
+ Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
+#endif /* IN_PLACE */
+
+ // Load four complex input values
+ float2 data0 = vload2(0, (__global float *)input.ptr);
+ float2 data1 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 1, 0));
+ float2 data2 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 2, 0));
+ float2 data3 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 3, 0));
+
+ // Compute DFT N = 4
+ DFT_4(data0, data1, data2, data3);
+
+ // Store four complex output values
+ vstore2(data0, 0, (__global float *)output.ptr);
+ vstore2(data1, 0, (__global float *)tensor3D_offset(&output, 0, 1, 0));
+ vstore2(data2, 0, (__global float *)tensor3D_offset(&output, 0, 2, 0));
+ vstore2(data3, 0, (__global float *)tensor3D_offset(&output, 0, 3, 0));
+}
+
+/** Computes the first stage of a radix-5 DFT on axis 0.
*
* @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
*
@@ -407,14 +518,14 @@ kernel void fft_radix_4_first_stage_axis_0(
* @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination image. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
*/
kernel void fft_radix_5_first_stage_axis_0(
TENSOR3D_DECLARATION(input)
@@ -432,19 +543,19 @@ kernel void fft_radix_5_first_stage_axis_0(
Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
#endif /* IN_PLACE */
- // Load eight complex input values
+ // Load five complex input values
float8 data0 = vload8(0, (__global float *)input.ptr);
float2 data1 = vload2(0, (__global float *)tensor3D_offset(&input, 4, 0, 0));
// Compute DFT N = 5
DFT_5(data0.s01, data0.s23, data0.s45, data0.s67, data1.s01);
- // Store eight complex output values
+ // Store five complex output values
vstore8(data0, 0, (__global float *)output.ptr);
vstore2(data1, 0, (__global float *)tensor3D_offset(&output, 4, 0, 0));
}
-/** Computes the first stage of a radix-7 DFT.
+/** Computes the first stage of a radix-5 DFT on axis 1.
*
* @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
*
@@ -456,14 +567,69 @@ kernel void fft_radix_5_first_stage_axis_0(
* @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination image. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination image in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
+ */
+kernel void fft_radix_5_first_stage_axis_1(
+ TENSOR3D_DECLARATION(input)
+#ifndef IN_PLACE
+ ,
+ TENSOR3D_DECLARATION(output)
+#endif /* not IN_PLACE */
+)
+{
+ // Get tensor pointers
+ Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
+#ifdef IN_PLACE
+ Tensor3D output = input;
+#else /* IN_PLACE */
+ Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
+#endif /* IN_PLACE */
+
+ // Load five complex input values
+ float2 data0 = vload2(0, (__global float *)input.ptr);
+ float2 data1 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 1, 0));
+ float2 data2 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 2, 0));
+ float2 data3 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 3, 0));
+ float2 data4 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 4, 0));
+
+ // Compute DFT N = 5
+ DFT_5(data0, data1, data2, data3, data4);
+
+ // Store five complex output values
+ vstore2(data0, 0, (__global float *)output.ptr);
+ vstore2(data1, 0, (__global float *)tensor3D_offset(&output, 0, 1, 0));
+ vstore2(data2, 0, (__global float *)tensor3D_offset(&output, 0, 2, 0));
+ vstore2(data3, 0, (__global float *)tensor3D_offset(&output, 0, 3, 0));
+ vstore2(data4, 0, (__global float *)tensor3D_offset(&output, 0, 4, 0));
+}
+
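+// More generally, each DFT_N helper referenced by these kernels is expected
+// to evaluate a full N-point DFT on its complex arguments:
+//
+//   X_k = sum_{n=0}^{N-1} x_n * exp(+/- 2*pi*i*n*k / N),  k = 0..N-1
+//
+// with the sign of the exponent fixed by the transform direction where the
+// macros are defined, earlier in this file.
+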
+/** Computes the first stage of a radix-7 DFT on axis 0.
+ *
+ * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
+ *
+ * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32
+ * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in,out] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination image in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
*/
kernel void fft_radix_7_first_stage_axis_0(
TENSOR3D_DECLARATION(input)
@@ -481,7 +647,7 @@ kernel void fft_radix_7_first_stage_axis_0(
Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
#endif /* IN_PLACE */
- // Load eight complex input values
+ // Load seven complex input values
float8 data0 = vload8(0, (__global float *)input.ptr);
float4 data1 = vload4(0, (__global float *)tensor3D_offset(&input, 4, 0, 0));
float2 data2 = vload2(0, (__global float *)tensor3D_offset(&input, 6, 0, 0));
@@ -489,13 +655,72 @@ kernel void fft_radix_7_first_stage_axis_0(
// Compute DFT N = 7
DFT_7(data0.s01, data0.s23, data0.s45, data0.s67, data1.s01, data1.s23, data2.s01);
- // Store eight complex output values
+ // Store seven complex output values
vstore8(data0, 0, (__global float *)output.ptr);
vstore4(data1, 0, (__global float *)tensor3D_offset(&output, 4, 0, 0));
vstore2(data2, 0, (__global float *)tensor3D_offset(&output, 6, 0, 0));
}
-/** Computes the first stage of a radix-8 DFT.
+/** Computes the first stage of a radix-7 DFT on axis 1.
+ *
+ * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
+ *
+ * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32
+ * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in,out] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination image in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
+ */
+kernel void fft_radix_7_first_stage_axis_1(
+ TENSOR3D_DECLARATION(input)
+#ifndef IN_PLACE
+ ,
+ TENSOR3D_DECLARATION(output)
+#endif /* not IN_PLACE */
+)
+{
+ // Get tensor pointers
+ Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
+#ifdef IN_PLACE
+ Tensor3D output = input;
+#else /* IN_PLACE */
+ Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
+#endif /* IN_PLACE */
+
+ // Load seven complex input values
+ float2 data0 = vload2(0, (__global float *)input.ptr);
+ float2 data1 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 1, 0));
+ float2 data2 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 2, 0));
+ float2 data3 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 3, 0));
+ float2 data4 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 4, 0));
+ float2 data5 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 5, 0));
+ float2 data6 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 6, 0));
+
+ // Compute DFT N = 7
+ DFT_7(data0, data1, data2, data3, data4, data5, data6);
+
+ // Store seven complex output values
+ vstore2(data0, 0, (__global float *)output.ptr);
+ vstore2(data1, 0, (__global float *)tensor3D_offset(&output, 0, 1, 0));
+ vstore2(data2, 0, (__global float *)tensor3D_offset(&output, 0, 2, 0));
+ vstore2(data3, 0, (__global float *)tensor3D_offset(&output, 0, 3, 0));
+ vstore2(data4, 0, (__global float *)tensor3D_offset(&output, 0, 4, 0));
+ vstore2(data5, 0, (__global float *)tensor3D_offset(&output, 0, 5, 0));
+ vstore2(data6, 0, (__global float *)tensor3D_offset(&output, 0, 6, 0));
+}
+
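+// None of the *_first_stage_* kernels multiplies by twiddle factors: in the
+// first stage the butterfly span Nx is 1, so the twiddle angle
+// phi = nx * exp_const is always 0 and every twiddle degenerates to 1. The
+// generic stage kernels further below take Nx, Ni and exp_const as
+// arguments and apply the twiddles explicitly.
+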
+/** Computes the first stage of a radix-8 DFT on axis 0.
*
* @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
*
@@ -507,14 +732,14 @@ kernel void fft_radix_7_first_stage_axis_0(
* @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination image. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination image in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
*/
kernel void fft_radix_8_first_stage_axis_0(
TENSOR3D_DECLARATION(input)
@@ -542,7 +767,68 @@ kernel void fft_radix_8_first_stage_axis_0(
vstore16(data, 0, (__global float *)output.ptr);
}
-/** Computes a stage of a radix-2 FFT.
+/** Computes the first stage of a radix-8 DFT on axis 1.
+ *
+ * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
+ *
+ * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32
+ * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in,out] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination image in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
+ */
+kernel void fft_radix_8_first_stage_axis_1(
+ TENSOR3D_DECLARATION(input)
+#ifndef IN_PLACE
+ ,
+ TENSOR3D_DECLARATION(output)
+#endif /* not IN_PLACE */
+)
+{
+ // Get tensor pointers
+ Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
+#ifdef IN_PLACE
+ Tensor3D output = input;
+#else /* IN_PLACE */
+ Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
+#endif /* IN_PLACE */
+
+ // Load eight complex input values
+ float2 data0 = vload2(0, (__global float *)input.ptr);
+ float2 data1 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 1, 0));
+ float2 data2 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 2, 0));
+ float2 data3 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 3, 0));
+ float2 data4 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 4, 0));
+ float2 data5 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 5, 0));
+ float2 data6 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 6, 0));
+ float2 data7 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 7, 0));
+
+ // Compute DFT N = 8
+ DFT_8(data0, data1, data2, data3, data4, data5, data6, data7);
+
+ // Store eight complex output values
+ vstore2(data0, 0, (__global float *)output.ptr);
+ vstore2(data1, 0, (__global float *)tensor3D_offset(&output, 0, 1, 0));
+ vstore2(data2, 0, (__global float *)tensor3D_offset(&output, 0, 2, 0));
+ vstore2(data3, 0, (__global float *)tensor3D_offset(&output, 0, 3, 0));
+ vstore2(data4, 0, (__global float *)tensor3D_offset(&output, 0, 4, 0));
+ vstore2(data5, 0, (__global float *)tensor3D_offset(&output, 0, 5, 0));
+ vstore2(data6, 0, (__global float *)tensor3D_offset(&output, 0, 6, 0));
+ vstore2(data7, 0, (__global float *)tensor3D_offset(&output, 0, 7, 0));
+}
+
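+// On axis 0 the N points of a first-stage butterfly are contiguous in
+// memory, so those kernels use a single wide load/store per work-item (see
+// the vstore16 above for radix-8). On axis 1 the points sit one row apart,
+// which is why the axis-1 variants issue N separate vload2/vstore2 calls
+// through tensor3D_offset with increasing Y offsets instead.
+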
+/** Computes a stage of a radix-2 FFT on axis 0.
*
* @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
*
@@ -554,14 +840,14 @@ kernel void fft_radix_8_first_stage_axis_0(
* @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination image. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination image in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
* @param[in] Nx The butterfly span. Product of the radix orders of the previous stages
* @param[in] Ni Nx * Ny.
* @param[in] exp_const Exponent constant
@@ -612,7 +898,77 @@ kernel void fft_radix_2_axis_0(
vstore2(c1, 0, (__global float *)tensor3D_offset(&output, Nx, 0, 0));
}
-/** Computes a stage of a radix-3 FFT.
+/** Computes a stage of a radix-2 FFT on axis 1.
+ *
+ * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
+ *
+ * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32
+ * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in,out] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination image in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
+ * @param[in] Nx The butterfly span. Product of the radix orders of the previous stages
+ * @param[in] Ni Nx * Ny.
+ * @param[in] exp_const Exponent constant
+ */
+kernel void fft_radix_2_axis_1(
+ TENSOR3D_DECLARATION(input)
+#ifndef IN_PLACE
+ ,
+ TENSOR3D_DECLARATION(output)
+#endif /* not IN_PLACE */
+ ,
+ uint Nx, uint Ni, float exp_const)
+{
+ // Each work-item computes a single radix-2 butterfly
+ uint kx = get_global_id(1);
+
+ // Compute nx
+ uint nx = kx % Nx;
+
+ // Compute n index
+ uint n = nx + (kx / Nx) * Ni;
+
+ // Get tensor pointers
+ Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(input);
+ input.ptr += get_global_id(0) * input.stride_x + n * input.stride_y + get_global_id(2) * input.stride_z;
+#ifdef IN_PLACE
+ Tensor3D output = input;
+#else /* IN_PLACE */
+ Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output);
+ output.ptr += get_global_id(0) * output.stride_x + n * output.stride_y + get_global_id(2) * output.stride_z;
+#endif /* IN_PLACE */
+
+ // Load two complex input values
+ float2 c0 = vload2(0, (__global float *)input.ptr);
+ float2 c1 = vload2(0, (__global float *)tensor3D_offset(&input, 0, Nx, 0));
+
+ // Compute phi
+ float phi = (float)nx * exp_const;
+
+ // Multiply by twiddle factor
+ TWIDDLE_FACTOR_MULTIPLICATION(phi, c1);
+
+ // Compute DFT N = 2
+ DFT_2(c0, c1);
+
+ // Store two complex output values
+ vstore2(c0, 0, (__global float *)output.ptr);
+ vstore2(c1, 0, (__global float *)tensor3D_offset(&output, 0, Nx, 0));
+}
+
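+// Worked example of the index arithmetic above: with Nx = 4, Ni = 8 and
+// radix 2, the work-item with kx = 6 computes nx = 6 % 4 = 2 and
+// n = 2 + (6 / 4) * 8 = 10, so it combines the complex elements at
+// y = 10 and y = n + Nx = 14 of its column, rotating the second one by
+// phi = 2 * exp_const before the butterfly.
+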
+/** Computes a stage of a radix-3 FFT on axis 0.
*
* @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
*
@@ -624,14 +980,14 @@ kernel void fft_radix_2_axis_0(
* @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination image. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination image in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
* @param[in] Nx The butterfly span. Product of the radix orders of the previous stages
* @param[in] Ni Nx * Ny.
* @param[in] exp_const Exponent constant
@@ -685,7 +1041,80 @@ kernel void fft_radix_3_axis_0(
vstore2(c2, 0, (__global float *)tensor3D_offset(&output, 2 * Nx, 0, 0));
}
-/** Computes a stage of a radix-4 FFT.
+/** Computes a stage of a radix-3 FFT on axis 1.
+ *
+ * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
+ *
+ * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32
+ * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in,out] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination image in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
+ * @param[in] Nx The butterfly span. Product of the radix orders of the previous stages
+ * @param[in] Ni Nx * Ny.
+ * @param[in] exp_const Exponent constant
+ */
+kernel void fft_radix_3_axis_1(
+ TENSOR3D_DECLARATION(input)
+#ifndef IN_PLACE
+ ,
+ TENSOR3D_DECLARATION(output)
+#endif /* not IN_PLACE */
+ ,
+ uint Nx, uint Ni, float exp_const)
+{
+ // Each work-item computes a single radix-3 butterfly
+ uint kx = get_global_id(1);
+
+ // Compute nx
+ uint nx = kx % Nx;
+
+ // Compute n index
+ uint n = nx + (kx / Nx) * Ni;
+
+ // Get tensor pointers
+ Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(input);
+ input.ptr += get_global_id(0) * input.stride_x + n * input.stride_y + get_global_id(2) * input.stride_z;
+#ifdef IN_PLACE
+ Tensor3D output = input;
+#else /* IN_PLACE */
+ Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output);
+ output.ptr += get_global_id(0) * output.stride_x + n * output.stride_y + get_global_id(2) * output.stride_z;
+#endif /* IN_PLACE */
+
+ // Load three complex input values
+ float2 c0 = vload2(0, (__global float *)input.ptr);
+ float2 c1 = vload2(0, (__global float *)tensor3D_offset(&input, 0, Nx, 0));
+ float2 c2 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 2 * Nx, 0));
+
+ // Compute phi
+ float phi = (float)nx * exp_const;
+
+ // Multiply by twiddle factor
+ TWIDDLE_FACTOR_MULTIPLICATION(phi, c1);
+ TWIDDLE_FACTOR_MULTIPLICATION(2 * phi, c2);
+
+ // Compute DFT N = 3
+ DFT_3(c0, c1, c2);
+
+ // Store three complex output values
+ vstore2(c0, 0, (__global float *)output.ptr);
+ vstore2(c1, 0, (__global float *)tensor3D_offset(&output, 0, Nx, 0));
+ vstore2(c2, 0, (__global float *)tensor3D_offset(&output, 0, 2 * Nx, 0));
+}
+
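+// TWIDDLE_FACTOR_MULTIPLICATION is also defined earlier in this file. It is
+// assumed to rotate the complex value c by the angle phi, i.e. a complex
+// multiplication along the lines of:
+//
+//   float2 w = (float2)(cos(phi), sin(phi));
+//   c = (float2)(w.x * c.x - w.y * c.y, w.x * c.y + w.y * c.x);
+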
+/** Computes a stage of a radix-4 FFT on axis 0.
*
* @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
*
@@ -697,14 +1126,14 @@ kernel void fft_radix_3_axis_0(
* @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination image. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination image in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
* @param[in] Nx The butterfly span. Product of the radix orders of the previous stages
* @param[in] Ni Nx * Ny.
* @param[in] exp_const Exponent constant
@@ -761,7 +1190,7 @@ kernel void fft_radix_4_axis_0(
vstore2(c3, 0, (__global float *)tensor3D_offset(&output, 3 * Nx, 0, 0));
}
-/** Computes a stage of a radix-5 FFT.
+/** Computes a stage of a radix-4 FFT on axis 1.
*
* @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
*
@@ -773,14 +1202,90 @@ kernel void fft_radix_4_axis_0(
* @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination image. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination image in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
+ * @param[in] Nx The butterfly span. Product of the radix orders of the previous stages
+ * @param[in] Ni Nx * Ny.
+ * @param[in] exp_const Exponent constant
+ */
+kernel void fft_radix_4_axis_1(
+ TENSOR3D_DECLARATION(input)
+#ifndef IN_PLACE
+ ,
+ TENSOR3D_DECLARATION(output)
+#endif /* not IN_PLACE */
+ ,
+ uint Nx, uint Ni, float exp_const)
+{
+ // Each work-item computes a single radix-4 butterfly
+ uint kx = get_global_id(1);
+
+ // Compute nx
+ uint nx = kx % Nx;
+
+ // Compute n index
+ uint n = nx + (kx / Nx) * Ni;
+
+ // Get tensor pointers
+ Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(input);
+ input.ptr += get_global_id(0) * input.stride_x + n * input.stride_y + get_global_id(2) * input.stride_z;
+#ifdef IN_PLACE
+ Tensor3D output = input;
+#else /* IN_PLACE */
+ Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output);
+ output.ptr += get_global_id(0) * output.stride_x + n * output.stride_y + get_global_id(2) * output.stride_z;
+#endif /* IN_PLACE */
+
+ // Load four complex input values
+ float2 c0 = vload2(0, (__global float *)input.ptr);
+ float2 c1 = vload2(0, (__global float *)tensor3D_offset(&input, 0, Nx, 0));
+ float2 c2 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 2 * Nx, 0));
+ float2 c3 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 3 * Nx, 0));
+
+ // Compute phi
+ float phi = (float)nx * exp_const;
+
+ // Multiply by twiddle factor
+ TWIDDLE_FACTOR_MULTIPLICATION(phi, c1);
+ TWIDDLE_FACTOR_MULTIPLICATION(2 * phi, c2);
+ TWIDDLE_FACTOR_MULTIPLICATION(3 * phi, c3);
+
+ // Compute DFT N = 4
+ DFT_4(c0, c1, c2, c3);
+
+ // Store four complex output values
+ vstore2(c0, 0, (__global float *)output.ptr);
+ vstore2(c1, 0, (__global float *)tensor3D_offset(&output, 0, Nx, 0));
+ vstore2(c2, 0, (__global float *)tensor3D_offset(&output, 0, 2 * Nx, 0));
+ vstore2(c3, 0, (__global float *)tensor3D_offset(&output, 0, 3 * Nx, 0));
+}
+
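+// In every axis-1 stage kernel the dispatch is laid out the same way:
+// get_global_id(0) walks the X (element) dimension, get_global_id(1)
+// enumerates the butterflies along the FFT axis, and get_global_id(2) walks
+// the plane/batch dimension; the FFT axis itself is addressed manually via
+// n and the Y stride rather than through a work-item id.
+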
+/** Computes a stage of a radix-5 FFT on axis 0.
+ *
+ * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
+ *
+ * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32
+ * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in,out] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination image in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
* @param[in] Nx The butterfly span. Product of the radix orders of the previous stages
* @param[in] Ni Nx * Ny.
* @param[in] exp_const Exponent constant
@@ -840,7 +1345,7 @@ kernel void fft_radix_5_axis_0(
vstore2(c4, 0, (__global float *)tensor3D_offset(&output, 4 * Nx, 0, 0));
}
-/** Computes a stage of a radix-7 FFT.
+/** Computes a stage of a radix-5 FFT on axis 1.
*
* @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
*
@@ -852,14 +1357,93 @@ kernel void fft_radix_5_axis_0(
* @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination image. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination image in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
+ * @param[in] Nx The butterfly span. Product of the radix orders of the previous stages
+ * @param[in] Ni Nx * Ny.
+ * @param[in] exp_const Exponent constant
+ */
+kernel void fft_radix_5_axis_1(
+ TENSOR3D_DECLARATION(input)
+#ifndef IN_PLACE
+ ,
+ TENSOR3D_DECLARATION(output)
+#endif /* not IN_PLACE */
+ ,
+ uint Nx, uint Ni, float exp_const)
+{
+ // Each work-item computes a single radix-5 butterfly
+ uint kx = get_global_id(1);
+
+ // Compute nx
+ uint nx = kx % Nx;
+
+ // Compute n index
+ uint n = nx + (kx / Nx) * Ni;
+
+ // Get tensor pointers
+ Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(input);
+ input.ptr += get_global_id(0) * input.stride_x + n * input.stride_y + get_global_id(2) * input.stride_z;
+#ifdef IN_PLACE
+ Tensor3D output = input;
+#else /* IN_PLACE */
+ Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output);
+ output.ptr += get_global_id(0) * output.stride_x + n * output.stride_y + get_global_id(2) * output.stride_z;
+#endif /* IN_PLACE */
+
+ // Load five complex input values
+ float2 c0 = vload2(0, (__global float *)input.ptr);
+ float2 c1 = vload2(0, (__global float *)tensor3D_offset(&input, 0, Nx, 0));
+ float2 c2 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 2 * Nx, 0));
+ float2 c3 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 3 * Nx, 0));
+ float2 c4 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 4 * Nx, 0));
+
+ // Compute phi
+ float phi = (float)nx * exp_const;
+
+ // Multiply by twiddle factor
+ TWIDDLE_FACTOR_MULTIPLICATION(phi, c1);
+ TWIDDLE_FACTOR_MULTIPLICATION(2 * phi, c2);
+ TWIDDLE_FACTOR_MULTIPLICATION(3 * phi, c3);
+ TWIDDLE_FACTOR_MULTIPLICATION(4 * phi, c4);
+
+ // Compute DFT N = 5
+ DFT_5(c0, c1, c2, c3, c4);
+
+ // Store five complex output values
+ vstore2(c0, 0, (__global float *)output.ptr);
+ vstore2(c1, 0, (__global float *)tensor3D_offset(&output, 0, Nx, 0));
+ vstore2(c2, 0, (__global float *)tensor3D_offset(&output, 0, 2 * Nx, 0));
+ vstore2(c3, 0, (__global float *)tensor3D_offset(&output, 0, 3 * Nx, 0));
+ vstore2(c4, 0, (__global float *)tensor3D_offset(&output, 0, 4 * Nx, 0));
+}
+
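+// The twiddle angles grow linearly with the tap index: c1 is rotated by
+// phi, c2 by 2 * phi, and so on. As a purely illustrative example, if
+// exp_const = -2 * pi / 8 and nx = 2, then phi = -pi / 2 and the rotation
+// maps c1 = (a, b) to (b, -a).
+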
+/** Computes a stage of a radix-7 FFT on axis 0.
+ *
+ * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
+ *
+ * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32
+ * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in,out] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination image in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
* @param[in] Nx The butterfly span. Product of the radix orders of the previous stages
* @param[in] Ni Nx * Ny.
* @param[in] exp_const Exponent constant
@@ -925,7 +1509,92 @@ kernel void fft_radix_7_axis_0(
vstore2(c6, 0, (__global float *)tensor3D_offset(&output, 6 * Nx, 0, 0));
}
-/** Computes a stage of a radix-8 FFT.
+/** Computes a stage of a radix-7 FFT on axis 1.
+ *
+ * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
+ *
+ * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32
+ * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in,out] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination image in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
+ * @param[in] Nx The butterfly span. Product of the radix orders of the previous stages
+ * @param[in] Ni Nx * Ny.
+ * @param[in] exp_const Exponent constant
+ */
+kernel void fft_radix_7_axis_1(
+ TENSOR3D_DECLARATION(input)
+#ifndef IN_PLACE
+ ,
+ TENSOR3D_DECLARATION(output)
+#endif /* not IN_PLACE */
+ ,
+ uint Nx, uint Ni, float exp_const)
+{
+ // Each work-item computes a single radix-7 butterfly
+ uint kx = get_global_id(1);
+
+ // Compute nx
+ uint nx = kx % Nx;
+
+ // Compute n index
+ uint n = nx + (kx / Nx) * Ni;
+
+ // Get tensor pointers
+ Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(input);
+ input.ptr += get_global_id(0) * input.stride_x + n * input.stride_y + get_global_id(2) * input.stride_z;
+#ifdef IN_PLACE
+ Tensor3D output = input;
+#else /* IN_PLACE */
+ Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output);
+ output.ptr += get_global_id(0) * output.stride_x + n * output.stride_y + get_global_id(2) * output.stride_z;
+#endif /* IN_PLACE */
+
+ // Load seven complex input values
+ float2 c0 = vload2(0, (__global float *)input.ptr);
+ float2 c1 = vload2(0, (__global float *)tensor3D_offset(&input, 0, Nx, 0));
+ float2 c2 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 2 * Nx, 0));
+ float2 c3 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 3 * Nx, 0));
+ float2 c4 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 4 * Nx, 0));
+ float2 c5 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 5 * Nx, 0));
+ float2 c6 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 6 * Nx, 0));
+
+ // Compute phi
+ float phi = (float)nx * exp_const;
+
+ // Multiply by twiddle factor
+ TWIDDLE_FACTOR_MULTIPLICATION(phi, c1);
+ TWIDDLE_FACTOR_MULTIPLICATION(2 * phi, c2);
+ TWIDDLE_FACTOR_MULTIPLICATION(3 * phi, c3);
+ TWIDDLE_FACTOR_MULTIPLICATION(4 * phi, c4);
+ TWIDDLE_FACTOR_MULTIPLICATION(5 * phi, c5);
+ TWIDDLE_FACTOR_MULTIPLICATION(6 * phi, c6);
+
+ // Compute DFT N = 7
+ DFT_7(c0, c1, c2, c3, c4, c5, c6);
+
+ // Store seven complex output values
+ vstore2(c0, 0, (__global float *)output.ptr);
+ vstore2(c1, 0, (__global float *)tensor3D_offset(&output, 0, Nx, 0));
+ vstore2(c2, 0, (__global float *)tensor3D_offset(&output, 0, 2 * Nx, 0));
+ vstore2(c3, 0, (__global float *)tensor3D_offset(&output, 0, 3 * Nx, 0));
+ vstore2(c4, 0, (__global float *)tensor3D_offset(&output, 0, 4 * Nx, 0));
+ vstore2(c5, 0, (__global float *)tensor3D_offset(&output, 0, 5 * Nx, 0));
+ vstore2(c6, 0, (__global float *)tensor3D_offset(&output, 0, 6 * Nx, 0));
+}
+
+/** Computes a stage of a radix-8 FFT on axis 0.
*
* @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
*
@@ -937,14 +1606,14 @@ kernel void fft_radix_7_axis_0(
* @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination image. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination image in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
* @param[in] Nx The butterfly span. Product of the radix orders of the previous stages
* @param[in] Ni Nx * Ny.
* @param[in] exp_const Exponent constant
@@ -1011,4 +1680,92 @@ kernel void fft_radix_8_axis_0(
vstore2(c5, 0, (__global float *)tensor3D_offset(&output, 5 * Nx, 0, 0));
vstore2(c6, 0, (__global float *)tensor3D_offset(&output, 6 * Nx, 0, 0));
vstore2(c7, 0, (__global float *)tensor3D_offset(&output, 7 * Nx, 0, 0));
+}
+
+/** Computes a stage of a radix-8 FFT on axis 1.
+ *
+ * @note In order to perform the FFT function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
+ *
+ * @param[in,out] input_ptr Pointer to the source tensor. Supported data types: F32
+ * @param[in,out] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in,out] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in,out] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in,out] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in,out] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in,out] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in,out] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination image in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
+ * @param[in] Nx The butterfly span. Product of the radix orders of the previous stages
+ * @param[in] Ni Nx * Ny.
+ * @param[in] exp_const Exponent constant
+ */
+kernel void fft_radix_8_axis_1(
+ TENSOR3D_DECLARATION(input)
+#ifndef IN_PLACE
+ ,
+ TENSOR3D_DECLARATION(output)
+#endif /* not IN_PLACE */
+ ,
+ uint Nx, uint Ni, float exp_const)
+{
+ // Each work-item computes a single radix-8 butterfly
+ uint kx = get_global_id(1);
+
+ // Compute nx
+ uint nx = kx % Nx;
+
+ // Compute n index
+ uint n = nx + (kx / Nx) * Ni;
+
+ // Get tensor pointers
+ Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(input);
+ input.ptr += get_global_id(0) * input.stride_x + n * input.stride_y + get_global_id(2) * input.stride_z;
+#ifdef IN_PLACE
+ Tensor3D output = input;
+#else /* IN_PLACE */
+ Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output);
+ output.ptr += get_global_id(0) * output.stride_x + n * output.stride_y + get_global_id(2) * output.stride_z;
+#endif /* IN_PLACE */
+
+ // Load eight complex input values
+ float2 c0 = vload2(0, (__global float *)input.ptr);
+ float2 c1 = vload2(0, (__global float *)tensor3D_offset(&input, 0, Nx, 0));
+ float2 c2 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 2 * Nx, 0));
+ float2 c3 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 3 * Nx, 0));
+ float2 c4 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 4 * Nx, 0));
+ float2 c5 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 5 * Nx, 0));
+ float2 c6 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 6 * Nx, 0));
+ float2 c7 = vload2(0, (__global float *)tensor3D_offset(&input, 0, 7 * Nx, 0));
+
+ // Compute phi
+ float phi = (float)nx * exp_const;
+
+ // Multiply by twiddle factor
+ TWIDDLE_FACTOR_MULTIPLICATION(phi, c1);
+ TWIDDLE_FACTOR_MULTIPLICATION(2 * phi, c2);
+ TWIDDLE_FACTOR_MULTIPLICATION(3 * phi, c3);
+ TWIDDLE_FACTOR_MULTIPLICATION(4 * phi, c4);
+ TWIDDLE_FACTOR_MULTIPLICATION(5 * phi, c5);
+ TWIDDLE_FACTOR_MULTIPLICATION(6 * phi, c6);
+ TWIDDLE_FACTOR_MULTIPLICATION(7 * phi, c7);
+
+ // Compute DFT N = 8
+ DFT_8(c0, c1, c2, c3, c4, c5, c6, c7);
+
+ // Store eight complex output values
+ vstore2(c0, 0, (__global float *)output.ptr);
+ vstore2(c1, 0, (__global float *)tensor3D_offset(&output, 0, Nx, 0));
+ vstore2(c2, 0, (__global float *)tensor3D_offset(&output, 0, 2 * Nx, 0));
+ vstore2(c3, 0, (__global float *)tensor3D_offset(&output, 0, 3 * Nx, 0));
+ vstore2(c4, 0, (__global float *)tensor3D_offset(&output, 0, 4 * Nx, 0));
+ vstore2(c5, 0, (__global float *)tensor3D_offset(&output, 0, 5 * Nx, 0));
+ vstore2(c6, 0, (__global float *)tensor3D_offset(&output, 0, 6 * Nx, 0));
+ vstore2(c7, 0, (__global float *)tensor3D_offset(&output, 0, 7 * Nx, 0));
+}
\ No newline at end of file
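The radix kernels above compute only the butterfly stages and assume their input is already in digit-reversed order. The new fft_digit_reverse.cl file below provides that reordering pass, which the host side is expected to run once per transformed axis before the first radix stage.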
diff --git a/src/core/CL/cl_kernels/fft_digit_reverse.cl b/src/core/CL/cl_kernels/fft_digit_reverse.cl
new file mode 100644
index 0000000000..040c2846bd
--- /dev/null
+++ b/src/core/CL/cl_kernels/fft_digit_reverse.cl
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(VEC_SIZE)
+/** Computes the digit reverse stage on axis X
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] idx_ptr Pointer to the index tensor. Supported data types: U32
+ * @param[in] idx_stride_x Stride of the index tensor in X dimension (in bytes)
+ * @param[in] idx_step_x idx_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] idx_offset_first_element_in_bytes The offset of the first element in the index tensor
+ */
+__kernel void fft_digit_reverse_axis_0(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst),
+ VECTOR_DECLARATION(idx))
+{
+ // Get tensor pointers
+ Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(src);
+ Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
+ Vector idx = CONVERT_TO_VECTOR_STRUCT(idx);
+
+ const unsigned int iidx = *((__global uint *)(idx.ptr));
+
+ // Load data
+#if VEC_SIZE == 1
+ float data = *((__global float *)tensor3D_offset(&src, iidx, get_global_id(1), get_global_id(2)));
+#elif VEC_SIZE == 2
+ float2 data = vload2(0, (__global float *)tensor3D_offset(&src, iidx, get_global_id(1), get_global_id(2)));
+#else // VEC_SIZE == 1
+#error "vec_size of 1 and 2 are supported"
+#endif // VEC_SIZE == 1
+
+ // Create result
+#if VEC_SIZE == 1
+ float2 res = { data, 0 };
+#elif VEC_SIZE == 2
+ float2 res = data;
+#else // VEC_SIZE == 1
+#error "vec_size of 1 and 2 are supported"
+#endif // VEC_SIZE == 1
+
+ // Store result
+#if defined(CONJ)
+ vstore2((float2)(res.s0, -res.s1), 0, (__global float *)dst.ptr);
+#else // defined(CONJ)
+ vstore2(res, 0, (__global float *)dst.ptr);
+#endif // defined(CONJ)
+}
+
+/** Computes the digit reverse stage on axis Y
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] idx_ptr Pointer to the index tensor. Supported data types: U32
+ * @param[in] idx_stride_x Stride of the index tensor in X dimension (in bytes)
+ * @param[in] idx_step_x idx_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] idx_offset_first_element_in_bytes The offset of the first element in the index tensor
+ */
+__kernel void fft_digit_reverse_axis_1(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst),
+ VECTOR_DECLARATION(idx))
+{
+ // Get tensor pointers
+ Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(src);
+ Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
+ Vector idx = CONVERT_TO_VECTOR_STRUCT_NO_STEP(idx);
+
+ const unsigned int iidx = *((__global uint *)vector_offset(&idx, (int)(get_global_id(1))));
+
+ // Load data
+#if VEC_SIZE == 1
+ float data = *((__global float *)tensor3D_offset(&src, get_global_id(0), iidx, get_global_id(2)));
+#elif VEC_SIZE == 2
+ float2 data = vload2(0, (__global float *)tensor3D_offset(&src, get_global_id(0), iidx, get_global_id(2)));
+#else // VEC_SIZE == 1
+#error "vec_size of 1 and 2 are supported"
+#endif // VEC_SIZE == 1
+
+ // Create result
+#if VEC_SIZE == 1
+ float2 res = { data, 0 };
+#elif VEC_SIZE == 2
+ float2 res = data;
+#else // VEC_SIZE == 1
+#error "vec_size of 1 and 2 are supported"
+#endif // VEC_SIZE == 1
+
+ // Store result
+#if defined(CONJ)
+ vstore2((float2)(res.s0, -res.s1), 0, (__global float *)dst.ptr);
+#else // defined(CONJ)
+ vstore2(res, 0, (__global float *)dst.ptr);
+#endif // defined(CONJ)
+}
+#endif // defined(VEC_SIZE) \ No newline at end of file
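Both kernels follow the same per-work-item pattern: gather the element addressed by the precomputed index tensor, widen real input to an interleaved complex pair, and optionally conjugate on store. A minimal host-side sketch of the axis-0 gather, assuming a real (VEC_SIZE == 1) input:

#include <cstddef>
#include <cstdint>
#include <vector>

void digit_reverse_axis0(const std::vector<float> &src, const std::vector<uint32_t> &idx,
                         std::vector<float> &dst) // dst holds interleaved (re, im) pairs
{
    for(std::size_t n = 0; n < idx.size(); ++n)
    {
        dst[2 * n + 0] = src[idx[n]]; // gather through the precomputed reversed index
        dst[2 * n + 1] = 0.0f;        // real input: the imaginary part starts at zero
    }
}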
diff --git a/src/core/CL/cl_kernels/fft_scale.cl b/src/core/CL/cl_kernels/fft_scale.cl
new file mode 100644
index 0000000000..bf78a26eb8
--- /dev/null
+++ b/src/core/CL/cl_kernels/fft_scale.cl
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+/** Computes the FFT scale stage
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr (Optional) Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x (Optional) Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x (Optional) dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y (Optional) Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y (Optional) dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_stride_z                      (Optional) Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z (Optional) dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes (Optional) The offset of the first element in the destination tensor
+ * @param[in] scale Scale to apply to the complex value
+ */
+__kernel void fft_scale_conj(
+ TENSOR3D_DECLARATION(src)
+#ifndef IN_PLACE
+ ,
+ TENSOR3D_DECLARATION(dst)
+#endif /* not IN_PLACE */
+ ,
+ float scale)
+{
+ // Get tensor pointers
+ Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
+#if defined(IN_PLACE)
+ Tensor3D dst = src;
+#else /* IN_PLACE */
+ Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
+#endif /* IN_PLACE */
+
+ // Store result
+#if VEC_SIZE == 1
+ *((__global float *)dst.ptr) = (*(__global float *)src.ptr) / scale;
+#elif VEC_SIZE == 2
+ // Load data
+ float2 data = vload2(0, (__global float *)src.ptr);
+ data /= scale;
+#if defined(CONJ)
+ vstore2((float2)(data.s0, -data.s1), 0, (__global float *)dst.ptr);
+#else // defined(CONJ)
+ vstore2(data, 0, (__global float *)dst.ptr);
+#endif // defined(CONJ)
+#else // VEC_SIZE == 1
+#error "vec_size of 1 and 2 are supported"
+#endif // VEC_SIZE == 1
+} \ No newline at end of file
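The conjugate-and-divide pair matches the usual way of deriving an inverse transform from a forward one, ifft(x) = conj(fft(conj(x))) / N, which is presumably why scaling and conjugation are fused into one pass. A sketch of that pass on host data (illustration only):

#include <complex>
#include <vector>

std::vector<std::complex<float>> scale_conj(std::vector<std::complex<float>> v, float scale, bool conj)
{
    for(auto &c : v)
    {
        c /= scale;           // divide by N for the inverse transform
        if(conj)
        {
            c = std::conj(c); // undo the conjugation applied before the forward pass
        }
    }
    return v;
}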
diff --git a/src/core/CL/cl_kernels/pixelwise_mul_float.cl b/src/core/CL/cl_kernels/pixelwise_mul_float.cl
index 9fa540e946..d0e04b2ffe 100644
--- a/src/core/CL/cl_kernels/pixelwise_mul_float.cl
+++ b/src/core/CL/cl_kernels/pixelwise_mul_float.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -94,4 +94,52 @@ __kernel void pixelwise_mul_float(
// Store result
vstore16(res, 0, (__global DATA_TYPE_OUT *)out.ptr);
}
-#endif /* defined(DATA_TYPE_IN1) && defined(DATA_TYPE_IN2) && defined(DATA_TYPE_RES) && defined(DATA_TYPE_OUT) */ \ No newline at end of file
+#endif /* defined(DATA_TYPE_IN1) && defined(DATA_TYPE_IN2) && defined(DATA_TYPE_RES) && defined(DATA_TYPE_OUT) */
+
+/** Performs a pixelwise multiplication of complex float values
+ *
+ * @param[in] in1_ptr Pointer to the source image. Supported data types: F32
+ * @param[in] in1_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] in1_step_x in1_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] in1_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] in1_step_y in1_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  in1_stride_z                      Stride of the source image in Z dimension (in bytes)
+ * @param[in]  in1_step_z                        in1_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] in1_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] in2_ptr Pointer to the source image. Supported data types: same as @p in1_ptr
+ * @param[in] in2_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] in2_step_x in2_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] in2_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] in2_step_y in2_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  in2_stride_z                      Stride of the source image in Z dimension (in bytes)
+ * @param[in]  in2_step_z                        in2_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] in2_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[out] out_ptr Pointer to the destination image. Supported data types: same as @p in1_ptr
+ * @param[in] out_stride_x Stride of the destination image in X dimension (in bytes)
+ * @param[in] out_step_x out_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] out_stride_y Stride of the destination image in Y dimension (in bytes)
+ * @param[in] out_step_y out_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  out_stride_z                      Stride of the destination image in Z dimension (in bytes)
+ * @param[in]  out_step_z                        out_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] out_offset_first_element_in_bytes The offset of the first element in the destination image
+ */
+__kernel void pixelwise_mul_complex(
+ TENSOR3D_DECLARATION(in1),
+ TENSOR3D_DECLARATION(in2),
+ TENSOR3D_DECLARATION(out))
+{
+ // Get pixels pointer
+ Tensor3D in1 = CONVERT_TO_TENSOR3D_STRUCT(in1);
+ Tensor3D in2 = CONVERT_TO_TENSOR3D_STRUCT(in2);
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(out);
+
+ // Load data
+ float2 vin1 = vload2(0, (__global float *)in1.ptr);
+ float2 vin2 = vload2(0, (__global float *)in2.ptr);
+
+ // Perform complex multiplication
+    float2 res = { vin1.x * vin2.x - vin1.y * vin2.y, vin1.x * vin2.y + vin2.x * vin1.y };
+
+ // Store result
+ vstore2(res, 0, (__global float *)out.ptr);
+}
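The kernel applies the standard complex product (a + bi)(c + di) = (ac - bd) + (ad + bc)i. A quick host-side cross-check against std::complex (illustrative only):

#include <cassert>
#include <complex>

int main()
{
    const std::complex<float> v1(3.f, -2.f);
    const std::complex<float> v2(1.f, 4.f);
    const float re = v1.real() * v2.real() - v1.imag() * v2.imag();
    const float im = v1.real() * v2.imag() + v2.real() * v1.imag();
    assert(std::complex<float>(re, im) == v1 * v2); // both give (11, 10)
    return 0;
}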
diff --git a/src/core/CL/cl_kernels/reduction_operation.cl b/src/core/CL/cl_kernels/reduction_operation.cl
index b4ede25296..2651123cf5 100644
--- a/src/core/CL/cl_kernels/reduction_operation.cl
+++ b/src/core/CL/cl_kernels/reduction_operation.cl
@@ -307,6 +307,10 @@ __kernel void reduction_operation_z(
VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
res = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+#if defined(COMPLEX)
+ VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
+ res1 = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 8, 0, 0)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+#endif // defined(COMPLEX)
#if defined(SUM_SQUARE)
res *= res;
#endif // defined(SUM_SQUARE)
@@ -320,6 +324,11 @@ __kernel void reduction_operation_z(
VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
in = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, z)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+#if defined(COMPLEX)
+ VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
+ in1 = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 8, 0, z)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+#endif // defined(COMPLEX)
+
#if defined(ARG_MAX)
uint16 cond_conv = CONVERT(isgreater(in, res), uint16);
indx = select(indx, z, cond_conv);
@@ -334,8 +343,11 @@ __kernel void reduction_operation_z(
#endif // defined(SUM_SQUARE)
#if defined(PROD)
res *= in;
-#else //!defined(PROD)
+#else //!defined(PROD)
res += in;
+#if defined(COMPLEX)
+ res1 += in1;
+#endif // defined(COMPLEX)
#endif //defined(PROD)
#endif // defined(ARG_MAX) || defined(ARG_MIN)
}
@@ -348,6 +360,9 @@ __kernel void reduction_operation_z(
res /= DEPTH;
#endif // defined(MEAN)
vstore16(CONVERT(res, VEC_DATA_TYPE(DATA_TYPE, 16)), 0, (__global DATA_TYPE *)output.ptr);
+#if defined(COMPLEX)
+ vstore16(CONVERT(res1, VEC_DATA_TYPE(DATA_TYPE, 16)), 0, (__global DATA_TYPE *)tensor3D_offset(&output, 8, 0, 0));
+#endif // defined(COMPLEX)
#endif // defined(ARG_MAX) || defined(ARG_MIN)
}
#endif /* defined(DEPTH) */
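The COMPLEX path above needs a second accumulator only because a 16-wide float vector spans just 8 interleaved complex values; the arithmetic itself is unchanged, since complex addition is componentwise. A sketch of that equivalence:

#include <complex>
#include <vector>

// Summing interleaved (re, im) floats along the depth axis gives the same
// result as summing complex numbers, because complex addition is componentwise.
std::complex<float> reduce_z(const std::vector<std::complex<float>> &column)
{
    std::complex<float> acc(0.f, 0.f);
    for(const auto &c : column)
    {
        acc += c;
    }
    return acc;
}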
diff --git a/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp b/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp
index d72647c3c9..b04293db5b 100644
--- a/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp
+++ b/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp
@@ -34,16 +34,19 @@ namespace arm_compute
{
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *idx, unsigned int axis)
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *idx, const FFTDigitReverseKernelInfo &config)
{
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 2, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() != DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->num_channels() != 1 && input->num_channels() != 2);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(idx, 1, DataType::U32);
- ARM_COMPUTE_RETURN_ERROR_ON(axis != 0);
+ ARM_COMPUTE_RETURN_ERROR_ON(std::set<unsigned int>({ 0, 1 }).count(config.axis) == 0);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[config.axis] != idx->tensor_shape().x());
// Checks performed when output is configured
if((output != nullptr) && (output->total_size() != 0))
{
+ ARM_COMPUTE_RETURN_ERROR_ON(output->num_channels() != 2);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
}
@@ -51,11 +54,11 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
return Status{};
}
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, ITensorInfo *idx, unsigned int axis)
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, ITensorInfo *idx, const FFTDigitReverseKernelInfo &config)
{
- ARM_COMPUTE_UNUSED(idx, axis);
+ ARM_COMPUTE_UNUSED(idx, config);
- auto_init_if_empty(*output, *input);
+ auto_init_if_empty(*output, input->clone()->set_num_channels(2));
Window win = calculate_max_window(*output, Steps());
output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
@@ -69,25 +72,30 @@ CLFFTDigitReverseKernel::CLFFTDigitReverseKernel()
{
}
-void CLFFTDigitReverseKernel::configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *idx, unsigned int axis)
+void CLFFTDigitReverseKernel::configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *idx, const FFTDigitReverseKernelInfo &config)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, idx);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), idx->info(), axis));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), idx->info(), config));
_input = input;
_output = output;
_idx = idx;
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("digit_reverse"));
+ CLBuildOptions build_opts;
+ build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(input->info()->num_channels()));
+ build_opts.add_option_if(config.conjugate, "-DCONJ");
+ std::string kernel_name = "fft_digit_reverse_axis_" + support::cpp11::to_string(config.axis);
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
// Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), output->info(), idx->info(), axis);
+ auto win_config = validate_and_configure_window(input->info(), output->info(), idx->info(), config);
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
ICLKernel::configure_internal(win_config.second);
// Set config_id for enabling LWS tuning
- _config_id = "digit_reverse_";
+ _config_id = kernel_name;
+ _config_id += "_";
_config_id += lower_string(string_from_data_type(input->info()->data_type()));
_config_id += "_";
_config_id += support::cpp11::to_string(input->info()->dimension(0));
@@ -95,10 +103,10 @@ void CLFFTDigitReverseKernel::configure(const ICLTensor *input, ICLTensor *outpu
_config_id += support::cpp11::to_string(input->info()->dimension(1));
}
-Status CLFFTDigitReverseKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *idx, unsigned int axis)
+Status CLFFTDigitReverseKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *idx, const FFTDigitReverseKernelInfo &config)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, idx, axis));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), idx->clone().get(), axis).first);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, idx, config));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), idx->clone().get(), config).first);
return Status{};
}
diff --git a/src/core/CL/kernels/CLFFTRadixStageKernel.cpp b/src/core/CL/kernels/CLFFTRadixStageKernel.cpp
index 87a12b9da9..83d55b7092 100644
--- a/src/core/CL/kernels/CLFFTRadixStageKernel.cpp
+++ b/src/core/CL/kernels/CLFFTRadixStageKernel.cpp
@@ -38,12 +38,13 @@ namespace arm_compute
{
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const FFTRadixStageKernelDescriptor &config)
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const FFTRadixStageKernelInfo &config)
{
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 2, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON(config.axis != 0);
ARM_COMPUTE_RETURN_ERROR_ON(CLFFTRadixStageKernel::supported_radix().count(config.radix) == 0);
+ ARM_COMPUTE_RETURN_ERROR_ON(std::set<unsigned int>({ 0, 1 }).count(config.axis) == 0);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[config.axis] % config.radix);
// Checks performed when output is configured
if((output != nullptr) && (output->total_size() != 0))
@@ -55,14 +56,18 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
return Status{};
}
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const FFTRadixStageKernelDescriptor &config)
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const FFTRadixStageKernelInfo &config)
{
if(output != nullptr)
{
auto_init_if_empty(*output, *input);
}
- Window win = calculate_max_window(*input, Steps(config.radix));
+ // Setup window steps
+ Steps steps;
+ steps.set(config.axis, config.radix);
+
+ Window win = calculate_max_window(*input, steps);
if(output != nullptr)
{
output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
@@ -77,7 +82,7 @@ CLFFTRadixStageKernel::CLFFTRadixStageKernel()
{
}
-void CLFFTRadixStageKernel::configure(ICLTensor *input, ICLTensor *output, const FFTRadixStageKernelDescriptor &config)
+void CLFFTRadixStageKernel::configure(ICLTensor *input, ICLTensor *output, const FFTRadixStageKernelInfo &config)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (output != nullptr) ? output->info() : nullptr, config));
@@ -105,7 +110,7 @@ void CLFFTRadixStageKernel::configure(ICLTensor *input, ICLTensor *output, const
unsigned int idx = (1 + (_run_in_place ? 0 : 1)) * num_arguments_per_3D_tensor(); // Skip the input and output parameters
_kernel.setArg<cl_uint>(idx++, config.Nx);
_kernel.setArg<cl_uint>(idx++, Ni);
- _kernel.setArg<cl_float>(idx++, exp_const);
+ _kernel.setArg<cl_float>(idx, exp_const);
}
// Configure kernel window
@@ -123,7 +128,7 @@ void CLFFTRadixStageKernel::configure(ICLTensor *input, ICLTensor *output, const
_config_id += support::cpp11::to_string(input->info()->dimension(1));
}
-Status CLFFTRadixStageKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const FFTRadixStageKernelDescriptor &config)
+Status CLFFTRadixStageKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const FFTRadixStageKernelInfo &config)
{
const bool run_in_place = (output == nullptr) || (output == input);
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, config));
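The new axis-length check (size divisible by the radix) reflects the mixed-radix decomposition computed elsewhere by arm_compute::helpers::fft::decompose_stages. A plausible sketch of such a decomposition, greedy over the supported radices (an assumption for illustration, not the library's implementation):

#include <set>
#include <vector>

std::vector<unsigned int> decompose(unsigned int N, const std::set<unsigned int> &radices)
{
    std::vector<unsigned int> stages;
    for(auto r = radices.rbegin(); r != radices.rend(); ++r) // try larger radices first
    {
        while(N % *r == 0)
        {
            stages.push_back(*r);
            N /= *r;
        }
    }
    return (N == 1) ? stages : std::vector<unsigned int>{}; // empty: not decomposable
}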
diff --git a/src/core/CL/kernels/CLFFTScaleKernel.cpp b/src/core/CL/kernels/CLFFTScaleKernel.cpp
new file mode 100644
index 0000000000..59f1fd7502
--- /dev/null
+++ b/src/core/CL/kernels/CLFFTScaleKernel.cpp
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLFFTScaleKernel.h"
+
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/CLValidate.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Window.h"
+
+namespace arm_compute
+{
+namespace
+{
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 2, DataType::F32);
+
+ // Checks performed when output is configured
+ if((output != nullptr) && (output->total_size() != 0))
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON(output->num_channels() != 1 && output->num_channels() != 2);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ }
+
+ return Status{};
+}
+
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
+{
+ // Configure kernel window
+ Window win = calculate_max_window(*input, Steps());
+
+ if(output != nullptr)
+ {
+        // Output auto initialization if not yet initialized
+ auto_init_if_empty(*output, *input->clone());
+
+ // CLFFTScaleKernel doesn't need padding so update_window_and_padding() can be skipped
+ Coordinates coord;
+ coord.set_num_dimensions(output->num_dimensions());
+ output->set_valid_region(ValidRegion(coord, output->tensor_shape()));
+ }
+
+ return std::make_pair(Status{}, win);
+}
+} // namespace
+
+CLFFTScaleKernel::CLFFTScaleKernel()
+ : _input(nullptr), _output(nullptr), _run_in_place(false)
+{
+}
+
+void CLFFTScaleKernel::configure(ICLTensor *input, ICLTensor *output, const FFTScaleKernelInfo &config)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input);
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (output != nullptr) ? output->info() : nullptr));
+
+ _input = input;
+ _output = output;
+ _run_in_place = (output == nullptr) || (output == input);
+
+ // Create kernel
+ CLBuildOptions build_opts;
+ build_opts.add_option_if(_run_in_place, "-DIN_PLACE");
+ build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(output != nullptr ? output->info()->num_channels() : input->info()->num_channels()));
+ build_opts.add_option_if(config.conjugate, "-DCONJ");
+ std::string kernel_name = "fft_scale_conj";
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+
+ // Set static arguments
+ unsigned int idx = (1 + (_run_in_place ? 0 : 1)) * num_arguments_per_3D_tensor(); // Skip the input and output parameters
+ _kernel.setArg<cl_float>(idx, config.scale);
+
+ // Configure kernel window
+ auto win_config = validate_and_configure_window(input->info(), _run_in_place ? nullptr : output->info());
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+ ICLKernel::configure_internal(win_config.second);
+
+ // Set config_id for enabling LWS tuning
+ _config_id = kernel_name;
+ _config_id += "_";
+ _config_id += lower_string(string_from_data_type(input->info()->data_type()));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(input->info()->dimension(0));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(input->info()->dimension(1));
+}
+
+Status CLFFTScaleKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const FFTScaleKernelInfo &config)
+{
+ ARM_COMPUTE_UNUSED(config);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first);
+
+ return Status{};
+}
+
+void CLFFTScaleKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+
+ Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
+ Window slice = collapsed.first_slice_window_3D();
+
+ do
+ {
+ unsigned int idx = 0;
+ add_3D_tensor_argument(idx, _input, slice);
+ if(!_run_in_place)
+ {
+ add_3D_tensor_argument(idx, _output, slice);
+ }
+ enqueue(queue, *this, slice, lws_hint());
+ }
+ while(collapsed.slide_window_slice_3D(slice));
+}
+} // namespace arm_compute
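The static-argument index above relies on each Tensor3D expanding to a fixed block of kernel arguments; the scale float is placed after one block when running in-place and after two otherwise. A sketch of the indexing, assuming 8 arguments per 3D tensor (pointer, three stride/step pairs, first-element offset; the count is an assumption for illustration):

constexpr unsigned int kArgsPer3DTensor = 8; // assumed value

unsigned int scale_arg_index(bool run_in_place)
{
    return (1u + (run_in_place ? 0u : 1u)) * kArgsPer3DTensor;
}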
diff --git a/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp b/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp
index 286b94ebdc..9fa92bde75 100644
--- a/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp
+++ b/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -38,8 +38,8 @@
#include <set>
#include <string>
-using namespace arm_compute;
-
+namespace arm_compute
+{
namespace
{
constexpr unsigned int num_elems_processed_per_iteration = 16;
@@ -276,3 +276,139 @@ BorderSize CLPixelWiseMultiplicationKernel::border_size() const
const unsigned int border = std::min<unsigned int>(num_elems_processed_per_iteration - 1U, replicateSize);
return BorderSize(0, border, 0, 0);
}
+
+namespace
+{
+constexpr unsigned int num_elems_processed_per_iteration_complex = 1;
+
+Status validate_arguments_complex(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 2, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 2, DataType::F32);
+
+ const TensorShape &out_shape = TensorShape::broadcast_shape(input1->tensor_shape(), input2->tensor_shape());
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");
+
+ // Validate in case of configured output
+ if(output->total_size() > 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 2, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output->tensor_shape(), 0), "Wrong shape for output");
+ }
+
+ return Status{};
+}
+
+std::pair<Status, Window> validate_and_configure_window_complex(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output)
+{
+ const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(*input1, *input2);
+ const TensorShape &out_shape = broadcast_pair.first;
+ const ValidRegion &valid_region = broadcast_pair.second;
+
+ // Auto initialize output if not initialized
+ const TensorInfo out_info(out_shape, input1->num_channels(), input1->data_type());
+ auto_init_if_empty(*output, out_info);
+
+ Window win = calculate_max_window(valid_region, Steps(num_elems_processed_per_iteration_complex));
+ Window win_input1 = win.broadcast_if_dimension_le_one(*input1);
+ Window win_input2 = win.broadcast_if_dimension_le_one(*input2);
+
+ AccessWindowHorizontal input1_access(input1, 0, num_elems_processed_per_iteration_complex);
+ AccessWindowHorizontal input2_access(input2, 0, num_elems_processed_per_iteration_complex);
+ AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration_complex);
+
+ bool window_changed = update_window_and_padding(win_input1, input1_access)
+ || update_window_and_padding(win_input2, input2_access)
+ || update_window_and_padding(win, output_access);
+
+ output_access.set_valid_region(win, valid_region);
+
+ Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+ return std::make_pair(err, win);
+}
+} // namespace
+
+CLComplexPixelWiseMultiplicationKernel::CLComplexPixelWiseMultiplicationKernel()
+ : _input1(nullptr), _input2(nullptr), _output(nullptr)
+{
+}
+
+void CLComplexPixelWiseMultiplicationKernel::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_complex(input1->info(), input2->info(), output->info()));
+
+ // Configure kernel window
+ auto win_config = validate_and_configure_window_complex(input1->info(), input2->info(), output->info());
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+
+ _input1 = input1;
+ _input2 = input2;
+ _output = output;
+
+ // Create kernel
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("pixelwise_mul_complex"));
+
+ ICLKernel::configure_internal(win_config.second);
+}
+
+Status CLComplexPixelWiseMultiplicationKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_complex(input1, input2, output));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_complex(input1->clone().get(), input2->clone().get(), output->clone().get()).first);
+
+ return Status{};
+}
+
+void CLComplexPixelWiseMultiplicationKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+
+ const TensorShape &in_shape1 = _input1->info()->tensor_shape();
+ const TensorShape &in_shape2 = _input2->info()->tensor_shape();
+ const TensorShape &out_shape = _output->info()->tensor_shape();
+
+ bool can_collapse = true;
+ if(std::min(in_shape1.total_size(), in_shape2.total_size()) > 1)
+ {
+ can_collapse = (std::min(in_shape1.num_dimensions(), in_shape2.num_dimensions()) > Window::DimZ);
+ for(size_t d = Window::DimZ; can_collapse && (d < out_shape.num_dimensions()); ++d)
+ {
+ can_collapse = (in_shape1[d] == in_shape2[d]);
+ }
+ }
+
+ bool has_collapsed = false;
+ Window collapsed = can_collapse ? window.collapse_if_possible(ICLKernel::window(), Window::DimZ, &has_collapsed) : window;
+
+ const TensorShape &in_shape1_collapsed = has_collapsed ? in_shape1.collapsed_from(Window::DimZ) : in_shape1;
+ const TensorShape &in_shape2_collapsed = has_collapsed ? in_shape2.collapsed_from(Window::DimZ) : in_shape2;
+
+ Window slice = collapsed.first_slice_window_3D();
+ Window slice_input1 = slice.broadcast_if_dimension_le_one(in_shape1_collapsed);
+ Window slice_input2 = slice.broadcast_if_dimension_le_one(in_shape2_collapsed);
+
+ do
+ {
+ unsigned int idx = 0;
+ add_3D_tensor_argument(idx, _input1, slice_input1);
+ add_3D_tensor_argument(idx, _input2, slice_input2);
+ add_3D_tensor_argument(idx, _output, slice);
+ enqueue(queue, *this, slice);
+
+ collapsed.slide_window_slice_3D(slice_input1);
+ collapsed.slide_window_slice_3D(slice_input2);
+ }
+ while(collapsed.slide_window_slice_3D(slice));
+}
+
+BorderSize CLComplexPixelWiseMultiplicationKernel::border_size() const
+{
+ const unsigned int replicateSize = _output->info()->dimension(0) - std::min(_input1->info()->dimension(0), _input2->info()->dimension(0));
+ const unsigned int border = std::min<unsigned int>(num_elems_processed_per_iteration_complex - 1U, replicateSize);
+ return BorderSize(0, border, 0, 0);
+}
+} // namespace arm_compute \ No newline at end of file
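A hypothetical caller of the new kernel, following the validate-then-configure convention visible above (initialisation of the tensors as 2-channel F32 and CL context setup are assumed to have happened already; names are illustrative):

#include "arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

void multiply_complex(arm_compute::CLTensor &in1, arm_compute::CLTensor &in2, arm_compute::CLTensor &out)
{
    using arm_compute::CLComplexPixelWiseMultiplicationKernel;
    // Check the tensor combination first, then configure and enqueue.
    ARM_COMPUTE_ERROR_THROW_ON(CLComplexPixelWiseMultiplicationKernel::validate(in1.info(), in2.info(), out.info()));
    CLComplexPixelWiseMultiplicationKernel kernel;
    kernel.configure(&in1, &in2, &out);
    arm_compute::CLScheduler::get().enqueue(kernel);
}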
diff --git a/src/core/CL/kernels/CLReductionOperationKernel.cpp b/src/core/CL/kernels/CLReductionOperationKernel.cpp
index 9f498b8273..db4850f14e 100644
--- a/src/core/CL/kernels/CLReductionOperationKernel.cpp
+++ b/src/core/CL/kernels/CLReductionOperationKernel.cpp
@@ -47,7 +47,14 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, u
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
+ if(input->num_channels() == 1)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 2, DataType::F32);
+ }
ARM_COMPUTE_RETURN_ERROR_ON_MSG(op == ReductionOperation::SUM_SQUARE && input->data_type() == DataType::QASYMM8, "Not supported reduction operation for QASYMM8");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis");
@@ -77,7 +84,7 @@ std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITe
output_shape.set(axis, 1);
const bool is_arg_min_max = (op == ReductionOperation::ARG_IDX_MIN || op == ReductionOperation::ARG_IDX_MAX);
DataType output_data_type = is_arg_min_max ? DataType::U32 : input->data_type();
- auto_init_if_empty(*output, output_shape, 1, output_data_type, input->quantization_info());
+ auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape).set_data_type(output_data_type).reset_padding().set_is_resizable(true));
const unsigned int num_elems_processed_per_iteration = (is_data_type_quantized(input->data_type()) && (axis == 0)) ? 1 : 16;
Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
@@ -160,6 +167,7 @@ void CLReductionOperationKernel::configure(const ICLTensor *input, ICLTensor *ou
build_opts.add_option_if(op == ReductionOperation::ARG_IDX_MAX, "-DARG_MAX");
build_opts.add_option_if(op == ReductionOperation::ARG_IDX_MIN, "-DARG_MIN");
build_opts.add_option_if(op == ReductionOperation::PROD, "-DPROD");
+ build_opts.add_option_if(input->info()->num_channels() == 2, "-DCOMPLEX");
switch(op)
{
diff --git a/src/runtime/CL/functions/CLFFT1D.cpp b/src/runtime/CL/functions/CLFFT1D.cpp
index d893cd3d1b..67111e7e5c 100644
--- a/src/runtime/CL/functions/CLFFT1D.cpp
+++ b/src/runtime/CL/functions/CLFFT1D.cpp
@@ -31,7 +31,7 @@
namespace arm_compute
{
CLFFT1D::CLFFT1D(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(std::move(memory_manager)), _digit_reversed_input(), _digit_reverse_indices(), _digit_reverse_kernel(), _fft_kernels(), _num_ffts(0)
+ : _memory_group(std::move(memory_manager)), _digit_reverse_kernel(), _fft_kernels(), _scale_kernel(), _digit_reversed_input(), _digit_reverse_indices(), _num_ffts(0), _run_scale(false)
{
}
@@ -46,11 +46,18 @@ void CLFFT1D::configure(const ICLTensor *input, ICLTensor *output, const FFT1DIn
const auto decomposed_vector = arm_compute::helpers::fft::decompose_stages(N, supported_radix);
ARM_COMPUTE_ERROR_ON(decomposed_vector.empty());
+ // Flags
+ _run_scale = config.direction == FFTDirection::Inverse;
+ const bool is_c2r = input->info()->num_channels() == 2 && output->info()->num_channels() == 1;
+
// Configure digit reverse
+ FFTDigitReverseKernelInfo digit_reverse_config;
+ digit_reverse_config.axis = config.axis;
+ digit_reverse_config.conjugate = config.direction == FFTDirection::Inverse;
TensorInfo digit_reverse_indices_info(TensorShape(input->info()->tensor_shape()[config.axis]), 1, DataType::U32);
_digit_reverse_indices.allocator()->init(digit_reverse_indices_info);
_memory_group.manage(&_digit_reversed_input);
- _digit_reverse_kernel.configure(input, &_digit_reversed_input, &_digit_reverse_indices, config.axis);
+ _digit_reverse_kernel.configure(input, &_digit_reversed_input, &_digit_reverse_indices, digit_reverse_config);
// Create and configure FFT kernels
unsigned int Nx = 1;
@@ -60,16 +67,25 @@ void CLFFT1D::configure(const ICLTensor *input, ICLTensor *output, const FFT1DIn
{
const unsigned int radix_for_stage = decomposed_vector.at(i);
- FFTRadixStageKernelDescriptor fft_kernel_desc;
- fft_kernel_desc.axis = config.axis;
- fft_kernel_desc.radix = radix_for_stage;
- fft_kernel_desc.Nx = Nx;
- fft_kernel_desc.is_first_stage = (i == 0);
- _fft_kernels[i].configure(&_digit_reversed_input, i == (_num_ffts - 1) ? output : nullptr, fft_kernel_desc);
+ FFTRadixStageKernelInfo fft_kernel_info;
+ fft_kernel_info.axis = config.axis;
+ fft_kernel_info.radix = radix_for_stage;
+ fft_kernel_info.Nx = Nx;
+ fft_kernel_info.is_first_stage = (i == 0);
+ _fft_kernels[i].configure(&_digit_reversed_input, ((i == (_num_ffts - 1)) && !is_c2r) ? output : nullptr, fft_kernel_info);
Nx *= radix_for_stage;
}
+ // Configure scale kernel
+ if(_run_scale)
+ {
+ FFTScaleKernelInfo scale_config;
+ scale_config.scale = static_cast<float>(N);
+ scale_config.conjugate = config.direction == FFTDirection::Inverse;
+ is_c2r ? _scale_kernel.configure(&_digit_reversed_input, output, scale_config) : _scale_kernel.configure(output, nullptr, scale_config);
+ }
+
// Allocate tensors
_digit_reversed_input.allocator()->allocate();
_digit_reverse_indices.allocator()->allocate();
@@ -84,8 +100,9 @@ void CLFFT1D::configure(const ICLTensor *input, ICLTensor *output, const FFT1DIn
Status CLFFT1D::validate(const ITensorInfo *input, const ITensorInfo *output, const FFT1DInfo &config)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 2, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON(config.axis != 0);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() != DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->num_channels() != 1 && input->num_channels() != 2);
+ ARM_COMPUTE_RETURN_ERROR_ON(std::set<unsigned int>({ 0, 1 }).count(config.axis) == 0);
// Check if FFT is decomposable
const auto supported_radix = CLFFTRadixStageKernel::supported_radix();
@@ -96,6 +113,8 @@ Status CLFFT1D::validate(const ITensorInfo *input, const ITensorInfo *output, co
// Checks performed when output is configured
if((output != nullptr) && (output->total_size() != 0))
{
+ ARM_COMPUTE_RETURN_ERROR_ON(output->num_channels() == 1 && input->num_channels() == 1);
+ ARM_COMPUTE_RETURN_ERROR_ON(output->num_channels() != 1 && output->num_channels() != 2);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
}
@@ -107,11 +126,19 @@ void CLFFT1D::run()
{
MemoryGroupResourceScope scope_mg(_memory_group);
+ // Run digit reverse
CLScheduler::get().enqueue(_digit_reverse_kernel, false);
+ // Run radix kernels
for(unsigned int i = 0; i < _num_ffts; ++i)
{
- CLScheduler::get().enqueue(_fft_kernels[i], i == (_num_ffts - 1));
+ CLScheduler::get().enqueue(_fft_kernels[i], i == (_num_ffts - 1) && !_run_scale);
+ }
+
+ // Run output scaling
+ if(_run_scale)
+ {
+ CLScheduler::get().enqueue(_scale_kernel, true);
}
}
} // namespace arm_compute
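Functionally, the digit-reverse, radix-stage and scale kernels compose into a 1D DFT along the chosen axis. A naive O(N^2) reference (standalone sketch, not library code) that the pipeline is expected to match:

#include <cmath>
#include <complex>
#include <cstddef>
#include <vector>

std::vector<std::complex<float>> dft(const std::vector<std::complex<float>> &x, bool inverse)
{
    const float pi      = 3.14159265358979f;
    const float sign    = inverse ? 1.f : -1.f;
    const std::size_t N = x.size();
    std::vector<std::complex<float>> X(N);
    for(std::size_t k = 0; k < N; ++k)
    {
        std::complex<float> acc(0.f, 0.f);
        for(std::size_t n = 0; n < N; ++n)
        {
            const float phi = sign * 2.f * pi * float(k * n) / float(N);
            acc += x[n] * std::complex<float>(std::cos(phi), std::sin(phi));
        }
        X[k] = inverse ? acc / float(N) : acc; // the inverse pass divides by N, as the scale kernel does
    }
    return X;
}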
diff --git a/src/runtime/CL/functions/CLFFT2D.cpp b/src/runtime/CL/functions/CLFFT2D.cpp
new file mode 100644
index 0000000000..4300fb4e32
--- /dev/null
+++ b/src/runtime/CL/functions/CLFFT2D.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/CL/functions/CLFFT2D.h"
+
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+
+namespace arm_compute
+{
+CLFFT2D::CLFFT2D(std::shared_ptr<IMemoryManager> memory_manager)
+ : _memory_group(memory_manager), _first_pass_func(memory_manager), _second_pass_func(memory_manager), _first_pass_tensor()
+{
+}
+
+void CLFFT2D::configure(const ICLTensor *input, ICLTensor *output, const FFT2DInfo &config)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_ERROR_THROW_ON(CLFFT2D::validate(input->info(), output->info(), config));
+
+ // Setup first pass
+ FFT1DInfo first_pass_config;
+ first_pass_config.axis = config.axes.first;
+ first_pass_config.direction = config.direction;
+ _memory_group.manage(&_first_pass_tensor);
+ _first_pass_func.configure(input, &_first_pass_tensor, first_pass_config);
+
+ // Setup second pass
+ FFT1DInfo second_pass_config;
+ second_pass_config.axis = config.axes.second;
+ second_pass_config.direction = config.direction;
+ _second_pass_func.configure(&_first_pass_tensor, output, second_pass_config);
+ _first_pass_tensor.allocator()->allocate();
+}
+
+Status CLFFT2D::validate(const ITensorInfo *input, const ITensorInfo *output, const FFT2DInfo &config)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
+
+ // Create intermediate tensor info
+ TensorInfo first_pass_tensor(input->clone()->set_is_resizable(true).reset_padding().set_num_channels(2));
+
+ // Validate first pass
+ FFT1DInfo first_pass_config;
+ first_pass_config.axis = config.axes.first;
+ first_pass_config.direction = config.direction;
+ ARM_COMPUTE_RETURN_ON_ERROR(CLFFT1D::validate(input, &first_pass_tensor, first_pass_config));
+
+ // Validate second pass
+ FFT1DInfo second_pass_config;
+ second_pass_config.axis = config.axes.second;
+ second_pass_config.direction = config.direction;
+ ARM_COMPUTE_RETURN_ON_ERROR(CLFFT1D::validate(&first_pass_tensor, output, second_pass_config));
+
+ // Checks performed when output is configured
+ if((output != nullptr) && (output->total_size() != 0))
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ }
+
+ return Status{};
+}
+
+void CLFFT2D::run()
+{
+ _memory_group.acquire();
+
+ _first_pass_func.run();
+ _second_pass_func.run();
+
+ _memory_group.release();
+}
+} // namespace arm_compute
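CLFFT2D exploits the separability of the 2D DFT: one 1D pass along the first axis, then a second 1D pass over the intermediate result along the other axis. A sketch of that structure, reusing the naive dft() reference given after CLFFT1D.cpp above:

#include <complex>
#include <cstddef>
#include <vector>

void fft2d(std::vector<std::vector<std::complex<float>>> &img, bool inverse)
{
    // First pass: 1D transform along each row.
    for(auto &row : img)
    {
        row = dft(row, inverse);
    }
    // Second pass: 1D transform along each column of the intermediate result.
    const std::size_t H = img.size();
    const std::size_t W = img[0].size();
    for(std::size_t x = 0; x < W; ++x)
    {
        std::vector<std::complex<float>> col(H);
        for(std::size_t y = 0; y < H; ++y)
        {
            col[y] = img[y][x];
        }
        col = dft(col, inverse);
        for(std::size_t y = 0; y < H; ++y)
        {
            img[y][x] = col[y];
        }
    }
}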
diff --git a/src/runtime/CL/functions/CLFFTConvolutionLayer.cpp b/src/runtime/CL/functions/CLFFTConvolutionLayer.cpp
new file mode 100644
index 0000000000..441c1c7214
--- /dev/null
+++ b/src/runtime/CL/functions/CLFFTConvolutionLayer.cpp
@@ -0,0 +1,380 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h"
+
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/helpers/fft.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "arm_compute/runtime/CPP/CPPScheduler.h"
+
+namespace arm_compute
+{
+namespace
+{
+int pad_decomposable(int N)
+{
+ const auto supported_radix = CLFFTRadixStageKernel::supported_radix();
+
+ int pad = 0;
+ bool is_decomposed = false;
+ while(!is_decomposed)
+ {
+ const auto decomposed_vector = arm_compute::helpers::fft::decompose_stages(N++, supported_radix);
+ is_decomposed = !decomposed_vector.empty();
+ if(!is_decomposed)
+ {
+ ++pad;
+ }
+ }
+ return pad;
+}
+} // namespace
+CLFFTConvolutionLayer::CLFFTConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
+ : _memory_group(memory_manager),
+ _flip_weights_func(),
+ _permute_input_func(),
+ _permute_output_func(),
+ _permute_weights_func(),
+ _permute_bias_func(),
+ _pad_input_func(),
+ _pad_weights_func(),
+ _transform_input_func(memory_manager),
+ _transform_weights_func(memory_manager),
+ _itransform_output_func(memory_manager),
+ _prod_func(),
+ _reduce_func(),
+ _extract_output_func(),
+ _bias_add_func(),
+ _activation_layer_func(),
+ _permuted_input(),
+ _permuted_weights(),
+ _permuted_bias(),
+ _permuted_output(),
+ _padded_input(),
+ _padded_weights(),
+ _flip_axis(),
+ _flipped_weights(),
+ _transformed_input(),
+ _transformed_weights(),
+ _input_weights_product(),
+ _output_product(),
+ _output_reduced(),
+ _itransformed_output(),
+ _reshaped_output(),
+ _bias_output(),
+ _original_weights(nullptr),
+ _original_bias(nullptr),
+ _is_activationlayer_enabled(false),
+ _needs_permute(false),
+ _has_bias(false),
+ _is_prepared(false)
+{
+}
+
+void CLFFTConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info)
+{
+ _original_weights = weights;
+ _original_bias = biases;
+
+    // Flag if bias addition is required
+ _has_bias = biases != nullptr;
+
+ // Get indices for the width and height
+ const size_t idx_width = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH);
+ const size_t idx_height = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT);
+
+ // Input shape, kernel size and output tile
+ const Size2D input_dims = Size2D(input->info()->tensor_shape()[idx_width], input->info()->tensor_shape()[idx_height]);
+ const Size2D kernel_size = Size2D(weights->info()->tensor_shape()[idx_width], weights->info()->tensor_shape()[idx_height]);
+ const Size2D pad_valid = Size2D(pad_decomposable(input_dims.x() + kernel_size.x() - 1),
+ pad_decomposable(input_dims.y() + kernel_size.y() - 1));
+ // Tensors to use
+ ICLTensor *input_to_use = input;
+ const ICLTensor *weights_to_use = weights;
+ ICLTensor *output_to_use = _has_bias ? &_bias_output : output;
+
+ // Permute bias
+ _permute_bias_func.configure(biases, &_permuted_bias, PermutationVector(1U, 2U, 0U));
+ _permuted_bias.info()->set_data_layout(DataLayout::NCHW);
+
+ // Permute input if needed
+ _needs_permute = input->info()->data_layout() == DataLayout::NHWC;
+ if(_needs_permute)
+ {
+ _memory_group.manage(&_permuted_input);
+ // Configure the function to transform the input tensor from NHWC -> NCHW
+ _permute_input_func.configure(input, &_permuted_input, PermutationVector(1U, 2U, 0U));
+ _permuted_input.info()->set_data_layout(DataLayout::NCHW);
+
+ // Configure the function to transform the weights tensor from HWI -> IHW
+ _permute_weights_func.configure(weights, &_permuted_weights, PermutationVector(1U, 2U, 0U));
+ _permuted_weights.info()->set_data_layout(DataLayout::NCHW);
+
+ input_to_use = &_permuted_input;
+ weights_to_use = &_permuted_weights;
+ }
+
+ // Flip weights
+ _flipped_weights.allocator()->init(weights_to_use->info()->clone()->set_is_resizable(true).reset_padding());
+ _flip_axis.allocator()->init(TensorInfo(TensorShape(2U), 1, DataType::U32));
+ _flip_weights_func.configure(weights_to_use, &_flipped_weights, &_flip_axis);
+
+ // Pad weights
+ const PaddingList padding_w = { { 0, input_dims.x() + pad_valid.x() - 1 }, { 0, input_dims.y() + pad_valid.y() - 1 } };
+ _pad_weights_func.configure(&_flipped_weights, &_padded_weights, padding_w);
+
+ // Transform weights
+ _transform_weights_func.configure(&_padded_weights, &_transformed_weights, FFT2DInfo());
+
+ // Pad input
+ const PaddingList padding_in = { { 0, kernel_size.x() + pad_valid.x() - 1 }, { 0, kernel_size.y() + pad_valid.y() - 1 } };
+ _memory_group.manage(&_padded_input);
+ _pad_input_func.configure(input_to_use, &_padded_input, padding_in);
+ if(_needs_permute)
+ {
+ _permuted_input.allocator()->allocate();
+ }
+
+ // Transform input
+ _memory_group.manage(&_transformed_input);
+ _transform_input_func.configure(&_padded_input, &_transformed_input, FFT2DInfo());
+ _padded_input.allocator()->allocate();
+
+ // Perform product
+ _memory_group.manage(&_output_product);
+ _prod_func.configure(&_transformed_input, &_transformed_weights, &_output_product);
+ _transformed_input.allocator()->allocate();
+
+ // Perform reduction
+ _memory_group.manage(&_output_reduced);
+ _reduce_func.configure(&_output_product, &_output_reduced, 2, ReductionOperation::SUM);
+ _output_product.allocator()->allocate();
+
+ // Transform output
+ _memory_group.manage(&_itransformed_output);
+    FFT2DInfo itransform_info;
+    itransform_info.direction = FFTDirection::Inverse;
+ _itransformed_output.allocator()->init(_output_reduced.info()->clone()->set_is_resizable(true).set_num_channels(1).reset_padding());
+    _itransform_output_func.configure(&_output_reduced, &_itransformed_output, itransform_info);
+ _output_reduced.allocator()->allocate();
+
+ // Reshape output
+ TensorShape reshaped_shape = _itransformed_output.info()->tensor_shape();
+ reshaped_shape.remove_dimension(2);
+ _reshaped_output.allocator()->init(_itransformed_output.info()->clone()->set_tensor_shape(reshaped_shape));
+
+ // Extract correct region
+    const int start_left = kernel_size.x() - conv_info.pad_left() - 1;
+    const int start_top  = kernel_size.y() - conv_info.pad_top() - 1;
+    const int end_right  = _reshaped_output.info()->tensor_shape().x() - (kernel_size.x() - conv_info.pad_right() - 1) - pad_valid.x();
+    const int end_bottom = _reshaped_output.info()->tensor_shape().y() - (kernel_size.y() - conv_info.pad_bottom() - 1) - pad_valid.y();
+ if(_has_bias)
+ {
+ _memory_group.manage(&_bias_output);
+ }
+ else if(_needs_permute)
+ {
+ output_to_use = &_permuted_output;
+ _memory_group.manage(&_permuted_output);
+ }
+    _extract_output_func.configure(&_reshaped_output, output_to_use, Coordinates(start_left, start_top), Coordinates(end_right, end_bottom));
+ _itransformed_output.allocator()->allocate();
+
+ // Add bias
+ if(biases != nullptr)
+ {
+ output_to_use = output;
+ if(_needs_permute)
+ {
+ output_to_use = &_permuted_output;
+ _memory_group.manage(&_permuted_output);
+ }
+ auto_init_if_empty(*output_to_use->info(), *_bias_output.info());
+ _bias_add_func.configure(&_bias_output, &_permuted_bias, output_to_use, ConvertPolicy::WRAP);
+ _bias_output.allocator()->allocate();
+ }
+
+ // Permute output
+ if(_needs_permute)
+ {
+        // Configure the function to permute the convolved output back to the original NHWC layout
+ _permuted_output.info()->set_data_layout(DataLayout::NCHW);
+ _permute_output_func.configure(&_permuted_output, output, PermutationVector(2U, 0U, 1U));
+
+ // Allocate tensors
+ _permuted_output.allocator()->allocate();
+ }
+
+ // Configure Activation Layer
+ _is_activationlayer_enabled = act_info.enabled();
+ if(_is_activationlayer_enabled)
+ {
+ _activation_layer_func.configure(output, nullptr, act_info);
+ }
+
+ // Setup flip axis data
+ _flip_axis.allocator()->allocate();
+ _flip_axis.map(true);
+ auto axis_data = reinterpret_cast<uint32_t *>(_flip_axis.buffer());
+ axis_data[0] = 0;
+ axis_data[1] = 1;
+ _flip_axis.unmap();
+}
+
+Status CLFFTConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
+
+ // Get indices for the width and height
+ const size_t idx_width = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
+ const size_t idx_height = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
+
+ // Input shape, kernel size and output tile
+ const Size2D kernel_size = Size2D(weights->tensor_shape()[idx_width], weights->tensor_shape()[idx_height]);
+
+ // Strides
+ const auto strides = conv_info.stride();
+ ARM_COMPUTE_RETURN_ERROR_ON(strides.first != strides.second && strides.first != 1);
+ ARM_COMPUTE_RETURN_ERROR_ON(kernel_size.x() != kernel_size.y());
+ ARM_COMPUTE_RETURN_ERROR_ON(conv_info.pad_left() != (kernel_size.x() / 2) || conv_info.pad_right() != (kernel_size.x() / 2));
+ ARM_COMPUTE_RETURN_ERROR_ON(conv_info.pad_top() != (kernel_size.y() / 2) || conv_info.pad_bottom() != (kernel_size.y() / 2));
+
+ // Validate biases
+ if(biases != nullptr)
+ {
+ const size_t idx_channels = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_channels] != biases->tensor_shape().x());
+ }
+
+ // Checks performed when output is configured
+ if((output != nullptr) && (output->total_size() != 0))
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
+
+ // Validate Activation Layer
+ if(act_info.enabled())
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayer::validate(output, nullptr, act_info));
+ }
+ }
+
+ return Status{};
+}
+
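
validate() restricts the FFT path to unit, symmetric strides, square kernels and SAME padding (pad == kernel_size / 2). A minimal sketch of probing support up front so a caller can fall back to another convolution method; the shapes are illustrative, and output/bias validation is skipped by passing nullptr:

// Sketch: check whether a SAME-padded 9x9 convolution can take the FFT path.
// Shapes use ACL's (W, H, C) ordering and are illustrative only.
const TensorInfo input(TensorShape(192U, 128U, 64U), 1, DataType::F32);
const TensorInfo weights(TensorShape(9U, 9U, 64U, 3U), 1, DataType::F32);
const PadStrideInfo conv_info(1, 1, 4, 4); // stride 1, pad = 9 / 2 = 4 (SAME)

const Status status = CLFFTConvolutionLayer::validate(&input, &weights, nullptr /* biases */, nullptr /* output */, conv_info, ActivationLayerInfo());
if(!bool(status))
{
    // Fall back to e.g. CLGEMMConvolutionLayer.
}
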
+void CLFFTConvolutionLayer::run()
+{
+ prepare();
+
+ _memory_group.acquire();
+
+ // Transform input
+ if(_needs_permute)
+ {
+ _permute_input_func.run();
+ }
+ _pad_input_func.run();
+ _transform_input_func.run();
+
+ // Perform operations in the frequency domain
+ _prod_func.run();
+ _reduce_func.run();
+
+ // Transform output
+ _itransform_output_func.run();
+ _reshaped_output.allocator()->import_memory(_itransformed_output.cl_buffer());
+ _extract_output_func.run();
+ // Add bias
+ if(_has_bias)
+ {
+ _bias_add_func.run();
+ }
+ if(_needs_permute)
+ {
+ _permute_output_func.run();
+ }
+
+ // Run activation layer
+ if(_is_activationlayer_enabled)
+ {
+ _activation_layer_func.run();
+ }
+
+ _memory_group.release();
+}
+
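
Note how run() re-binds _reshaped_output on every invocation instead of copying: import_memory() makes the reshaped tensor an alias of _itransformed_output's OpenCL buffer, so dropping the degenerate third dimension costs nothing. A standalone sketch of the same aliasing trick, mirroring the init()/import_memory() calls used above (tensor names are illustrative):

// Sketch: view one CLTensor's storage under a different shape, without a copy.
CLTensor owner; // owns the actual OpenCL buffer
CLTensor view;  // reinterprets the same storage
owner.allocator()->init(TensorInfo(TensorShape(32U, 32U, 1U), 2, DataType::F32));
owner.allocator()->allocate();

TensorShape view_shape = owner.info()->tensor_shape();
view_shape.remove_dimension(2); // drop the degenerate dimension
view.allocator()->init(owner.info()->clone()->set_tensor_shape(view_shape));
view.allocator()->import_memory(owner.cl_buffer()); // view itself is never allocate()d
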
+void CLFFTConvolutionLayer::prepare()
+{
+ if(!_is_prepared)
+ {
+ // Permute bias to NCHW
+ if(_original_bias != nullptr)
+ {
+ _permuted_bias.allocator()->allocate();
+ _permute_bias_func.run();
+ _original_bias->mark_as_unused();
+ }
+
+ const ICLTensor *cur_weights = _original_weights;
+ // Permute weights
+ if(_needs_permute)
+ {
+ ARM_COMPUTE_ERROR_ON(!cur_weights->is_used());
+
+ _permuted_weights.allocator()->allocate();
+ _permute_weights_func.run();
+ cur_weights->mark_as_unused();
+ cur_weights = &_permuted_weights;
+ }
+
+ // Flip weights
+ _flipped_weights.allocator()->allocate();
+ _flip_weights_func.run();
+ cur_weights->mark_as_unused();
+
+ // Pad weights
+ _padded_weights.allocator()->allocate();
+ _pad_weights_func.run();
+ _flipped_weights.mark_as_unused();
+ CLScheduler::get().queue().finish();
+ _flipped_weights.allocator()->free();
+
+ // Transform weights to frequency domain
+ _transformed_weights.allocator()->allocate();
+ _transform_weights_func.run();
+ _padded_weights.mark_as_unused();
+ CLScheduler::get().queue().finish();
+ _padded_weights.allocator()->free();
+
+ _is_prepared = true;
+ }
+}
+} // namespace arm_compute
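
Taken together, configure(), prepare() and run() follow the usual ACL function lifecycle: the first run() triggers prepare(), which flips, pads and transforms the weights exactly once and frees the intermediates. A hedged end-to-end usage sketch (shapes mirror the ResNet12 benchmark entry added below; tensor filling and error handling omitted):

#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    CLTensor src, weights, bias, dst;
    src.allocator()->init(TensorInfo(TensorShape(192U, 128U, 64U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(9U, 9U, 64U, 3U), 1, DataType::F32));
    bias.allocator()->init(TensorInfo(TensorShape(3U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(192U, 128U, 3U), 1, DataType::F32));

    CLFFTConvolutionLayer conv;
    conv.configure(&src, &weights, &bias, &dst, PadStrideInfo(1, 1, 4, 4), ActivationLayerInfo());

    src.allocator()->allocate();
    weights.allocator()->allocate();
    bias.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src, weights and bias ...

    conv.run(); // first call also runs prepare()
    CLScheduler::get().sync();
    return 0;
}
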
diff --git a/src/runtime/CL/functions/CLPixelWiseMultiplication.cpp b/src/runtime/CL/functions/CLPixelWiseMultiplication.cpp
index b4c20db3da..959464ce14 100644
--- a/src/runtime/CL/functions/CLPixelWiseMultiplication.cpp
+++ b/src/runtime/CL/functions/CLPixelWiseMultiplication.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,8 +29,8 @@
#include <utility>
-using namespace arm_compute;
-
+namespace arm_compute
+{
void CLPixelWiseMultiplication::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, float scale,
ConvertPolicy overflow_policy, RoundingPolicy rounding_policy)
{
@@ -54,3 +54,26 @@ Status CLPixelWiseMultiplication::validate(const ITensorInfo *input1, const ITen
{
return CLPixelWiseMultiplicationKernel::validate(input1, input2, output, scale, overflow_policy, rounding_policy);
}
+
+void CLComplexPixelWiseMultiplication::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output)
+{
+ auto k = arm_compute::support::cpp14::make_unique<CLComplexPixelWiseMultiplicationKernel>();
+ k->configure(input1, input2, output);
+ _kernel = std::move(k);
+
+ if(output->info()->dimension(0) > 1)
+ {
+ ICLTensor *broadcasted_info = (input1->info()->dimension(0) == 1) ? input1 : input2;
+
+ if(broadcasted_info->info()->dimension(0) == 1)
+ {
+ _border_handler.configure(broadcasted_info, _kernel->border_size(), BorderMode::REPLICATE);
+ }
+ }
+}
+
+Status CLComplexPixelWiseMultiplication::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+{
+ return CLComplexPixelWiseMultiplicationKernel::validate(input1, input2, output);
+}
+} // namespace arm_compute
\ No newline at end of file
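
CLComplexPixelWiseMultiplication drives a kernel that multiplies two tensors of interleaved complex values (two channels per element: real, imaginary). A scalar reference of the arithmetic the kernel is expected to perform, written with std::complex purely for illustration:

#include <complex>
#include <cstddef>
#include <vector>

// Sketch: element-wise complex product on interleaved (re, im) float pairs,
// i.e. (a + bi)(c + di) = (ac - bd) + (ad + bc)i.
void complex_multiply(const std::vector<float> &in1, const std::vector<float> &in2, std::vector<float> &out)
{
    for(std::size_t i = 0; i + 1 < in1.size(); i += 2)
    {
        const std::complex<float> a(in1[i], in1[i + 1]);
        const std::complex<float> b(in2[i], in2[i + 1]);
        const std::complex<float> p = a * b;
        out[i]     = p.real();
        out[i + 1] = p.imag();
    }
}
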
diff --git a/tests/benchmark/CL/ConvolutionLayer.cpp b/tests/benchmark/CL/ConvolutionLayer.cpp
index 5eb33658ff..20828b7717 100644
--- a/tests/benchmark/CL/ConvolutionLayer.cpp
+++ b/tests/benchmark/CL/ConvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,6 +29,7 @@
#include "arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h"
#include "tests/CL/CLAccessor.h"
#include "tests/benchmark/fixtures/ConvolutionLayerFixture.h"
+#include "tests/benchmark/fixtures/FFTConvolutionLayerFixture.h"
#include "tests/benchmark/fixtures/WinogradConvolutionLayerFixture.h"
#include "tests/datasets/system_tests/alexnet/AlexNetConvolutionLayerDataset.h"
#include "tests/datasets/system_tests/googlenet/inceptionv1/GoogLeNetInceptionV1ConvolutionLayerDataset.h"
@@ -41,6 +42,9 @@
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "utils/TypePrinter.h"
+#include "arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h"
+#include "tests/datasets/SmallConvolutionLayerDataset.h"
+#include "tests/datasets/system_tests/resnet12/ResNet12ConvolutionLayerDataset.h"
namespace arm_compute
{
@@ -53,11 +57,17 @@ namespace
const auto data_types = framework::dataset::make("DataType", { DataType::F16, DataType::F32, DataType::QASYMM8 });
} // namespace
-using CLGEMMConvolutionLayerFixture = ConvolutionLayerFixture<CLTensor, CLGEMMConvolutionLayer, CLAccessor>;
-
TEST_SUITE(CL)
-using CLWinogradLayerFixture = WinogradConvolutionLayerFixture<CLTensor, CLWinogradConvolutionLayer, CLAccessor>;
+using CLGEMMConvolutionLayerFixture = ConvolutionLayerFixture<CLTensor, CLGEMMConvolutionLayer, CLAccessor>;
+using CLWinogradLayerFixture = WinogradConvolutionLayerFixture<CLTensor, CLWinogradConvolutionLayer, CLAccessor>;
+using CLFFTConvolutionLayerFixture = FFTConvolutionLayerFixture<CLTensor, CLFFTConvolutionLayer, CLAccessor>;
+
+REGISTER_FIXTURE_DATA_TEST_CASE(ResNet12FFTLayer, CLFFTConvolutionLayerFixture, framework::DatasetMode::ALL,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::ResNet12FFTConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetWinogradLayer, CLWinogradLayerFixture, framework::DatasetMode::ALL,
framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::AlexNetWinogradLayerDataset(),
diff --git a/tests/benchmark/CL/FFT.cpp b/tests/benchmark/CL/FFT.cpp
index b345d58eaf..7f1ae63708 100644
--- a/tests/benchmark/CL/FFT.cpp
+++ b/tests/benchmark/CL/FFT.cpp
@@ -24,6 +24,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLFFT1D.h"
+#include "arm_compute/runtime/CL/functions/CLFFT2D.h"
#include "tests/CL/CLAccessor.h"
#include "tests/benchmark/fixtures/FFTFixture.h"
#include "tests/framework/Macros.h"
@@ -42,13 +43,17 @@ const auto data_types = framework::dataset::make("DataType", { DataType::F32 });
const auto shapes = framework::dataset::make("Shapes", { TensorShape(192U, 128U, 64U), TensorShape(224U, 224U) });
} // namespace
-using CLFFT1DFixture = FFT1DFixture<CLTensor, CLFFT1D, CLAccessor>;
+using CLFFT1DFixture = FFTFixture<CLTensor, CLFFT1D, FFT1DInfo, CLAccessor>;
+using CLFFT2DFixture = FFTFixture<CLTensor, CLFFT2D, FFT2DInfo, CLAccessor>;
TEST_SUITE(CL)
REGISTER_FIXTURE_DATA_TEST_CASE(FFT1D, CLFFT1DFixture, framework::DatasetMode::ALL,
framework::dataset::combine(shapes, data_types));
+REGISTER_FIXTURE_DATA_TEST_CASE(FFT2D, CLFFT2DFixture, framework::DatasetMode::ALL,
+ framework::dataset::combine(shapes, data_types));
+
TEST_SUITE_END() // CL
} // namespace benchmark
} // namespace test
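
The new CLFFT2DFixture runs on the same shapes as the 1D case; a 2D FFT can be composed from 1D transforms along each axis, which is presumably how CLFFT2D reuses the 1D machinery. A naive O(N^2) reference sketch of that row-column decomposition (std::complex stands in for the two-channel tensors; the real kernels use mixed-radix stages):

#include <algorithm>
#include <cmath>
#include <complex>
#include <cstddef>
#include <vector>

using cf = std::complex<float>;

// Naive 1D DFT, O(n^2); stands in for the radix-stage kernels.
static std::vector<cf> dft_1d(const std::vector<cf> &x)
{
    const std::size_t n = x.size();
    std::vector<cf> X(n); // value-initialised to (0, 0)
    for(std::size_t k = 0; k < n; ++k)
    {
        for(std::size_t j = 0; j < n; ++j)
        {
            const float angle = -2.0f * 3.14159265f * k * j / n;
            X[k] += x[j] * cf(std::cos(angle), std::sin(angle));
        }
    }
    return X;
}

// 2D DFT: 1D DFT over every row (axis 0), then over every column (axis 1).
static void dft_2d(std::vector<cf> &img, std::size_t width, std::size_t height)
{
    for(std::size_t y = 0; y < height; ++y)
    {
        std::vector<cf> row(img.begin() + y * width, img.begin() + (y + 1) * width);
        row = dft_1d(row);
        std::copy(row.begin(), row.end(), img.begin() + y * width);
    }
    for(std::size_t x = 0; x < width; ++x)
    {
        std::vector<cf> col(height);
        for(std::size_t y = 0; y < height; ++y)
        {
            col[y] = img[y * width + x];
        }
        col = dft_1d(col);
        for(std::size_t y = 0; y < height; ++y)
        {
            img[y * width + x] = col[y];
        }
    }
}
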
diff --git a/tests/benchmark/fixtures/FFTConvolutionLayerFixture.h b/tests/benchmark/fixtures/FFTConvolutionLayerFixture.h
new file mode 100644
index 0000000000..2c53e3ad9b
--- /dev/null
+++ b/tests/benchmark/fixtures/FFTConvolutionLayerFixture.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_FFT_CONVOLUTION_LAYER_FIXTURE
+#define ARM_COMPUTE_TEST_FFT_CONVOLUTION_LAYER_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "tests/Globals.h"
+#include "tests/Utils.h"
+#include "tests/framework/Fixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace benchmark
+{
+/** Fixture that can be used for NEON and CL */
+template <typename TensorType, typename Function, typename Accessor>
+class FFTConvolutionLayerFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape biases_shape, TensorShape dst_shape, PadStrideInfo info, Size2D dilation, ActivationLayerInfo act_info, DataType data_type,
+ int batches)
+ {
+ ARM_COMPUTE_UNUSED(dilation);
+
+ // Set batch dimension in source and destination shapes
+
+ src_shape.set(3 /* batch */, batches);
+ dst_shape.set(3 /* batch */, batches);
+
+ // Create tensors
+ src = create_tensor<TensorType>(src_shape, data_type, 1);
+ weights = create_tensor<TensorType>(weights_shape, data_type, 1);
+ biases = create_tensor<TensorType>(biases_shape, data_type, 1);
+ dst = create_tensor<TensorType>(dst_shape, data_type, 1);
+
+ // Create and configure function
+ conv_layer.configure(&src, &weights, &biases, &dst, info, act_info);
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ weights.allocator()->allocate();
+ biases.allocator()->allocate();
+ dst.allocator()->allocate();
+ }
+
+ void run()
+ {
+ conv_layer.run();
+ }
+
+ void sync()
+ {
+ sync_if_necessary<TensorType>();
+ sync_tensor_if_necessary<TensorType>(dst);
+ }
+
+ void teardown()
+ {
+ src.allocator()->free();
+ weights.allocator()->free();
+ biases.allocator()->free();
+ dst.allocator()->free();
+ }
+
+private:
+ TensorType src{};
+ TensorType weights{};
+ TensorType biases{};
+ TensorType dst{};
+ Function conv_layer{};
+};
+} // namespace benchmark
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_FFT_CONVOLUTION_LAYER_FIXTURE */
diff --git a/tests/benchmark/fixtures/FFTFixture.h b/tests/benchmark/fixtures/FFTFixture.h
index c9c4e3a88e..53897b1b14 100644
--- a/tests/benchmark/fixtures/FFTFixture.h
+++ b/tests/benchmark/fixtures/FFTFixture.h
@@ -36,8 +36,8 @@ namespace test
{
namespace benchmark
{
-template <typename TensorType, typename Function, typename Accessor>
-class FFT1DFixture : public framework::Fixture
+template <typename TensorType, typename Function, typename FFTInfo, typename Accessor>
+class FFTFixture : public framework::Fixture
{
public:
template <typename...>
@@ -48,7 +48,7 @@ public:
dst = create_tensor<TensorType>(shape, data_type, 2);
// Create and configure function
- fft_func.configure(&src, &dst, FFT1DInfo());
+ fft_func.configure(&src, &dst, FFTInfo());
// Allocate tensors
src.allocator()->allocate();
diff --git a/tests/datasets/SmallConvolutionLayerDataset.h b/tests/datasets/SmallConvolutionLayerDataset.h
index 73f1554c49..22d0bc582a 100644
--- a/tests/datasets/SmallConvolutionLayerDataset.h
+++ b/tests/datasets/SmallConvolutionLayerDataset.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -132,6 +132,17 @@ public:
}
};
+class SmallFFTConvolutionLayerDataset final : public ConvolutionLayerDataset
+{
+public:
+ SmallFFTConvolutionLayerDataset()
+ {
+ add_config(TensorShape(8U, 7U, 3U), TensorShape(3U, 3U, 3U, 2U), TensorShape(2U), TensorShape(8U, 7U, 2U), PadStrideInfo(1, 1, 1, 1));
+ add_config(TensorShape(64U, 32U, 5U), TensorShape(5U, 5U, 5U, 10U), TensorShape(10U), TensorShape(64U, 32U, 10U), PadStrideInfo(1, 1, 2, 2));
+ add_config(TensorShape(192U, 128U, 8U), TensorShape(9U, 9U, 8U, 3U), TensorShape(3U), TensorShape(192U, 128U, 3U), PadStrideInfo(1, 1, 4, 4));
+ }
+};
+
class SmallConvolutionLayerDataset final : public ConvolutionLayerDataset
{
public:
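
For reference, each add_config() entry in these datasets reads, in order: source shape (W, H, C_in), weights shape (W, H, C_in, C_out), bias shape (C_out), destination shape (W, H, C_out) and PadStrideInfo(stride_x, stride_y, pad_x, pad_y). Annotating the first SmallFFTConvolutionLayerDataset entry above:

add_config(TensorShape(8U, 7U, 3U),     // src:     8x7 pixels, 3 input channels
           TensorShape(3U, 3U, 3U, 2U), // weights: 3x3 kernel, 3 in, 2 out
           TensorShape(2U),             // bias:    one value per output channel
           TensorShape(8U, 7U, 2U),     // dst:     same spatial size (SAME padding)
           PadStrideInfo(1, 1, 1, 1));  // stride 1, pad 1 = 3 / 2
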
diff --git a/tests/datasets/system_tests/resnet12/ResNet12ConvolutionLayerDataset.h b/tests/datasets/system_tests/resnet12/ResNet12ConvolutionLayerDataset.h
new file mode 100644
index 0000000000..b960dceafd
--- /dev/null
+++ b/tests/datasets/system_tests/resnet12/ResNet12ConvolutionLayerDataset.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_RESNET12_CONVOLUTION_LAYER_DATASET
+#define ARM_COMPUTE_TEST_RESNET12_CONVOLUTION_LAYER_DATASET
+
+#include "tests/datasets/ConvolutionLayerDataset.h"
+
+#include "utils/TypePrinter.h"
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace datasets
+{
+class ResNet12FFTConvolutionLayerDataset final : public ConvolutionLayerDataset
+{
+public:
+ ResNet12FFTConvolutionLayerDataset()
+ {
+ add_config(TensorShape(192U, 128U, 64U), TensorShape(9U, 9U, 64U, 3U), TensorShape(3U), TensorShape(192U, 128U, 3U), PadStrideInfo(1, 1, 4, 4));
+ }
+};
+} // namespace datasets
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_RESNET12_CONVOLUTION_LAYER_DATASET */
diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp
index 41d2b7bb5e..f1f9b59330 100644
--- a/tests/validation/CL/ConvolutionLayer.cpp
+++ b/tests/validation/CL/ConvolutionLayer.cpp
@@ -46,7 +46,7 @@ namespace validation
namespace
{
constexpr AbsoluteTolerance<float> absolute_tolerance_float(0.0001f); /**< Absolute Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
-RelativeTolerance<float> tolerance_f32(0.05f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
+RelativeTolerance<float> tolerance_f32(0.1f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
constexpr AbsoluteTolerance<float> tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
constexpr float tolerance_num = 0.07f; /**< Tolerance number */
diff --git a/tests/validation/CL/FFT.cpp b/tests/validation/CL/FFT.cpp
index 0d29532c29..9fdd85b604 100644
--- a/tests/validation/CL/FFT.cpp
+++ b/tests/validation/CL/FFT.cpp
@@ -24,7 +24,10 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLFFT1D.h"
+#include "arm_compute/runtime/CL/functions/CLFFT2D.h"
+#include "arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h"
#include "tests/CL/CLAccessor.h"
+#include "tests/datasets/SmallConvolutionLayerDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
@@ -40,7 +43,7 @@ namespace validation
namespace
{
const auto data_types = framework::dataset::make("DataType", { DataType::F32 });
-const auto shapes = framework::dataset::make("TensorShape", { TensorShape(2U, 2U, 3U), TensorShape(3U, 2U, 3U),
+const auto shapes_1d = framework::dataset::make("TensorShape", { TensorShape(2U, 2U, 3U), TensorShape(3U, 2U, 3U),
TensorShape(4U, 2U, 3U), TensorShape(5U, 2U, 3U),
TensorShape(7U, 2U, 3U), TensorShape(8U, 2U, 3U),
TensorShape(9U, 2U, 3U), TensorShape(25U, 2U, 3U),
@@ -48,11 +51,27 @@ const auto shapes = framework::dataset::make("TensorShape", { TensorShape(2U
TensorShape(16U, 2U, 3U), TensorShape(32U, 2U, 3U),
TensorShape(96U, 2U, 2U)
});
+const auto shapes_2d = framework::dataset::make("TensorShape", { TensorShape(2U, 2U, 3U), TensorShape(3U, 6U, 3U),
+ TensorShape(4U, 5U, 3U), TensorShape(5U, 7U, 3U),
+ TensorShape(7U, 25U, 3U), TensorShape(8U, 2U, 3U),
+ TensorShape(9U, 16U, 3U), TensorShape(25U, 32U, 3U),
+ TensorShape(192U, 128U, 2U)
+ });
+
+const auto ActivationFunctionsSmallDataset = framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
+});
+
+RelativeTolerance<float> tolerance_f32(0.1f); /**< Relative tolerance value for FP32 */
+constexpr float tolerance_num = 0.07f; /**< Tolerance number */
+
} // namespace
TEST_SUITE(CL)
TEST_SUITE(FFT1D)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(shapes, data_types),
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(shapes_1d, data_types),
shape, data_type)
{
// Create tensors
@@ -81,19 +100,19 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(shapes, data_
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32), // Mismatching data types
TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32), // Mismatching shapes
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Invalid channels
+ TensorInfo(TensorShape(32U, 13U, 2U), 3, DataType::F32), // Invalid channels
TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32), // Unsupported axis
TensorInfo(TensorShape(11U, 13U, 2U), 2, DataType::F32), // Undecomposable FFT
TensorInfo(TensorShape(25U, 13U, 2U), 2, DataType::F32),
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F16),
TensorInfo(TensorShape(16U, 13U, 2U), 2, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32),
TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32),
TensorInfo(TensorShape(11U, 13U, 2U), 2, DataType::F32),
TensorInfo(TensorShape(25U, 13U, 2U), 2, DataType::F32),
})),
- framework::dataset::make("Axis", { 0, 0, 0, 1, 0, 0 })),
+ framework::dataset::make("Axis", { 0, 0, 0, 2, 0, 0 })),
framework::dataset::make("Expected", { false, false, false, false, false, true })),
input_info, output_info, axis, expected)
{
@@ -106,19 +125,103 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
// *INDENT-ON*
template <typename T>
-using CLFFT1DFixture = FFTValidationFixture<CLTensor, CLAccessor, CLFFT1D, T>;
+using CLFFT1DFixture = FFTValidationFixture<CLTensor, CLAccessor, CLFFT1D, FFT1DInfo, T>;
TEST_SUITE(Float)
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLFFT1DFixture<float>, framework::DatasetMode::ALL, combine(shapes, framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLFFT1DFixture<float>, framework::DatasetMode::ALL, combine(shapes_1d, framework::dataset::make("DataType", DataType::F32)))
{
// Validate output
- validate(CLAccessor(_target), _reference, RelativeTolerance<float>(0.1f), 0.05f);
+ validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
TEST_SUITE_END() // FFT1D
+
+TEST_SUITE(FFT2D)
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(shapes_2d, data_types),
+ shape, data_type)
+{
+ // Create tensors
+ CLTensor src = create_tensor<CLTensor>(shape, data_type, 2);
+ CLTensor dst = create_tensor<CLTensor>(shape, data_type, 2);
+
+ ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Create and configure function
+ CLFFT2D fft2d;
+ fft2d.configure(&src, &dst, FFT2DInfo());
+
+ // Validate valid region
+ const ValidRegion valid_region = shape_to_valid_region(shape);
+ validate(src.info()->valid_region(), valid_region);
+ validate(dst.info()->valid_region(), valid_region);
+
+ // Validate padding
+ validate(src.info()->padding(), PaddingSize());
+ validate(dst.info()->padding(), PaddingSize());
+}
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
+ framework::dataset::make("InputInfo", { TensorInfo(TensorShape(32U, 25U, 2U), 2, DataType::F32), // Mismatching data types
+ TensorInfo(TensorShape(32U, 25U, 2U), 2, DataType::F32), // Mismatching shapes
+ TensorInfo(TensorShape(32U, 25U, 2U), 3, DataType::F32), // Invalid channels
+ TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32), // Undecomposable FFT
+ TensorInfo(TensorShape(32U, 25U, 2U), 2, DataType::F32),
+ }),
+ framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 25U, 2U), 2, DataType::F16),
+ TensorInfo(TensorShape(16U, 25U, 2U), 2, DataType::F32),
+ TensorInfo(TensorShape(32U, 25U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32),
+ TensorInfo(TensorShape(32U, 25U, 2U), 2, DataType::F32),
+ })),
+ framework::dataset::make("Expected", { false, false, false, false, true })),
+ input_info, output_info, expected)
+{
+ const Status s = CLFFT2D::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), FFT2DInfo());
+ ARM_COMPUTE_EXPECT(bool(s) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+template <typename T>
+using CLFFT2DFixture = FFTValidationFixture<CLTensor, CLAccessor, CLFFT2D, FFT2DInfo, T>;
+
+TEST_SUITE(Float)
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLFFT2DFixture<float>, framework::DatasetMode::ALL, combine(shapes_2d, framework::dataset::make("DataType", DataType::F32)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
+}
+TEST_SUITE_END() // FP32
+TEST_SUITE_END() // Float
+TEST_SUITE_END() // FFT2D
+
+TEST_SUITE(FFTConvolutionLayer)
+
+template <typename T>
+using CLFFTConvolutionLayerFixture = FFTConvolutionValidationFixture<CLTensor, CLAccessor, CLFFTConvolutionLayer, T>;
+
+TEST_SUITE(Float)
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLFFTConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallFFTConvolutionLayerDataset(),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ ActivationFunctionsSmallDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
+}
+TEST_SUITE_END() // FP32
+TEST_SUITE_END() // Float
+TEST_SUITE_END() // FFTConvolutionLayer
+
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
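
The "Undecomposable FFT" entries above (lengths 11 and 13) fail validation because every transformed dimension must factor completely into the radix sizes the CL kernels implement. A hedged sketch of that feasibility check, assuming a supported radix set of {2, 3, 4, 5, 7, 8} (the set actually registered by CLFFTRadixStageKernel may differ):

#include <vector>

// Sketch: a length is transformable if it factors into the assumed radices.
bool is_decomposable(unsigned int n)
{
    static const std::vector<unsigned int> radices{ 8, 7, 5, 4, 3, 2 }; // assumed set
    while(n > 1)
    {
        bool divided = false;
        for(unsigned int r : radices)
        {
            if(n % r == 0)
            {
                n /= r;
                divided = true;
                break;
            }
        }
        if(!divided)
        {
            return false; // primes such as 11 or 13 cannot be decomposed
        }
    }
    return true; // e.g. 25 = 5 * 5, 32 = 8 * 4, 96 = 8 * 4 * 3
}
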
diff --git a/tests/validation/CL/ReductionOperation.cpp b/tests/validation/CL/ReductionOperation.cpp
index c8474e97e6..79308c8229 100644
--- a/tests/validation/CL/ReductionOperation.cpp
+++ b/tests/validation/CL/ReductionOperation.cpp
@@ -63,7 +63,7 @@ TEST_SUITE(ReductionOperation)
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Mismatching data type input/output
- TensorInfo(TensorShape(128U, 64U), 2, DataType::F32), // Number of Input channels != 1
+ TensorInfo(TensorShape(128U, 64U), 3, DataType::F32), // Number of Input channels != 1
TensorInfo(TensorShape(128U, 64U), 1, DataType::S16), // DataType != QASYMM8/F16/F32
TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Axis >= num_max_dimensions
TensorInfo(TensorShape(128U, 64U), 1, DataType::QASYMM8), // Axis == 0 and SUM_SQUARE and QASYMM8
diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index 795b9de6cd..52fa8da60b 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/tests/validation/fixtures/FFTFixture.h b/tests/validation/fixtures/FFTFixture.h
index 8e3c01eaff..1aaa5965b2 100644
--- a/tests/validation/fixtures/FFTFixture.h
+++ b/tests/validation/fixtures/FFTFixture.h
@@ -31,6 +31,8 @@
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/ActivationLayer.h"
+#include "tests/validation/reference/ConvolutionLayer.h"
#include "tests/validation/reference/DFT.h"
#include <random>
@@ -41,7 +43,7 @@ namespace test
{
namespace validation
{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename InfoType, typename T>
class FFTValidationFixture : public framework::Fixture
{
public:
@@ -68,8 +70,8 @@ protected:
TensorType dst = create_tensor<TensorType>(shape, data_type, 2);
// Create and configure function
- FunctionType fft1d;
- fft1d.configure(&src, &dst, FFT1DInfo());
+ FunctionType fft;
+ fft.configure(&src, &dst, InfoType());
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -85,7 +87,7 @@ protected:
fill(AccessorType(src));
// Compute function
- fft1d.run();
+ fft.run();
return dst;
}
@@ -97,12 +99,138 @@ protected:
// Fill reference
fill(src);
+ if(std::is_same<InfoType, FFT1DInfo>::value)
+ {
+ return reference::dft_1d(src, reference::FFTDirection::Forward);
+ }
+ else
+ {
+ return reference::dft_2d(src, reference::FFTDirection::Forward);
+ }
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class FFTConvolutionValidationGenericFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
+ DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info)
+ {
+ _data_type = data_type;
+ _data_layout = data_layout;
+
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ switch(tensor.data_type())
+ {
+ case DataType::F32:
+ {
+ std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ default:
+ library->fill_tensor_uniform(tensor, i);
+ }
+ }
+
+ TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info,
+ const Size2D &dilation, const ActivationLayerInfo act_info)
+ {
+ ARM_COMPUTE_UNUSED(dilation);
+ ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);
+
+ if(_data_layout == DataLayout::NHWC)
+ {
+ permute(input_shape, PermutationVector(2U, 0U, 1U));
+ permute(weights_shape, PermutationVector(2U, 0U, 1U));
+ permute(output_shape, PermutationVector(2U, 0U, 1U));
+ }
+
+ // Create tensors
+ TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+ TensorType weights = create_tensor<TensorType>(weights_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+ TensorType bias = create_tensor<TensorType>(bias_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+ TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+
+ // Create and configure function
+ FunctionType conv;
+ conv.configure(&src, &weights, &bias, &dst, info, act_info);
+
+ ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ weights.allocator()->allocate();
+ bias.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill tensors
+ fill(AccessorType(src), 0);
+ fill(AccessorType(weights), 1);
+ fill(AccessorType(bias), 2);
+
+ // Compute convolution function
+ conv.run();
+
+ return dst;
+ }
- return reference::dft_1d(src, reference::FFTDirection::Forward);
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
+ const Size2D &dilation, const ActivationLayerInfo act_info)
+ {
+ ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);
+
+ // Create reference
+ SimpleTensor<T> src{ input_shape, _data_type, 1 };
+ SimpleTensor<T> weights{ weights_shape, _data_type, 1 };
+ SimpleTensor<T> bias{ bias_shape, _data_type, 1 };
+
+ // Fill reference
+ fill(src, 0);
+ fill(weights, 1);
+ fill(bias, 2);
+
+ return (act_info.enabled()) ? reference::activation_layer<T>(reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation), act_info) : reference::convolution_layer<T>(src,
+ weights, bias, output_shape, info, dilation);
}
TensorType _target{};
SimpleTensor<T> _reference{};
+ DataType _data_type{};
+ DataLayout _data_layout{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class FFTConvolutionValidationFixture : public FFTConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
+ DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info)
+ {
+ FFTConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation,
+ data_type, data_layout, act_info);
+ }
};
} // namespace validation
} // namespace test