author     giuros01 <giuseppe.rossini@arm.com>          2019-03-26 17:44:40 +0000
committer  Giuseppe Rossini <giuseppe.rossini@arm.com>  2019-05-09 12:38:22 +0000
commit     154bc1c3e6a0182e2130c7966af3944ee6ca20b3
tree       6cf717250870f311c99a4fbb6cdae4dfa84d5aae
parent     ae1a89ed670956b9722fe57c2dc36c75e5f948ec
COMPMID-1973: Implement FFTConvolutionLayer on NEON
Change-Id: I2e667c0411bda0164a616ffe44473a78de6752c9
Signed-off-by: giuros01 <giuseppe.rossini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1066
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'arm_compute')
-rw-r--r--  arm_compute/core/NEON/kernels/NEFFTDigitReverseKernel.h         |  34
-rw-r--r--  arm_compute/core/NEON/kernels/NEFFTRadixStageKernel.h           |   8
-rw-r--r--  arm_compute/core/NEON/kernels/NEFFTScaleKernel.h                |   8
-rw-r--r--  arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h |  46
-rw-r--r--  arm_compute/runtime/FunctionDescriptors.h                       |   1
-rw-r--r--  arm_compute/runtime/NEON/NEFunctions.h                          |   1
-rw-r--r--  arm_compute/runtime/NEON/functions/NEConvolutionLayer.h         |   4
-rw-r--r--  arm_compute/runtime/NEON/functions/NEFFT1D.h                    |   5
-rw-r--r--  arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h      | 154
-rw-r--r--  arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h  |  22
10 files changed, 259 insertions(+), 24 deletions(-)
diff --git a/arm_compute/core/NEON/kernels/NEFFTDigitReverseKernel.h b/arm_compute/core/NEON/kernels/NEFFTDigitReverseKernel.h
index 84d55fd8f4..0090c8cb14 100644
--- a/arm_compute/core/NEON/kernels/NEFFTDigitReverseKernel.h
+++ b/arm_compute/core/NEON/kernels/NEFFTDigitReverseKernel.h
@@ -24,6 +24,7 @@
#ifndef __ARM_COMPUTE_NEFFTDIGITREVERSEKERNEL_H__
#define __ARM_COMPUTE_NEFFTDIGITREVERSEKERNEL_H__
+#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/NEON/INEKernel.h"
namespace arm_compute
@@ -53,31 +54,40 @@ public:
~NEFFTDigitReverseKernel() = default;
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data types supported: F32.
- * @param[out] output Destination tensor. Data type supported: same as @p input
+ * @param[in] input Source tensor. Data types supported: F32. Number of channels supported: 1 (real tensor) or 2 (complex tensor).
+ * @param[out] output Destination tensor. Data type supported: same as @p input. Number of channels supported: 2 (complex tensor).
* @param[in] idx Digit reverse index tensor. Data type supported: U32
- * @param[in] axis Axis to perform digit reverse on.
+ * @param[in] config Kernel configuration.
*/
- void configure(const ITensor *input, ITensor *output, const ITensor *idx, unsigned int axis);
+ void configure(const ITensor *input, ITensor *output, const ITensor *idx, const FFTDigitReverseKernelInfo &config);
+
/** Static function to check if given info will lead to a valid configuration of @ref NEFFTDigitReverseKernel
*
- * @param[in] input Source tensor info. Data types supported: F32.
- * @param[in] output Destination tensor info. Data type supported: same as @p input
+ * @param[in] input Source tensor info. Data types supported: F32. Number of channels supported: 1 (real tensor) or 2 (complex tensor).
+ * @param[in] output Destination tensor info. Data type supported: same as @p input. Number of channels supported: 2 (complex tensor).
* @param[in] idx Digit reverse index tensor info. Data type supported: U32
- * @param[in] axis Axis to perform digit reverse on.
+ * @param[in] config Kernel configuration
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *idx, unsigned int axis);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *idx, const FFTDigitReverseKernelInfo &config);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
private:
- const ITensor *_input;
- ITensor *_output;
- const ITensor *_idx;
- unsigned int _axis;
+ using NEFFTDigitReverseKernelFunctionPtr = void (NEFFTDigitReverseKernel::*)(const Window &window);
+
+ template <bool is_input_complex, bool is_conj>
+ void digit_reverse_kernel_axis_0(const Window &window);
+
+ template <bool is_input_complex, bool is_conj>
+ void digit_reverse_kernel_axis_1(const Window &window);
+
+ NEFFTDigitReverseKernelFunctionPtr _func;
+ const ITensor *_input;
+ ITensor *_output;
+ const ITensor *_idx;
};
} // namespace arm_compute
#endif /*__ARM_COMPUTE_NEFFTDIGITREVERSEKERNEL_H__ */
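For context, a minimal validate() sketch against the new interface above; it assumes the FFTDigitReverseKernelInfo descriptor from KernelDescriptors.h exposes axis and conjugate fields, so treat those names as illustrative.

#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/NEON/kernels/NEFFTDigitReverseKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"

using namespace arm_compute;

Status check_digit_reverse()
{
    // Real (1-channel) input, complex (2-channel) output, U32 index tensor, as documented above.
    const TensorInfo input(TensorShape(128U, 64U), 1, DataType::F32);
    const TensorInfo output(TensorShape(128U, 64U), 2, DataType::F32);
    const TensorInfo idx(TensorShape(128U), 1, DataType::U32);

    FFTDigitReverseKernelInfo config{};
    config.axis      = 0;     // assumed field: axis to digit-reverse
    config.conjugate = false; // assumed field: whether the output should be conjugated

    return NEFFTDigitReverseKernel::validate(&input, &output, &idx, config);
}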
diff --git a/arm_compute/core/NEON/kernels/NEFFTRadixStageKernel.h b/arm_compute/core/NEON/kernels/NEFFTRadixStageKernel.h
index 8498d3c613..2210980816 100644
--- a/arm_compute/core/NEON/kernels/NEFFTRadixStageKernel.h
+++ b/arm_compute/core/NEON/kernels/NEFFTRadixStageKernel.h
@@ -59,15 +59,15 @@ public:
*
* @note If the output tensor is nullptr, the FFT will be performed in-place
*
- * @param[in,out] input Source tensor. Data types supported: F32.
- * @param[out] output Destination tensor. Data type supported: same as @p input
+ * @param[in,out] input Source tensor. Data types supported: F32. Number of channels supported: 2 (complex tensor).
+ * @param[out] output Destination tensor. Data type supported: same as @p input. Number of channels supported: same as @p input.
* @param[in] config FFT descriptor metadata.
*/
void configure(ITensor *input, ITensor *output, const FFTRadixStageKernelInfo &config);
/** Static function to check if given info will lead to a valid configuration of @ref NEFFTRadixStageKernel
*
- * @param[in] input Source tensor info. Data types supported: F32.
- * @param[in] output Destination tensor info. Data type supported: same as @p input
+ * @param[in] input Source tensor info. Data types supported: F32. Number of channels supported: 2 (complex tensor).
+ * @param[in] output Destination tensor info. Data type supported: same as @p input. Number of channels supported: same as @p input.
* @param[in] config FFT descriptor metadata.
*
* @return a status
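A similar hedged sketch for the radix-stage kernel; the FFTRadixStageKernelInfo fields used here (axis, radix, Nx, is_first_stage) are assumed from the FFT kernel descriptors, and the stage parameters are purely illustrative.

#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/NEON/kernels/NEFFTRadixStageKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"

using namespace arm_compute;

Status check_radix_stage()
{
    // Complex (2-channel) F32 tensors, as the updated documentation requires.
    const TensorInfo input(TensorShape(128U, 64U), 2, DataType::F32);
    const TensorInfo output(TensorShape(128U, 64U), 2, DataType::F32);

    FFTRadixStageKernelInfo config{};
    config.axis           = 0;    // assumed field: axis this stage operates on
    config.radix          = 4;    // assumed field: radix of the butterflies in this stage
    config.Nx             = 1;    // assumed field: span of the butterflies
    config.is_first_stage = true; // assumed field: whether this stage consumes digit-reversed input

    return NEFFTRadixStageKernel::validate(&input, &output, config);
}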
diff --git a/arm_compute/core/NEON/kernels/NEFFTScaleKernel.h b/arm_compute/core/NEON/kernels/NEFFTScaleKernel.h
index 5a19af7e62..51e6d5ab20 100644
--- a/arm_compute/core/NEON/kernels/NEFFTScaleKernel.h
+++ b/arm_compute/core/NEON/kernels/NEFFTScaleKernel.h
@@ -55,15 +55,15 @@ public:
~NEFFTScaleKernel() = default;
/** Set the input and output tensors.
*
- * @param[in,out] input Source tensor. Data types supported: F32.
- * @param[out] output Destination tensor. Data type supported: same as @p input
+ * @param[in,out] input Source tensor. Data types supported: F32. Number of channels supported: 2 (complex tensor).
+ * @param[out] output Destination tensor. Data type supported: same as @p input. Number of channels supported: 1 (real tensor) or 2 (complex tensor).
* @param[in] config Kernel configuration
*/
void configure(ITensor *input, ITensor *output, const FFTScaleKernelInfo &config);
/** Static function to check if given info will lead to a valid configuration of @ref NEFFTScaleKernel
*
- * @param[in] input Source tensor info. Data types supported: F32.
- * @param[in] output Destination tensor info. Data type supported: same as @p input
+ * @param[in] input Source tensor info. Data types supported: F32. Number of channels supported: 2 (complex tensor).
+ * @param[in] output Destination tensor info. Data type supported: same as @p input. Number of channels supported: 1 (real tensor) or 2 (complex tensor).
* @param[in] config Kernel configuration
*
* @return a status
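A hedged sketch for the scale kernel, which is typically used to apply the 1/N factor after an inverse FFT; the FFTScaleKernelInfo field names (scale, conjugate) are assumptions.

#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/NEON/kernels/NEFFTScaleKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"

using namespace arm_compute;

Status check_scale()
{
    // Complex (2-channel) input; the output may be complex or, as documented above, real (1 channel).
    const TensorInfo input(TensorShape(128U, 64U), 2, DataType::F32);
    const TensorInfo output(TensorShape(128U, 64U), 2, DataType::F32);

    FFTScaleKernelInfo config{};
    config.scale     = 128.f * 64.f; // assumed field: scaling factor, e.g. the transform length for an inverse FFT
    config.conjugate = false;        // assumed field: whether the input is conjugated while scaling

    return NEFFTScaleKernel::validate(&input, &output, config);
}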
diff --git a/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h b/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
index 2a8e36b1de..cbb961f235 100644
--- a/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
+++ b/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
@@ -128,5 +128,51 @@ private:
float _scale;
int _scale_exponent;
};
+
+/** Interface for the complex pixelwise multiplication kernel. */
+class NEComplexPixelWiseMultiplicationKernel : public INEKernel
+{
+public:
+ const char *name() const override
+ {
+ return "NEComplexPixelWiseMultiplicationKernel";
+ }
+ /** Default constructor.*/
+ NEComplexPixelWiseMultiplicationKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEComplexPixelWiseMultiplicationKernel(const NEComplexPixelWiseMultiplicationKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEComplexPixelWiseMultiplicationKernel &operator=(const NEComplexPixelWiseMultiplicationKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ NEComplexPixelWiseMultiplicationKernel(NEComplexPixelWiseMultiplicationKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ NEComplexPixelWiseMultiplicationKernel &operator=(NEComplexPixelWiseMultiplicationKernel &&) = default;
+ /** Initialise the kernel's input, output and border mode.
+ *
+ * @param[in] input1 An input tensor. Data types supported: F32. Number of channels supported: 2 (complex tensor).
+ * @param[in] input2 An input tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ * @param[out] output The output tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ */
+ void configure(const ITensor *input1, const ITensor *input2, ITensor *output);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEComplexPixelWiseMultiplicationKernel
+ *
+ * @param[in] input1 An input tensor info. Data types supported: F32. Number of channels supported: 2 (complex tensor).
+ * @param[in] input2 An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ * @param[in] output The output tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
+
+ // Inherited methods overridden:
+ void run(const Window &window, const ThreadInfo &info) override;
+ BorderSize border_size() const override;
+
+private:
+ const ITensor *_input1;
+ const ITensor *_input2;
+ ITensor *_output;
+};
+
} // namespace arm_compute
#endif /*__ARM_COMPUTE_NEPIXELWISEMULTIPLICATIONKERNEL_H__ */
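A small validation sketch for the new complex multiplication kernel, using only the interface introduced above; all tensors are 2-channel (complex) F32 and the shape is illustrative.

#include "arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"

using namespace arm_compute;

Status check_complex_product()
{
    const TensorInfo lhs(TensorShape(128U, 64U), 2, DataType::F32); // complex operand
    const TensorInfo rhs(TensorShape(128U, 64U), 2, DataType::F32); // complex operand
    const TensorInfo dst(TensorShape(128U, 64U), 2, DataType::F32); // complex result

    return NEComplexPixelWiseMultiplicationKernel::validate(&lhs, &rhs, &dst);
}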
diff --git a/arm_compute/runtime/FunctionDescriptors.h b/arm_compute/runtime/FunctionDescriptors.h
index f9b16e4218..4700839cfd 100644
--- a/arm_compute/runtime/FunctionDescriptors.h
+++ b/arm_compute/runtime/FunctionDescriptors.h
@@ -23,7 +23,6 @@
*/
#ifndef __ARM_COMPUTE_RUNTIME_FUNCTION_DESCRIPTORS_H__
#define __ARM_COMPUTE_RUNTIME_FUNCTION_DESCRIPTORS_H__
-
#include <utility>
namespace arm_compute
diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h
index 869eb523dd..d84422f882 100644
--- a/arm_compute/runtime/NEON/NEFunctions.h
+++ b/arm_compute/runtime/NEON/NEFunctions.h
@@ -65,6 +65,7 @@
#include "arm_compute/runtime/NEON/functions/NEErode.h"
#include "arm_compute/runtime/NEON/functions/NEFFT1D.h"
#include "arm_compute/runtime/NEON/functions/NEFFT2D.h"
+#include "arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEFastCorners.h"
#include "arm_compute/runtime/NEON/functions/NEFillBorder.h"
#include "arm_compute/runtime/NEON/functions/NEFlattenLayer.h"
diff --git a/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h
index 5b53aec9e6..3c1d20aeb8 100644
--- a/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,6 +29,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h"
#include <memory>
@@ -41,6 +42,7 @@ class ITensor;
* -# @ref NEGEMMConvolutionLayer (executed only in case GEMM is required for the operation)
* -# @ref NEWinogradConvolutionLayer (executed only in case Winograd is required for the operation)
* -# @ref NEDirectConvolutionLayer (executed only in case Direct Convolution is required for the operation)
+ * -# @ref NEFFTConvolutionLayer (executed only in case FFT is required for the operation)
*/
class NEConvolutionLayer : public IFunction
{
diff --git a/arm_compute/runtime/NEON/functions/NEFFT1D.h b/arm_compute/runtime/NEON/functions/NEFFT1D.h
index c706936f77..ca3ef16d8f 100644
--- a/arm_compute/runtime/NEON/functions/NEFFT1D.h
+++ b/arm_compute/runtime/NEON/functions/NEFFT1D.h
@@ -51,8 +51,9 @@ public:
NEFFT1D(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Initialise the function's source and destinations.
*
- * @param[in] input Source tensor. Data types supported: F32.
- * @param[out] output Destination tensor. Data types and data layouts supported: Same as @p input.
+ * @param[in] input Source tensor. Data types supported: F32. Number of channels supported: 1 (real tensor) or 2 (complex tensor).
+ * @param[out] output Destination tensor. Data types and data layouts supported: Same as @p input.
+ * Number of channels supported: 1 (real tensor) or 2 (complex tensor). If @p input is real, @p output must be complex.
* @param[in] config FFT related configuration
*/
void configure(const ITensor *input, ITensor *output, const FFT1DInfo &config);
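A minimal forward-transform sketch for NEFFT1D with a real input and a complex output, as documented above; the FFT1DInfo fields (axis, direction) are assumed from FunctionDescriptors.h and the 128-point length is illustrative.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/FunctionDescriptors.h"
#include "arm_compute/runtime/NEON/functions/NEFFT1D.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

void run_fft1d()
{
    Tensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::F32)); // real input
    dst.allocator()->init(TensorInfo(TensorShape(128U), 2, DataType::F32)); // complex output

    FFT1DInfo fft_info{};
    fft_info.axis      = 0;                     // assumed field: dimension to transform
    fft_info.direction = FFTDirection::Forward; // assumed field/enum from FunctionDescriptors.h

    NEFFT1D fft;
    fft.configure(&src, &dst, fft_info);

    src.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill src with samples ...
    fft.run();
}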
diff --git a/arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h
new file mode 100644
index 0000000000..32c6eaa569
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEFFTCONVOLUTIONLAYER_H__
+#define __ARM_COMPUTE_NEFFTCONVOLUTIONLAYER_H__
+
+#include "arm_compute/runtime/IFunction.h"
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
+#include "arm_compute/runtime/NEON/functions/NEFFT2D.h"
+#include "arm_compute/runtime/NEON/functions/NEPadLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEPermute.h"
+#include "arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h"
+#include "arm_compute/runtime/NEON/functions/NEReductionOperation.h"
+#include "arm_compute/runtime/NEON/functions/NEReshapeLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEReverse.h"
+#include "arm_compute/runtime/NEON/functions/NESlice.h"
+
+namespace arm_compute
+{
+// Forward declarations
+class ITensor;
+
+/** Basic function to execute FFT-based convolution on NEON. This function calls the following NEON functions/kernels:
+ *
+ * -# @ref NEPermute Permute input if NHWC (only NCHW is supported).
+ * -# @ref NEPadLayer Pad input.
+ * -# @ref NEFFT2D Forward transform to the frequency domain.
+ * -# @ref NEComplexPixelWiseMultiplication Complex element-wise product of input and the weights.
+ * -# @ref NEReductionOperation Reduction across channels.
+ * -# @ref NEFFT2D Inverse transform back to the time domain.
+ * -# @ref NESlice Extract valid output.
+ * -# @ref NEArithmeticAddition Add bias.
+ * -# @ref NEActivationLayer Perform activation.
+ * -# @ref NEPermute Permute output if NHWC (only NCHW is supported).
+ */
+class NEFFTConvolutionLayer : public IFunction
+{
+public:
+ /** Default constructor */
+ NEFFTConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEFFTConvolutionLayer(const NEFFTConvolutionLayer &) = delete;
+ /** Default move constructor */
+ NEFFTConvolutionLayer(NEFFTConvolutionLayer &&) = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEFFTConvolutionLayer &operator=(const NEFFTConvolutionLayer &) = delete;
+ /** Default move assignment operator */
+ NEFFTConvolutionLayer &operator=(NEFFTConvolutionLayer &&) = default;
+ /** Set the input and output tensors.
+ *
+ * @note: This function only supports square kernel sizes and unit strides, for both NCHW and NHWC data layouts
+ *
+ * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
+ * while every optional dimension from 4 and above represent a batch of inputs.
+ * Data types supported: F32.
+ * @param[in] weights Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
+ * @param[in] biases Biases tensor. Shared biases supported. Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input
+ * @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
+ * Data types supported: Same as @p input.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ */
+ void configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo());
+ /** Static function to check if given info will lead to a valid configuration of @ref NEFFTConvolutionLayer
+ *
+ * @note: This function only supports square kernel sizes and unit strides, for both NCHW and NHWC data layouts
+ *
+ * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
+ * while every optional dimension from 4 and above represent a batch of inputs.
+ * Data types supported: F32.
+ * @param[in] weights Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
+ * @param[in] biases Biases tensor. Shared biases supported. Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input
+ * @param[in] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
+ * Data types supported: Same as @p input.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+ // Inherited methods overridden:
+ void run() override;
+ void prepare() override;
+
+private:
+ MemoryGroup _memory_group;
+ NEReverse _flip_weights_func;
+ NEPermute _permute_input_func;
+ NEPermute _permute_output_func;
+ NEPermute _permute_weights_func;
+ NEPermute _permute_bias_func;
+ NEPadLayer _pad_input_func;
+ NEPadLayer _pad_weights_func;
+ NEFFT2D _transform_input_func;
+ std::unique_ptr<NEFFT2D> _transform_weights_func;
+ NEFFT2D _itransform_output_func;
+ NEComplexPixelWiseMultiplication _prod_func;
+ NEReductionOperation _reduce_func;
+ NESlice _extract_output_func;
+ NEArithmeticAddition _bias_add_func;
+ NEActivationLayer _activation_layer_func;
+
+ Tensor _permuted_input;
+ Tensor _permuted_weights;
+ Tensor _permuted_bias;
+ Tensor _permuted_output;
+ Tensor _padded_input;
+ Tensor _padded_weights;
+ Tensor _flip_axis;
+ Tensor _flipped_weights;
+ Tensor _transformed_input;
+ Tensor _transformed_weights;
+ Tensor _input_weights_product;
+ Tensor _output_product;
+ Tensor _output_reduced;
+ Tensor _itransformed_output;
+ Tensor _reshaped_output;
+ Tensor _bias_output;
+
+ const ITensor *_original_weights;
+ const ITensor *_original_bias;
+ bool _is_activationlayer_enabled;
+ bool _needs_permute;
+ bool _has_bias;
+ bool _is_prepared;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NEFFTCONVOLUTIONLAYER_H__ */
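A minimal NCHW usage sketch for the new NEFFTConvolutionLayer; the shapes, the "same" padding and the fused ReLU are illustrative, and the input, weights and biases are assumed to be filled elsewhere.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

void run_fft_convolution()
{
    Tensor src, weights, biases, dst;
    src.allocator()->init(TensorInfo(TensorShape(64U, 64U, 3U), 1, DataType::F32));        // [W, H, IFM]
    weights.allocator()->init(TensorInfo(TensorShape(9U, 9U, 3U, 16U), 1, DataType::F32)); // [Kx, Ky, IFM, OFM], square kernel
    biases.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));              // [OFM]
    dst.allocator()->init(TensorInfo(TensorShape(64U, 64U, 16U), 1, DataType::F32));       // [W, H, OFM]

    const PadStrideInfo conv_info(1, 1, 4, 4); // unit strides; pad 4 keeps the 9x9 output the same size

    NEFFTConvolutionLayer conv;
    conv.configure(&src, &weights, &biases, &dst, conv_info,
                   ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

    for(Tensor *t : { &src, &weights, &biases, &dst })
    {
        t->allocator()->allocate();
    }
    // ... fill src, weights and biases ...
    conv.run();
}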
diff --git a/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h b/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
index 869dd4e1d5..53c27c47bf 100644
--- a/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
+++ b/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
@@ -68,5 +68,27 @@ public:
*/
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
};
+
+/** Basic function to run @ref NEComplexPixelWiseMultiplicationKernel. */
+class NEComplexPixelWiseMultiplication : public INESimpleFunction
+{
+public:
+ /** Initialise the kernel's inputs, output.
+ *
+ * @param[in, out] input1 An input tensor. Data types supported: F32. Number of channels supported: 2 (complex tensor).
+ * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+ * @param[in, out] input2 An input tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+ * @param[out] output The output tensor. Data types supported: same as @p input1. Number of channels: same as @p input1.
+ */
+ void configure(ITensor *input1, ITensor *input2, ITensor *output);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEComplexPixelWiseMultiplication
+ *
+ * @param[in] input1 An input tensor info. Data types supported: F32. Number of channels supported: 2 (complex tensor).
+ * @param[in] input2 An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ * @param[in] output The output tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ */
+ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
+};
}
#endif /*__ARM_COMPUTE_NEPIXELWISEMULTIPLICATION_H__ */
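Finally, a usage sketch for the new runtime function NEComplexPixelWiseMultiplication; every tensor is 2-channel (complex) F32 holding interleaved real/imaginary values, and the shape is illustrative.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

void run_complex_multiplication()
{
    Tensor a, b, out;
    a.allocator()->init(TensorInfo(TensorShape(128U, 64U), 2, DataType::F32));
    b.allocator()->init(TensorInfo(TensorShape(128U, 64U), 2, DataType::F32));
    out.allocator()->init(TensorInfo(TensorShape(128U, 64U), 2, DataType::F32));

    NEComplexPixelWiseMultiplication cmul;
    cmul.configure(&a, &b, &out);

    a.allocator()->allocate();
    b.allocator()->allocate();
    out.allocator()->allocate();
    // ... fill a and b with interleaved (real, imaginary) pairs ...
    cmul.run();
}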