author     giuros01 <giuseppe.rossini@arm.com>            2019-03-26 17:44:40 +0000
committer  Giuseppe Rossini <giuseppe.rossini@arm.com>    2019-05-09 12:38:22 +0000
commit     154bc1c3e6a0182e2130c7966af3944ee6ca20b3 (patch)
tree       6cf717250870f311c99a4fbb6cdae4dfa84d5aae
parent     ae1a89ed670956b9722fe57c2dc36c75e5f948ec (diff)
download   ComputeLibrary-154bc1c3e6a0182e2130c7966af3944ee6ca20b3.tar.gz
COMPMID-1973: Implement FFTConvolutionLayer on NEON
Change-Id: I2e667c0411bda0164a616ffe44473a78de6752c9
Signed-off-by: giuros01 <giuseppe.rossini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1066
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  arm_compute/core/NEON/kernels/NEFFTDigitReverseKernel.h          34
-rw-r--r--  arm_compute/core/NEON/kernels/NEFFTRadixStageKernel.h             8
-rw-r--r--  arm_compute/core/NEON/kernels/NEFFTScaleKernel.h                   8
-rw-r--r--  arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h   46
-rw-r--r--  arm_compute/runtime/FunctionDescriptors.h                          1
-rw-r--r--  arm_compute/runtime/NEON/NEFunctions.h                             1
-rw-r--r--  arm_compute/runtime/NEON/functions/NEConvolutionLayer.h            4
-rw-r--r--  arm_compute/runtime/NEON/functions/NEFFT1D.h                       5
-rw-r--r--  arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h       154
-rw-r--r--  arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h    22
-rw-r--r--  src/core/NEON/kernels/NEFFTDigitReverseKernel.cpp                199
-rw-r--r--  src/core/NEON/kernels/NEFFTScaleKernel.cpp                         2
-rw-r--r--  src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp        137
-rw-r--r--  src/core/NEON/kernels/NEReductionOperationKernel.cpp             101
-rw-r--r--  src/runtime/NEON/functions/NEConvolutionLayer.cpp                 27
-rw-r--r--  src/runtime/NEON/functions/NEFFT1D.cpp                            39
-rw-r--r--  src/runtime/NEON/functions/NEFFTConvolutionLayer.cpp             381
-rw-r--r--  src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp          30
-rw-r--r--  src/runtime/NEON/functions/NEReductionOperation.cpp                3
-rw-r--r--  tests/benchmark/NEON/ConvolutionLayer.cpp                         13
-rw-r--r--  tests/benchmark/NEON/FFT.cpp                                      60
-rw-r--r--  tests/validation/NEON/FFT.cpp                                     21
22 files changed, 1219 insertions(+), 77 deletions(-)
diff --git a/arm_compute/core/NEON/kernels/NEFFTDigitReverseKernel.h b/arm_compute/core/NEON/kernels/NEFFTDigitReverseKernel.h
index 84d55fd8f4..0090c8cb14 100644
--- a/arm_compute/core/NEON/kernels/NEFFTDigitReverseKernel.h
+++ b/arm_compute/core/NEON/kernels/NEFFTDigitReverseKernel.h
@@ -24,6 +24,7 @@
#ifndef __ARM_COMPUTE_NEFFTDIGITREVERSEKERNEL_H__
#define __ARM_COMPUTE_NEFFTDIGITREVERSEKERNEL_H__
+#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/NEON/INEKernel.h"
namespace arm_compute
@@ -53,31 +54,40 @@ public:
~NEFFTDigitReverseKernel() = default;
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data types supported: F32.
- * @param[out] output Destination tensor. Data type supported: same as @p input
+ * @param[in] input Source tensor. Data types supported: F32. Number of channels supported: 1 (real tensor) or 2 (complex tensor).
+ * @param[out] output Destination tensor. Data type supported: same as @p input. Number of channels supported: 2 (complex tensor).
* @param[in] idx Digit reverse index tensor. Data type supported: U32
- * @param[in] axis Axis to perform digit reverse on.
+ * @param[in] config Kernel configuration.
*/
- void configure(const ITensor *input, ITensor *output, const ITensor *idx, unsigned int axis);
+ void configure(const ITensor *input, ITensor *output, const ITensor *idx, const FFTDigitReverseKernelInfo &config);
+
/** Static function to check if given info will lead to a valid configuration of @ref NEFFTDigitReverseKernel
*
- * @param[in] input Source tensor info. Data types supported: F32.
- * @param[in] output Destination tensor info. Data type supported: same as @p input
+ * @param[in] input Source tensor info. Data types supported: F32. Number of channels supported: 1 (real tensor) or 2 (complex tensor).
+ * @param[in] output Destination tensor info. Data type supported: same as @p input. Number of channels supported: 2 (complex tensor).
* @param[in] idx Digit reverse index tensor info. Data type supported: U32
- * @param[in] axis Axis to perform digit reverse on.
+ * @param[in] config Kernel configuration
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *idx, unsigned int axis);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *idx, const FFTDigitReverseKernelInfo &config);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
private:
- const ITensor *_input;
- ITensor *_output;
- const ITensor *_idx;
- unsigned int _axis;
+ using NEFFTDigitReverseKernelFunctionPtr = void (NEFFTDigitReverseKernel::*)(const Window &window);
+
+ template <bool is_input_complex, bool is_conj>
+ void digit_reverse_kernel_axis_0(const Window &window);
+
+ template <bool is_input_complex, bool is_conj>
+ void digit_reverse_kernel_axis_1(const Window &window);
+
+ NEFFTDigitReverseKernelFunctionPtr _func;
+ const ITensor *_input;
+ ITensor *_output;
+ const ITensor *_idx;
};
} // namespace arm_compute
#endif /*__ARM_COMPUTE_NEFFTDIGITREVERSEKERNEL_H__ */
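The header above replaces the bare axis argument with an FFTDigitReverseKernelInfo descriptor. A minimal caller-side sketch (assuming the descriptor exposes the axis and conjugate fields that NEFFT1D fills in further down in this patch; tensor setup omitted):

    // Hypothetical usage sketch, not part of the patch.
    FFTDigitReverseKernelInfo digit_reverse_config;
    digit_reverse_config.axis      = 0;     // digit-reverse along the first dimension
    digit_reverse_config.conjugate = false; // conjugate only for inverse transforms

    NEFFTDigitReverseKernel digit_reverse_kernel;
    // input: 1- or 2-channel F32 tensor, output: 2-channel F32 tensor, idx: U32 look-up tensor
    digit_reverse_kernel.configure(&input, &output, &idx, digit_reverse_config);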
diff --git a/arm_compute/core/NEON/kernels/NEFFTRadixStageKernel.h b/arm_compute/core/NEON/kernels/NEFFTRadixStageKernel.h
index 8498d3c613..2210980816 100644
--- a/arm_compute/core/NEON/kernels/NEFFTRadixStageKernel.h
+++ b/arm_compute/core/NEON/kernels/NEFFTRadixStageKernel.h
@@ -59,15 +59,15 @@ public:
*
* @note If the output tensor is nullptr, the FFT will be performed in-place
*
- * @param[in,out] input Source tensor. Data types supported: F32.
- * @param[out] output Destination tensor. Data type supported: same as @p input
+ * @param[in,out] input Source tensor. Data types supported: F32. Number of channels supported: 2 (complex tensor).
+ * @param[out] output Destination tensor. Data type supported: same as @p input. Number of channels supported: same as @p input.
* @param[in] config FFT descriptor metadata.
*/
void configure(ITensor *input, ITensor *output, const FFTRadixStageKernelInfo &config);
/** Static function to check if given info will lead to a valid configuration of @ref NEFFTRadixStageKernel
*
- * @param[in] input Source tensor info. Data types supported: F32.
- * @param[in] output Destination tensor info. Data type supported: same as @p input
+ * @param[in] input Source tensor info. Data types supported: F32. Number of channels supported: 2 (complex tensor).
+ * @param[in] output Destination tensor info. Data type supported: same as @p input. Number of channels supported: same as @p input.
* @param[in] config FFT descriptor metadata.
*
* @return a status
diff --git a/arm_compute/core/NEON/kernels/NEFFTScaleKernel.h b/arm_compute/core/NEON/kernels/NEFFTScaleKernel.h
index 5a19af7e62..51e6d5ab20 100644
--- a/arm_compute/core/NEON/kernels/NEFFTScaleKernel.h
+++ b/arm_compute/core/NEON/kernels/NEFFTScaleKernel.h
@@ -55,15 +55,15 @@ public:
~NEFFTScaleKernel() = default;
/** Set the input and output tensors.
*
- * @param[in,out] input Source tensor. Data types supported: F32.
- * @param[out] output Destination tensor. Data type supported: same as @p input
+ * @param[in,out] input Source tensor. Data types supported: F32. Number of channels supported: 2 (complex tensor).
+ * @param[out] output Destination tensor. Data type supported: same as @p input. Number of channels supported: 1 (real tensor) or 2 (complex tensor).
* @param[in] config Kernel configuration
*/
void configure(ITensor *input, ITensor *output, const FFTScaleKernelInfo &config);
/** Static function to check if given info will lead to a valid configuration of @ref NEFFTScaleKernel
*
- * @param[in] input Source tensor info. Data types supported: F32.
- * @param[in] output Destination tensor info. Data type supported: same as @p input
+ * @param[in] input Source tensor info. Data types supported: F32. Number of channels supported: 2 (complex tensor).
+ * @param[in] output Destination tensor info. Data type supported: same as @p input. Number of channels supported: 1 (real tensor) or 2 (complex tensor).
* @param[in] config Kernel configuration
*
* @return a status
diff --git a/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h b/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
index 2a8e36b1de..cbb961f235 100644
--- a/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
+++ b/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
@@ -128,5 +128,51 @@ private:
float _scale;
int _scale_exponent;
};
+
+/** Interface for the complex pixelwise multiplication kernel. */
+class NEComplexPixelWiseMultiplicationKernel : public INEKernel
+{
+public:
+ const char *name() const override
+ {
+ return "NEComplexPixelWiseMultiplicationKernel";
+ }
+ /** Default constructor.*/
+ NEComplexPixelWiseMultiplicationKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEComplexPixelWiseMultiplicationKernel(const NEComplexPixelWiseMultiplicationKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEComplexPixelWiseMultiplicationKernel &operator=(const NEComplexPixelWiseMultiplicationKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ NEComplexPixelWiseMultiplicationKernel(NEComplexPixelWiseMultiplicationKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ NEComplexPixelWiseMultiplicationKernel &operator=(NEComplexPixelWiseMultiplicationKernel &&) = default;
+ /** Initialise the kernel's input, output and border mode.
+ *
+ * @param[in] input1 An input tensor. Data types supported: F32. Number of channels supported: 2 (complex tensor).
+ * @param[in] input2 An input tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ * @param[out] output The output tensor, Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ */
+ void configure(const ITensor *input1, const ITensor *input2, ITensor *output);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEComplexPixelWiseMultiplicationKernel
+ *
+ * @param[in] input1 An input tensor info. Data types supported: F32. Number of channels supported: 2 (complex tensor).
+ * @param[in] input2 An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ * @param[in] output The output tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
+
+ // Inherited methods overridden:
+ void run(const Window &window, const ThreadInfo &info) override;
+ BorderSize border_size() const override;
+
+private:
+ const ITensor *_input1;
+ const ITensor *_input2;
+ ITensor *_output;
+};
+
} // namespace arm_compute
#endif /*__ARM_COMPUTE_NEPIXELWISEMULTIPLICATIONKERNEL_H__ */
diff --git a/arm_compute/runtime/FunctionDescriptors.h b/arm_compute/runtime/FunctionDescriptors.h
index f9b16e4218..4700839cfd 100644
--- a/arm_compute/runtime/FunctionDescriptors.h
+++ b/arm_compute/runtime/FunctionDescriptors.h
@@ -23,7 +23,6 @@
*/
#ifndef __ARM_COMPUTE_RUNTIME_FUNCTION_DESCRIPTORS_H__
#define __ARM_COMPUTE_RUNTIME_FUNCTION_DESCRIPTORS_H__
-
#include <utility>
namespace arm_compute
diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h
index 869eb523dd..d84422f882 100644
--- a/arm_compute/runtime/NEON/NEFunctions.h
+++ b/arm_compute/runtime/NEON/NEFunctions.h
@@ -65,6 +65,7 @@
#include "arm_compute/runtime/NEON/functions/NEErode.h"
#include "arm_compute/runtime/NEON/functions/NEFFT1D.h"
#include "arm_compute/runtime/NEON/functions/NEFFT2D.h"
+#include "arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEFastCorners.h"
#include "arm_compute/runtime/NEON/functions/NEFillBorder.h"
#include "arm_compute/runtime/NEON/functions/NEFlattenLayer.h"
diff --git a/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h
index 5b53aec9e6..3c1d20aeb8 100644
--- a/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,6 +29,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h"
#include <memory>
@@ -41,6 +42,7 @@ class ITensor;
* -# @ref NEGEMMConvolutionLayer (executed only in case GEMM is required for the operation)
* -# @ref NEWinogradConvolutionLayer (executed only in case Winograd is required for the operation)
* -# @ref NEDirectConvolutionLayer (executed only in case Direct Convolution is required for the operation)
+ * -# @ref NEFFTConvolutionLayer (executed only in case FFT is required for the operation)
*/
class NEConvolutionLayer : public IFunction
{
diff --git a/arm_compute/runtime/NEON/functions/NEFFT1D.h b/arm_compute/runtime/NEON/functions/NEFFT1D.h
index c706936f77..ca3ef16d8f 100644
--- a/arm_compute/runtime/NEON/functions/NEFFT1D.h
+++ b/arm_compute/runtime/NEON/functions/NEFFT1D.h
@@ -51,8 +51,9 @@ public:
NEFFT1D(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Initialise the function's source and destinations.
*
- * @param[in] input Source tensor. Data types supported: F32.
- * @param[out] output Destination tensor. Data types and data layouts supported: Same as @p input.
+ * @param[in] input Source tensor. Data types supported: F32. Number of channels supported: 1 (real tensor) or 2 (complex tensor).
+ * @param[out] output Destination tensor. Data types and data layouts supported: Same as @p input.
+ * Number of channels supported: 1 (real tensor) or 2 (complex tensor). If @p input is real, @p output must be complex.
* @param[in] config FFT related configuration
*/
void configure(const ITensor *input, ITensor *output, const FFT1DInfo &config);
diff --git a/arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h
new file mode 100644
index 0000000000..32c6eaa569
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEFFTCONVOLUTIONLAYER_H__
+#define __ARM_COMPUTE_NEFFTCONVOLUTIONLAYER_H__
+
+#include "arm_compute/runtime/IFunction.h"
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
+#include "arm_compute/runtime/NEON/functions/NEFFT2D.h"
+#include "arm_compute/runtime/NEON/functions/NEPadLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEPermute.h"
+#include "arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h"
+#include "arm_compute/runtime/NEON/functions/NEReductionOperation.h"
+#include "arm_compute/runtime/NEON/functions/NEReshapeLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEReverse.h"
+#include "arm_compute/runtime/NEON/functions/NESlice.h"
+
+namespace arm_compute
+{
+// Forward declarations
+class ITensor;
+
+/** Basic function to execute FFT-based convolution on NEON. This function calls the following NEON functions/kernels:
+ *
+ * -# @ref NEPermute Permute input if NHWC (only NCHW is supported).
+ * -# @ref NEPadLayer Pad input.
+ * -# @ref NEFFT2D Forward transform to the frequency domain.
+ * -# @ref NEComplexPixelWiseMultiplication Complex element-wise product of input and the weights.
+ * -# @ref NEReductionOperation Reduction across channels.
+ * -# @ref NEFFT2D Inverse transform back to the time domain.
+ * -# @ref NEStridedSlice Extract valid output.
+ * -# @ref NEArithmeticAddition Add bias.
+ * -# @ref NEActivationLayer Perform activation.
+ * -# @ref NEPermute Permute output if NHWC (only NCHW is supported).
+ */
+class NEFFTConvolutionLayer : public IFunction
+{
+public:
+ /** Default constructor */
+ NEFFTConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEFFTConvolutionLayer(const NEFFTConvolutionLayer &) = delete;
+ /** Default move constructor */
+ NEFFTConvolutionLayer(NEFFTConvolutionLayer &&) = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEFFTConvolutionLayer &operator=(const NEFFTConvolutionLayer &) = delete;
+ /** Default move assignment operator */
+ NEFFTConvolutionLayer &operator=(NEFFTConvolutionLayer &&) = default;
+ /** Set the input and output tensors.
+ *
+ * @note: This function only works with any square kernel size and unit strides for both NCHW and NHWC data layout
+ *
+ * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
+ * while every optional dimension from 4 and above represent a batch of inputs.
+ * Data types supported: F32.
+ * @param[in] weights Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
+ * @param[in] biases Biases tensor. Shared biases supported. Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+ * @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
+ * Data types supported: Same as @p input.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ */
+ void configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo());
+ /** Static function to check if given info will lead to a valid configuration of @ref NEFFTConvolutionLayer
+ *
+ * @note: This function only works with any square kernel size and unit strides for both NCHW and NHWC data layout
+ *
+ * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
+ * while every optional dimension from 4 and above represent a batch of inputs.
+ * Data types supported: F32.
+ * @param[in] weights Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
+ * @param[in] biases Biases tensor. Shared biases supported. Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+ * @param[in] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
+ * Data types supported: Same as @p input.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+ // Inherited methods overridden:
+ void run() override;
+ void prepare() override;
+
+private:
+ MemoryGroup _memory_group;
+ NEReverse _flip_weights_func;
+ NEPermute _permute_input_func;
+ NEPermute _permute_output_func;
+ NEPermute _permute_weights_func;
+ NEPermute _permute_bias_func;
+ NEPadLayer _pad_input_func;
+ NEPadLayer _pad_weights_func;
+ NEFFT2D _transform_input_func;
+ std::unique_ptr<NEFFT2D> _transform_weights_func;
+ NEFFT2D _itransform_output_func;
+ NEComplexPixelWiseMultiplication _prod_func;
+ NEReductionOperation _reduce_func;
+ NESlice _extract_output_func;
+ NEArithmeticAddition _bias_add_func;
+ NEActivationLayer _activation_layer_func;
+
+ Tensor _permuted_input;
+ Tensor _permuted_weights;
+ Tensor _permuted_bias;
+ Tensor _permuted_output;
+ Tensor _padded_input;
+ Tensor _padded_weights;
+ Tensor _flip_axis;
+ Tensor _flipped_weights;
+ Tensor _transformed_input;
+ Tensor _transformed_weights;
+ Tensor _input_weights_product;
+ Tensor _output_product;
+ Tensor _output_reduced;
+ Tensor _itransformed_output;
+ Tensor _reshaped_output;
+ Tensor _bias_output;
+
+ const ITensor *_original_weights;
+ const ITensor *_original_bias;
+ bool _is_activationlayer_enabled;
+ bool _needs_permute;
+ bool _has_bias;
+ bool _is_prepared;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NEFFTCONVOLUTIONLAYER_H__ */
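The new layer computes the spatial convolution as a complex element-wise product in the frequency domain, which is why the pipeline documented above pads, transforms, multiplies, reduces over input feature maps and inverse-transforms. A minimal usage sketch, assuming already-allocated F32 NCHW tensors with illustrative names:

    // Hypothetical example: large square kernel and unit strides, as the layer requires.
    NEFFTConvolutionLayer fft_conv;
    PadStrideInfo         conv_info(1 /* stride_x */, 1 /* stride_y */, 4 /* pad_x */, 4 /* pad_y */);
    fft_conv.configure(&src, &weights, &biases, &dst, conv_info,
                       ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
    fft_conv.run();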
diff --git a/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h b/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
index 869dd4e1d5..53c27c47bf 100644
--- a/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
+++ b/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
@@ -68,5 +68,27 @@ public:
*/
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
};
+
+/** Basic function to run @ref NEComplexPixelWiseMultiplicationKernel. */
+class NEComplexPixelWiseMultiplication : public INESimpleFunction
+{
+public:
+ /** Initialise the kernel's inputs, output.
+ *
+ * @param[in, out] input1 An input tensor. Data types supported: F32. Number of channels supported: 2 (complex tensor).
+ * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+ * @param[in, out] input2 An input tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+ * @param[out] output The output tensor. Data types supported: same as @p input1. Number of channels: same as @p input1.
+ */
+ void configure(ITensor *input1, ITensor *input2, ITensor *output);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEComplexPixelWiseMultiplication
+ *
+ * @param[in] input1 An input tensor info. Data types supported: F32. Number of channels supported: 2 (complex tensor).
+ * @param[in] input2 An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ * @param[in] output The output tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ */
+ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
+};
}
#endif /*__ARM_COMPUTE_NEPIXELWISEMULTIPLICATION_H__ */
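Element-wise complex multiplication on the 2-channel (interleaved real/imaginary) F32 tensors follows the usual rule (a + ib)(c + id) = (ac - bd) + i(ad + bc). A scalar reference sketch of what the function computes per element, for illustration only:

    #include <cstddef>

    // Reference-only sketch: out = in1 * in2, complex element-wise,
    // with every tensor stored as interleaved { real, imag } float pairs.
    void complex_multiply_reference(const float *in1, const float *in2, float *out, std::size_t num_complex)
    {
        for(std::size_t i = 0; i < num_complex; ++i)
        {
            const float a = in1[2 * i];     // Re(x)
            const float b = in1[2 * i + 1]; // Im(x)
            const float c = in2[2 * i];     // Re(y)
            const float d = in2[2 * i + 1]; // Im(y)
            out[2 * i]     = a * c - b * d; // Re(x * y)
            out[2 * i + 1] = a * d + b * c; // Im(x * y)
        }
    }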
diff --git a/src/core/NEON/kernels/NEFFTDigitReverseKernel.cpp b/src/core/NEON/kernels/NEFFTDigitReverseKernel.cpp
index b2ffb01e99..cf77345da7 100644
--- a/src/core/NEON/kernels/NEFFTDigitReverseKernel.cpp
+++ b/src/core/NEON/kernels/NEFFTDigitReverseKernel.cpp
@@ -29,19 +29,24 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
+#include <set>
+
namespace arm_compute
{
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *idx, unsigned int axis)
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *idx, const FFTDigitReverseKernelInfo &config)
{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 2, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() != DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->num_channels() > 2);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(idx, 1, DataType::U32);
- ARM_COMPUTE_RETURN_ERROR_ON(axis > 1);
+ ARM_COMPUTE_RETURN_ERROR_ON(std::set<unsigned int>({ 0, 1 }).count(config.axis) == 0);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[config.axis] != idx->tensor_shape().x());
// Checks performed when output is configured
if((output != nullptr) && (output->total_size() != 0))
{
+ ARM_COMPUTE_RETURN_ERROR_ON(output->num_channels() != 2);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
}
@@ -49,75 +54,207 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
return Status{};
}
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, ITensorInfo *idx, unsigned int axis)
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, ITensorInfo *idx, const FFTDigitReverseKernelInfo &config)
{
- ARM_COMPUTE_UNUSED(idx, axis);
+ ARM_COMPUTE_UNUSED(idx, config);
- auto_init_if_empty(*output, *input);
+ auto_init_if_empty(*output, input->clone()->set_num_channels(2));
- Window win = calculate_max_window(*output, Steps());
- output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
+ Window win = calculate_max_window(*input, Steps());
+ input->set_valid_region(ValidRegion(Coordinates(), input->tensor_shape()));
return std::make_pair(Status{}, win);
}
} // namespace
NEFFTDigitReverseKernel::NEFFTDigitReverseKernel()
- : _input(nullptr), _output(nullptr), _idx(nullptr), _axis(0)
+ : _func(nullptr), _input(nullptr), _output(nullptr), _idx(nullptr)
{
}
-void NEFFTDigitReverseKernel::configure(const ITensor *input, ITensor *output, const ITensor *idx, unsigned int axis)
+void NEFFTDigitReverseKernel::configure(const ITensor *input, ITensor *output, const ITensor *idx, const FFTDigitReverseKernelInfo &config)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, idx);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), idx->info(), axis));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), idx->info(), config));
_input = input;
_output = output;
_idx = idx;
- _axis = axis;
+
+ const size_t axis = config.axis;
+ const bool is_conj = config.conjugate;
+ const bool is_input_complex = (input->info()->num_channels() == 2);
// Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), output->info(), idx->info(), axis);
+ auto win_config = validate_and_configure_window(input->info(), output->info(), idx->info(), config);
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
INEKernel::configure(win_config.second);
+
+ if(axis == 0)
+ {
+ if(is_input_complex)
+ {
+ if(is_conj)
+ {
+ _func = &NEFFTDigitReverseKernel::digit_reverse_kernel_axis_0<true, true>;
+ }
+ else
+ {
+ _func = &NEFFTDigitReverseKernel::digit_reverse_kernel_axis_0<true, false>;
+ }
+ }
+ else
+ {
+ _func = &NEFFTDigitReverseKernel::digit_reverse_kernel_axis_0<false, false>;
+ }
+ }
+ else if(axis == 1)
+ {
+ if(is_input_complex)
+ {
+ if(is_conj)
+ {
+ _func = &NEFFTDigitReverseKernel::digit_reverse_kernel_axis_1<true, true>;
+ }
+ else
+ {
+ _func = &NEFFTDigitReverseKernel::digit_reverse_kernel_axis_1<true, false>;
+ }
+ }
+ else
+ {
+ _func = &NEFFTDigitReverseKernel::digit_reverse_kernel_axis_1<false, false>;
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Not supported");
+ }
}
-Status NEFFTDigitReverseKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *idx, unsigned int axis)
+Status NEFFTDigitReverseKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *idx, const FFTDigitReverseKernelInfo &config)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, idx, axis));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), idx->clone().get(), axis).first);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, idx, config));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), idx->clone().get(), config).first);
return Status{};
}
-void NEFFTDigitReverseKernel::run(const Window &window, const ThreadInfo &info)
+template <bool is_input_complex, bool is_conj>
+void NEFFTDigitReverseKernel::digit_reverse_kernel_axis_0(const Window &window)
{
- ARM_COMPUTE_UNUSED(info);
- Iterator out(_output, window);
- const size_t element_size = _input->info()->element_size();
+ const size_t N = _input->info()->dimension(0);
+
+ // Copy the look-up buffer to a local array
+ std::vector<unsigned int> buffer_idx(N);
+ std::copy_n(reinterpret_cast<unsigned int *>(_idx->buffer()), N, buffer_idx.data());
+
+ // Input/output iterators
+ Window slice = window;
+ slice.set(0, Window::DimX);
+ Iterator in(_input, slice);
+ Iterator out(_output, slice);
+
+ // Row buffers
+ std::vector<float> buffer_row_out(2 * N);
+ std::vector<float> buffer_row_in(2 * N);
+
+ execute_window_loop(slice, [&](const Coordinates &)
+ {
+ if(is_input_complex)
+ {
+ // Load
+ memcpy(buffer_row_in.data(), reinterpret_cast<float *>(in.ptr()), 2 * N * sizeof(float));
- // Pointers to the buffers
- const size_t offset = _input->info()->offset_first_element_in_bytes();
- auto *idx_ptr = reinterpret_cast<unsigned int *>(_idx->buffer());
- uint8_t *input_ptr = offset + _input->buffer();
+ // Shuffle
+ for(size_t x = 0; x < 2 * N; x += 2)
+ {
+ size_t idx = buffer_idx[x / 2];
+ buffer_row_out[x] = buffer_row_in[2 * idx];
+ buffer_row_out[x + 1] = (is_conj ? -buffer_row_in[2 * idx + 1] : buffer_row_in[2 * idx + 1]);
+ }
+ }
+ else
+ {
+ // Load
+ memcpy(buffer_row_in.data(), reinterpret_cast<float *>(in.ptr()), N * sizeof(float));
+
+ // Shuffle
+ for(size_t x = 0; x < N; ++x)
+ {
+ size_t idx = buffer_idx[x];
+ buffer_row_out[2 * x] = buffer_row_in[idx];
+ }
+ }
+
+ // Copy back
+ memcpy(reinterpret_cast<float *>(out.ptr()), buffer_row_out.data(), 2 * N * sizeof(float));
+ },
+ in, out);
+}
+
+template <bool is_input_complex, bool is_conj>
+void NEFFTDigitReverseKernel::digit_reverse_kernel_axis_1(const Window &window)
+{
+ const size_t Nx = _input->info()->dimension(0);
+ const size_t Ny = _input->info()->dimension(1);
+
+ // Copy the look-up buffer to a local array
+ std::vector<unsigned int> buffer_idx(Ny);
+ std::copy_n(reinterpret_cast<unsigned int *>(_idx->buffer()), Ny, buffer_idx.data());
+
+ // Output iterator
+ Window slice = window;
+ slice.set(0, Window::DimX);
+ Iterator out(_output, slice);
+
+ // Row buffer
+ std::vector<float> buffer_row(Nx);
// Strides
- const size_t stride_x = _input->info()->strides_in_bytes()[0];
- const size_t stride_y = _input->info()->strides_in_bytes()[1];
const size_t stride_z = _input->info()->strides_in_bytes()[2];
const size_t stride_w = _input->info()->strides_in_bytes()[3];
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(slice, [&](const Coordinates & id)
{
- unsigned int in_index_1d = idx_ptr[id[_axis]];
- auto reverse_id = id;
- reverse_id.set(_axis, in_index_1d);
+ auto *out_ptr = reinterpret_cast<float *>(out.ptr());
+ auto *in_ptr = reinterpret_cast<float *>(_input->buffer() + id.z() * stride_z + id[3] * stride_w);
+ const size_t y_shuffled = buffer_idx[id.y()];
+
+ if(is_input_complex)
+ {
+ // Shuffle the entire row into the output
+ memcpy(out_ptr, in_ptr + 2 * Nx * y_shuffled, 2 * Nx * sizeof(float));
- memcpy(out.ptr(), input_ptr + reverse_id.x() * stride_x + reverse_id.y() * stride_y + reverse_id.z() * stride_z + reverse_id[3] * stride_w, element_size);
+ // Conjugate if necessary
+ if(is_conj)
+ {
+ for(size_t x = 0; x < 2 * Nx; x += 2)
+ {
+ out_ptr[x + 1] = -out_ptr[x + 1];
+ }
+ }
+ }
+ else
+ {
+ // Shuffle the entire row into the buffer
+ memcpy(buffer_row.data(), in_ptr + Nx * y_shuffled, Nx * sizeof(float));
+
+ // Copy the buffer to the output, with a zero imaginary part
+ for(size_t x = 0; x < 2 * Nx; x += 2)
+ {
+ out_ptr[x] = buffer_row[x / 2];
+ }
+ }
},
out);
+}
+void NEFFTDigitReverseKernel::run(const Window &window, const ThreadInfo &info)
+{
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+ ARM_COMPUTE_UNUSED(info);
+ (this->*_func)(window);
}
+
} // namespace arm_compute
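In the axis-0 path above, a real (1-channel) input row is gathered through the digit-reverse look-up table and widened to an interleaved complex row with zero imaginary parts; the complex path gathers { real, imag } pairs and optionally negates the imaginary part. An illustrative scalar equivalent of the real-to-complex case:

    #include <cstddef>

    // Sketch of one row of the axis-0, real-input, no-conjugate case (not the kernel code itself).
    void digit_reverse_row_real_to_complex(const float *row_in, float *row_out,
                                           const unsigned int *idx, std::size_t N)
    {
        for(std::size_t x = 0; x < N; ++x)
        {
            row_out[2 * x]     = row_in[idx[x]]; // real part, gathered via the look-up table
            row_out[2 * x + 1] = 0.0f;           // imaginary part of a real input is zero
        }
    }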
diff --git a/src/core/NEON/kernels/NEFFTScaleKernel.cpp b/src/core/NEON/kernels/NEFFTScaleKernel.cpp
index 6568755e5d..56703bafcc 100644
--- a/src/core/NEON/kernels/NEFFTScaleKernel.cpp
+++ b/src/core/NEON/kernels/NEFFTScaleKernel.cpp
@@ -129,7 +129,7 @@ void NEFFTScaleKernel::run(const Window &window, const ThreadInfo &info)
execute_window_loop(window, [&](const Coordinates &)
{
- scale_complex(reinterpret_cast<float *>(out.ptr()), reinterpret_cast<float *>(in.ptr()), _is_conj, _scale);
+ scale_complex(reinterpret_cast<float *>(in.ptr()), reinterpret_cast<float *>(out.ptr()), _is_conj, _scale);
},
in, out);
}
diff --git a/src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp b/src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp
index b565300906..fa16484cd3 100644
--- a/src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp
+++ b/src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp
@@ -30,6 +30,7 @@
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/NEON/NEAsymm.h"
#include "arm_compute/core/NEON/NEFixedPoint.h"
+#include "arm_compute/core/NEON/wrapper/wrapper.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
@@ -353,6 +354,35 @@ void mul_F32_F32_F32_n(const void *__restrict input1_ptr, const void *__restrict
vst4q_f32(output, result);
}
+void c_mul_F32_F32_F32_n(const void *__restrict input1_ptr, const void *__restrict input2_ptr, void *__restrict output_ptr)
+{
+ const auto input1 = static_cast<const float *__restrict>(input1_ptr);
+ const auto input2 = static_cast<const float *__restrict>(input2_ptr);
+ const auto output = static_cast<float *__restrict>(output_ptr);
+
+ const float32x4_t a = wrapper::vloadq(input1);
+ float32x4_t b = wrapper::vloadq(input2);
+
+ using ExactTagType = typename wrapper::traits::neon_vector<float, 2>::tag_type;
+
+ const float32x4_t mask = { -1.0f, 1.0f, -1.0f, 1.0f };
+ const float32x2_t tmp00 = wrapper::vdup_n(wrapper::vgetlane(a, 0), ExactTagType{});
+ const float32x2_t tmp01 = wrapper::vdup_n(wrapper::vgetlane(a, 1), ExactTagType{});
+ const float32x2_t tmp10 = wrapper::vdup_n(wrapper::vgetlane(a, 2), ExactTagType{});
+ const float32x2_t tmp11 = wrapper::vdup_n(wrapper::vgetlane(a, 3), ExactTagType{});
+
+ const float32x4_t tmp0 = wrapper::vcombine(tmp00, tmp10);
+ const float32x4_t tmp1 = wrapper::vcombine(tmp01, tmp11);
+
+ float32x4_t res = wrapper::vmul(tmp0, b);
+
+ b = wrapper::vrev64(b);
+ b = wrapper::vmul(b, mask);
+
+ res = wrapper::vmla(res, tmp1, b);
+ wrapper::vstore(output, res);
+}
+
void mul_F16_F16_F16_n(const void *__restrict input1_ptr, const void *__restrict input2_ptr, void *__restrict output_ptr, float scale)
{
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
@@ -665,4 +695,111 @@ BorderSize NEPixelWiseMultiplicationKernel::border_size() const
const unsigned int border = std::min<unsigned int>(num_elems_processed_per_iteration - 1U, replicateSize);
return BorderSize{ 0, border, 0, 0 };
}
+
+namespace
+{
+constexpr unsigned int num_elems_processed_per_iteration_complex = 2;
+
+Status validate_arguments_complex(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 2, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 2, DataType::F32);
+
+ const TensorShape &out_shape = TensorShape::broadcast_shape(input1->tensor_shape(), input2->tensor_shape());
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");
+
+ // Validate in case of configured output
+ if(output->total_size() > 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 2, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output->tensor_shape(), 0), "Wrong shape for output");
+ }
+
+ return Status{};
+}
+
+std::pair<Status, Window> validate_and_configure_window_complex(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output)
+{
+ const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(*input1, *input2);
+ const TensorShape &out_shape = broadcast_pair.first;
+ const ValidRegion &valid_region = broadcast_pair.second;
+
+ // Auto initialize output if not initialized
+ const TensorInfo out_info(out_shape, input1->num_channels(), input1->data_type());
+ auto_init_if_empty(*output, out_info);
+
+ Window win = calculate_max_window(valid_region, Steps(num_elems_processed_per_iteration_complex));
+ Window win_input1 = win.broadcast_if_dimension_le_one(*input1);
+ Window win_input2 = win.broadcast_if_dimension_le_one(*input2);
+
+ AccessWindowHorizontal input1_access(input1, 0, num_elems_processed_per_iteration_complex);
+ AccessWindowHorizontal input2_access(input2, 0, num_elems_processed_per_iteration_complex);
+ AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration_complex);
+
+ bool window_changed = update_window_and_padding(win_input1, input1_access)
+ || update_window_and_padding(win_input2, input2_access)
+ || update_window_and_padding(win, output_access);
+
+ output_access.set_valid_region(win, valid_region);
+
+ Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+ return std::make_pair(err, win);
+}
+} // namespace
+
+NEComplexPixelWiseMultiplicationKernel::NEComplexPixelWiseMultiplicationKernel()
+ : _input1(nullptr), _input2(nullptr), _output(nullptr)
+{
+}
+
+void NEComplexPixelWiseMultiplicationKernel::configure(const ITensor *input1, const ITensor *input2, ITensor *output)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_complex(input1->info(), input2->info(), output->info()));
+
+ // Configure kernel window
+ auto win_config = validate_and_configure_window_complex(input1->info(), input2->info(), output->info());
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+
+ _input1 = input1;
+ _input2 = input2;
+ _output = output;
+
+ // Create kernel
+ INEKernel::configure(win_config.second);
+}
+
+Status NEComplexPixelWiseMultiplicationKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_complex(input1, input2, output));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_complex(input1->clone().get(), input2->clone().get(), output->clone().get()).first);
+
+ return Status{};
+}
+
+void NEComplexPixelWiseMultiplicationKernel::run(const Window &window, const ThreadInfo &info)
+{
+ ARM_COMPUTE_UNUSED(info);
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+
+ Iterator input1(_input1, window.broadcast_if_dimension_le_one(_input1->info()->tensor_shape()));
+ Iterator input2(_input2, window.broadcast_if_dimension_le_one(_input2->info()->tensor_shape()));
+ Iterator output(_output, window);
+
+ execute_window_loop(window, [&](const Coordinates &)
+ {
+ c_mul_F32_F32_F32_n(input1.ptr(), input2.ptr(), output.ptr());
+ },
+ input1, input2, output);
+}
+
+BorderSize NEComplexPixelWiseMultiplicationKernel::border_size() const
+{
+ const unsigned int replicateSize = _output->info()->dimension(0) - std::min(_input1->info()->dimension(0), _input2->info()->dimension(0));
+ const unsigned int border = std::min<unsigned int>(num_elems_processed_per_iteration_complex - 1U, replicateSize);
+ return { 0, border, 0, 0 };
+}
} // namespace arm_compute
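For reference, the lane contents of one iteration of c_mul_F32_F32_F32_n above work out as follows (two complex values per 128-bit vector); this is an annotation of the existing code based on the standard NEON semantics of the wrapper calls used:

    // a                 = { a0, b0, a1, b1 }   two complex operands (a0 + i*b0), (a1 + i*b1)
    // b                 = { c0, d0, c1, d1 }
    // tmp0              = { a0, a0, a1, a1 }   real parts broadcast per complex pair
    // tmp1              = { b0, b0, b1, b1 }   imaginary parts broadcast per complex pair
    // res  = tmp0 * b   = { a0*c0, a0*d0, a1*c1, a1*d1 }
    // vrev64(b) * mask  = { -d0, c0, -d1, c1 }
    // res += tmp1 * (vrev64(b) * mask)
    //                   = { a0*c0 - b0*d0, a0*d0 + b0*c0, a1*c1 - b1*d1, a1*d1 + b1*c1 }
    //                   = { Re(x0*y0),     Im(x0*y0),     Re(x1*y1),     Im(x1*y1) }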
diff --git a/src/core/NEON/kernels/NEReductionOperationKernel.cpp b/src/core/NEON/kernels/NEReductionOperationKernel.cpp
index e6fdba2696..aa20d1f40d 100644
--- a/src/core/NEON/kernels/NEReductionOperationKernel.cpp
+++ b/src/core/NEON/kernels/NEReductionOperationKernel.cpp
@@ -602,7 +602,7 @@ struct RedOpYZW
{
ARM_COMPUTE_UNUSED(out_slice);
- execute_window_loop(in_slice, [&](const Coordinates & id)
+ execute_window_loop(in_slice, [&](const Coordinates &)
{
neon_vector vec_res_value = { 0 };
if(op == ReductionOperation::ARG_IDX_MAX || op == ReductionOperation::ARG_IDX_MIN)
@@ -688,13 +688,70 @@ struct RedOpYZW
}
};
+template <typename T, int S, int axis, ReductionOperation op>
+struct RedOpYZW_complex
+{
+ /** NEON vector tag type. */
+ using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
+ using neon_vector = typename wrapper::traits::neon_vector<T, S>::type;
+
+ inline void operator()(Iterator &input, Iterator &output, Window &in_slice, Window &out_slice, const TensorInfo &in_info, int, const ReductionOperation)
+ {
+ ARM_COMPUTE_UNUSED(out_slice);
+ ARM_COMPUTE_ERROR_ON(axis != 2);
+
+ const size_t stride_z = in_info.strides_in_bytes()[axis];
+
+ execute_window_loop(in_slice, [&](const Coordinates &)
+ {
+ neon_vector vec_res_value_0 = { 0 };
+ neon_vector vec_res_value_1 = { 0 };
+
+ vec_res_value_0 = wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{});
+ vec_res_value_1 = wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{});
+
+ for(unsigned int dim = 0; dim < in_info.dimension(axis); ++dim)
+ {
+ T *in_ptr_0;
+ T *in_ptr_1;
+ switch(axis)
+ {
+ case 2:
+ in_ptr_0 = reinterpret_cast<T *>(input.ptr() + stride_z * dim);
+ in_ptr_1 = reinterpret_cast<T *>(input.ptr() + 16 + stride_z * dim);
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Not supported");
+ }
+ const auto vec_elements_0 = wrapper::vloadq(in_ptr_0);
+ const auto vec_elements_1 = wrapper::vloadq(in_ptr_1);
+
+ switch(op)
+ {
+ case ReductionOperation::SUM:
+ vec_res_value_0 = wrapper::vadd(vec_elements_0, vec_res_value_0);
+ vec_res_value_1 = wrapper::vadd(vec_elements_1, vec_res_value_1);
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Not supported");
+ }
+ }
+
+ wrapper::vstore(reinterpret_cast<T *>(output.ptr()), vec_res_value_0);
+ wrapper::vstore(reinterpret_cast<T *>(output.ptr() + 16), vec_res_value_1);
+
+ },
+ input, output);
+ }
+};
+
struct RedOpYZW_qasymm8
{
inline void operator()(Iterator &input, Iterator &output, Window &in_slice, Window &out_slice, const TensorInfo &in_info, int axis, const ReductionOperation op)
{
ARM_COMPUTE_UNUSED(out_slice);
- execute_window_loop(in_slice, [&](const Coordinates & id)
+ execute_window_loop(in_slice, [&](const Coordinates &)
{
uint32x4x4_t vec_res_idx{ { 0 } };
auto vec_res_value1 = vdupq_n_u32(0);
@@ -848,6 +905,31 @@ struct RedOpYZW_qasymm8
void reduce_op(const Window &window, const ITensor *input, ITensor *output, unsigned int axis, const ReductionOperation op)
{
+ const bool is_complex = (input->info()->num_channels() == 2);
+
+ if(is_complex)
+ {
+ switch(axis)
+ {
+ case 2:
+ switch(input->info()->data_type())
+ {
+ case DataType::F32:
+ switch(op)
+ {
+ case ReductionOperation::SUM:
+ return Reducer<RedOpYZW_complex<float, 4, 2, ReductionOperation::SUM>>::reduceZ(window, input, output, RedOpYZW_complex<float, 4, 2, ReductionOperation::SUM>(), op);
+ default:
+ ARM_COMPUTE_ERROR("Not supported");
+ }
+ default:
+ ARM_COMPUTE_ERROR("Not supported");
+ }
+ default:
+ ARM_COMPUTE_ERROR("Not supported");
+ }
+ }
+
switch(axis)
{
case 0:
@@ -917,7 +999,17 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, u
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
+
+ if(input->num_channels() == 1)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 2, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON(op != ReductionOperation::SUM);
+ ARM_COMPUTE_RETURN_ERROR_ON(axis != 2);
+ }
ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis");
@@ -929,6 +1021,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, u
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->num_channels() != output->num_channels());
}
else
{
@@ -951,7 +1044,7 @@ std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITe
// Output auto initialization if not yet initialized
const bool is_arg_min_max = (op == ReductionOperation::ARG_IDX_MIN || op == ReductionOperation::ARG_IDX_MAX);
DataType output_data_type = is_arg_min_max ? DataType::U32 : input->data_type();
- auto_init_if_empty(*output, output_shape, 1, output_data_type, input->quantization_info());
+ auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape).set_data_type(output_data_type).reset_padding().set_is_resizable(true));
unsigned int num_elems_processed_per_iteration = 16 / data_size_from_type(input->data_type());
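The new RedOpYZW_complex specialisation above sums 2-channel (complex) F32 values along axis 2, accumulating real and imaginary parts independently. A scalar sketch of the reduction for a single (x, y) position, using an element stride rather than the byte stride the kernel works with:

    #include <cstddef>

    // Reference-only sketch: sum the complex value at one (x, y) position across dimension 2.
    // in points to interleaved { real, imag } pairs; stride_z is given in complex elements.
    void reduce_complex_sum_z(const float *in, float *out, std::size_t depth, std::size_t stride_z)
    {
        float acc_re = 0.f;
        float acc_im = 0.f;
        for(std::size_t z = 0; z < depth; ++z)
        {
            acc_re += in[2 * z * stride_z];
            acc_im += in[2 * z * stride_z + 1];
        }
        out[0] = acc_re;
        out[1] = acc_im;
    }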
diff --git a/src/runtime/NEON/functions/NEConvolutionLayer.cpp b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
index 5059162032..a62459b3e8 100644
--- a/src/runtime/NEON/functions/NEConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
@@ -73,6 +73,13 @@ void NEConvolutionLayer::configure(ITensor *input, const ITensor *weights, const
_function = std::move(f);
break;
}
+ case ConvolutionMethod::FFT:
+ {
+ auto f = arm_compute::support::cpp14::make_unique<NEFFTConvolutionLayer>(_memory_manager);
+ f->configure(input, weights, biases, output, conv_info, act_info);
+ _function = std::move(f);
+ break;
+ }
default:
ARM_COMPUTE_ERROR("Not supported.");
break;
@@ -97,6 +104,10 @@ Status NEConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo
case ConvolutionMethod::DIRECT:
//Validate Gemm-based Convolution
ARM_COMPUTE_RETURN_ON_ERROR(NEDirectConvolutionLayer::validate(input, weights, biases, output, conv_info, act_info));
+ case ConvolutionMethod::FFT:
+ // Validate FFT-based convolution layer
+ ARM_COMPUTE_RETURN_ON_ERROR(NEFFTConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info));
+ break;
default:
ARM_COMPUTE_ERROR("Not supported.");
break;
@@ -148,12 +159,22 @@ ConvolutionMethod NEConvolutionLayer::get_convolution_method(const ITensorInfo *
return (*found).second;
}
- if(dilation != Size2D(1U, 1U) || input->dimension(get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL)) <= 16)
+ if(dilation != Size2D(1U, 1U))
{
return ConvolutionMethod::GEMM;
}
-
- return bool(NEWinogradConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info, enable_fast_math)) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
+ else
+ {
+ if((weights->dimension(idx_h) > 7) && (input->dimension(idx_c) > output->dimension(idx_c)) && (NEFFTConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info)))
+ {
+ return ConvolutionMethod::FFT;
+ }
+ if(input->dimension(idx_c) < 16)
+ {
+ return ConvolutionMethod::GEMM;
+ }
+ return bool(NEWinogradConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info, enable_fast_math)) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
+ }
}
void NEConvolutionLayer::run()
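Putting the hunks above together, the method selection in get_convolution_method (after the fixed per-configuration table has been consulted) now follows roughly the logic below; a condensed sketch rather than the verbatim code:

    // Condensed sketch of the heuristic introduced by this patch.
    if(dilation != Size2D(1U, 1U))
        return ConvolutionMethod::GEMM;                 // dilated convolutions stay on GEMM
    if(weights->dimension(idx_h) > 7
       && input->dimension(idx_c) > output->dimension(idx_c)
       && bool(NEFFTConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info)))
        return ConvolutionMethod::FFT;                  // large kernels with a shrinking channel count
    if(input->dimension(idx_c) < 16)
        return ConvolutionMethod::GEMM;                 // few input channels
    return bool(NEWinogradConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info, enable_fast_math))
               ? ConvolutionMethod::WINOGRAD
               : ConvolutionMethod::GEMM;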
diff --git a/src/runtime/NEON/functions/NEFFT1D.cpp b/src/runtime/NEON/functions/NEFFT1D.cpp
index 665efeb440..25ba1c8391 100644
--- a/src/runtime/NEON/functions/NEFFT1D.cpp
+++ b/src/runtime/NEON/functions/NEFFT1D.cpp
@@ -37,6 +37,9 @@ NEFFT1D::NEFFT1D(std::shared_ptr<IMemoryManager> memory_manager)
void NEFFT1D::configure(const ITensor *input, ITensor *output, const FFT1DInfo &config)
{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_ERROR_THROW_ON(NEFFT1D::validate(input->info(), output->info(), config));
+
// Decompose size to radix factors
const auto supported_radix = NEFFTRadixStageKernel::supported_radix();
const unsigned int N = input->info()->tensor_shape()[config.axis];
@@ -44,21 +47,25 @@ void NEFFT1D::configure(const ITensor *input, ITensor *output, const FFT1DInfo &
ARM_COMPUTE_ERROR_ON(decomposed_vector.empty());
// Flags
- _run_scale = config.direction == FFTDirection::Inverse;
- _axis = config.axis;
+ _run_scale = config.direction == FFTDirection::Inverse;
+
const bool is_c2r = input->info()->num_channels() == 2 && output->info()->num_channels() == 1;
// Configure digit reverse
+ FFTDigitReverseKernelInfo digit_reverse_config;
+ digit_reverse_config.axis = config.axis;
+ digit_reverse_config.conjugate = config.direction == FFTDirection::Inverse;
TensorInfo digit_reverse_indices_info(TensorShape(input->info()->tensor_shape()[config.axis]), 1, DataType::U32);
_digit_reverse_indices.allocator()->init(digit_reverse_indices_info);
_memory_group.manage(&_digit_reversed_input);
- _digit_reverse_kernel.configure(input, &_digit_reversed_input, &_digit_reverse_indices, config.axis);
+ _digit_reverse_kernel.configure(input, &_digit_reversed_input, &_digit_reverse_indices, digit_reverse_config);
// Create and configure FFT kernels
unsigned int Nx = 1;
-
- _num_ffts = decomposed_vector.size();
+ _num_ffts = decomposed_vector.size();
_fft_kernels.resize(_num_ffts);
+ _axis = config.axis;
+
for(unsigned int i = 0; i < _num_ffts; ++i)
{
const unsigned int radix_for_stage = decomposed_vector.at(i);
@@ -68,10 +75,20 @@ void NEFFT1D::configure(const ITensor *input, ITensor *output, const FFT1DInfo &
fft_kernel_info.radix = radix_for_stage;
fft_kernel_info.Nx = Nx;
fft_kernel_info.is_first_stage = (i == 0);
- _fft_kernels[i].configure(&_digit_reversed_input, i == (_num_ffts - 1) && !is_c2r ? output : nullptr, fft_kernel_info);
+ _fft_kernels[i].configure(&_digit_reversed_input, ((i == (_num_ffts - 1)) && !is_c2r) ? output : nullptr, fft_kernel_info);
+
Nx *= radix_for_stage;
}
+ // Configure scale kernel
+ if(_run_scale)
+ {
+ FFTScaleKernelInfo scale_config;
+ scale_config.scale = static_cast<float>(N);
+ scale_config.conjugate = config.direction == FFTDirection::Inverse;
+ is_c2r ? _scale_kernel.configure(&_digit_reversed_input, output, scale_config) : _scale_kernel.configure(output, nullptr, scale_config);
+ }
+
// Allocate tensors
_digit_reversed_input.allocator()->allocate();
_digit_reverse_indices.allocator()->allocate();
@@ -84,8 +101,9 @@ void NEFFT1D::configure(const ITensor *input, ITensor *output, const FFT1DInfo &
Status NEFFT1D::validate(const ITensorInfo *input, const ITensorInfo *output, const FFT1DInfo &config)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 2, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON(config.axis > 1);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() != DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->num_channels() > 2);
+ ARM_COMPUTE_RETURN_ERROR_ON(std::set<unsigned int>({ 0, 1 }).count(config.axis) == 0);
// Check if FFT is decomposable
const auto supported_radix = NEFFTRadixStageKernel::supported_radix();
@@ -96,6 +114,9 @@ Status NEFFT1D::validate(const ITensorInfo *input, const ITensorInfo *output, co
// Checks performed when output is configured
if((output != nullptr) && (output->total_size() != 0))
{
+ // All combinations are supported except real input with real output (i.e., both input and output with a single channel)
+ ARM_COMPUTE_RETURN_ERROR_ON(output->num_channels() == 1 && input->num_channels() == 1);
+ ARM_COMPUTE_RETURN_ERROR_ON(output->num_channels() > 2);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
}
@@ -107,7 +128,7 @@ void NEFFT1D::run()
{
MemoryGroupResourceScope scope_mg(_memory_group);
- NEScheduler::get().schedule(&_digit_reverse_kernel, (_axis == 0 ? Window::DimY : Window::DimX));
+ NEScheduler::get().schedule(&_digit_reverse_kernel, (_axis == 0 ? Window::DimY : Window::DimZ));
for(unsigned int i = 0; i < _num_ffts; ++i)
{
diff --git a/src/runtime/NEON/functions/NEFFTConvolutionLayer.cpp b/src/runtime/NEON/functions/NEFFTConvolutionLayer.cpp
new file mode 100644
index 0000000000..962402549f
--- /dev/null
+++ b/src/runtime/NEON/functions/NEFFTConvolutionLayer.cpp
@@ -0,0 +1,381 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h"
+
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/helpers/fft.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
+namespace arm_compute
+{
+namespace
+{
+int pad_decomposable(int N)
+{
+ const auto supported_radix = NEFFTRadixStageKernel::supported_radix();
+
+ int pad = 0;
+ bool is_decomposed = false;
+ while(!is_decomposed)
+ {
+ const auto decomposed_vector = arm_compute::helpers::fft::decompose_stages(N++, supported_radix);
+ is_decomposed = !decomposed_vector.empty();
+ if(!is_decomposed)
+ {
+ ++pad;
+ }
+ }
+ return pad;
+}
+} // namespace
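+// pad_decomposable above grows a dimension until decompose_stages can factor it entirely into
+// supported radices, and returns how many extra elements that took. A worked example under a
+// purely hypothetical supported-radix set of { 2, 3, 4, 5 }:
+//   pad_decomposable(13):
+//     N = 13 -> prime, not decomposable            -> pad = 1
+//     N = 14 -> 2 * 7, and 7 is not a valid radix  -> pad = 2
+//     N = 15 -> 3 * 5, fully decomposable          -> stop
+//   pad_decomposable(13) == 2, so the layer would pad that FFT dimension by two elements.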
+
+NEFFTConvolutionLayer::NEFFTConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
+ : _memory_group(memory_manager),
+ _flip_weights_func(),
+ _permute_input_func(),
+ _permute_output_func(),
+ _permute_weights_func(),
+ _permute_bias_func(),
+ _pad_input_func(),
+ _pad_weights_func(),
+ _transform_input_func(memory_manager),
+ _transform_weights_func(),
+ _itransform_output_func(memory_manager),
+ _prod_func(),
+ _reduce_func(),
+ _extract_output_func(),
+ _bias_add_func(),
+ _activation_layer_func(),
+ _permuted_input(),
+ _permuted_weights(),
+ _permuted_bias(),
+ _permuted_output(),
+ _padded_input(),
+ _padded_weights(),
+ _flip_axis(),
+ _flipped_weights(),
+ _transformed_input(),
+ _transformed_weights(),
+ _input_weights_product(),
+ _output_product(),
+ _output_reduced(),
+ _itransformed_output(),
+ _reshaped_output(),
+ _bias_output(),
+ _original_weights(nullptr),
+ _original_bias(nullptr),
+ _is_activationlayer_enabled(false),
+ _needs_permute(false),
+ _has_bias(false),
+ _is_prepared(false)
+{
+}
+
+void NEFFTConvolutionLayer::configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info)
+{
+ _original_weights = weights;
+ _original_bias = biases;
+
+ // Flag if bias addition is required
+ _has_bias = biases != nullptr;
+
+ // Get indices for the width and height
+ const size_t idx_width = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH);
+ const size_t idx_height = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT);
+
+ // Input shape, kernel size and output tile
+ const Size2D input_dims = Size2D(input->info()->tensor_shape()[idx_width], input->info()->tensor_shape()[idx_height]);
+ const Size2D kernel_size = Size2D(weights->info()->tensor_shape()[idx_width], weights->info()->tensor_shape()[idx_height]);
+ const Size2D pad_valid = Size2D(pad_decomposable(input_dims.x() + kernel_size.x() - 1),
+ pad_decomposable(input_dims.y() + kernel_size.y() - 1));
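+ // pad_valid is the extra right/bottom padding needed so that the full convolution
+ // size (input + kernel - 1) in each dimension is decomposable into supported FFT radices.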
+ // Tensors to use
+ ITensor *input_to_use = input;
+ const ITensor *weights_to_use = weights;
+ ITensor *output_to_use = _has_bias ? &_bias_output : output;
+
+ // Permute bias
+ _permute_bias_func.configure(biases, &_permuted_bias, PermutationVector(1U, 2U, 0U));
+ _permuted_bias.info()->set_data_layout(DataLayout::NCHW);
+
+ // Permute input if needed
+ _needs_permute = input->info()->data_layout() == DataLayout::NHWC;
+ if(_needs_permute)
+ {
+ _memory_group.manage(&_permuted_input);
+ // Configure the function to transform the input tensor from NHWC -> NCHW
+ _permute_input_func.configure(input, &_permuted_input, PermutationVector(1U, 2U, 0U));
+ _permuted_input.info()->set_data_layout(DataLayout::NCHW);
+
+ // Configure the function to transform the weights tensor from HWI -> IHW
+ _permute_weights_func.configure(weights, &_permuted_weights, PermutationVector(1U, 2U, 0U));
+ _permuted_weights.info()->set_data_layout(DataLayout::NCHW);
+
+ input_to_use = &_permuted_input;
+ weights_to_use = &_permuted_weights;
+ }
+
+ // Flip weights
+ _flipped_weights.allocator()->init(weights_to_use->info()->clone()->set_is_resizable(true).reset_padding());
+ _flip_axis.allocator()->init(TensorInfo(TensorShape(2U), 1, DataType::U32));
+ _flip_weights_func.configure(weights_to_use, &_flipped_weights, &_flip_axis);
+
+ // Pad weights
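+ // After this padding, the weights have the same spatial size as the padded input,
+ // so their 2D FFTs can be multiplied element-wise.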
+ const PaddingList padding_w = { { 0, input_dims.x() + pad_valid.x() - 1 }, { 0, input_dims.y() + pad_valid.y() - 1 } };
+ _pad_weights_func.configure(&_flipped_weights, &_padded_weights, padding_w);
+
+ // Transform weights
+ _transform_weights_func = support::cpp14::make_unique<NEFFT2D>();
+ _transform_weights_func->configure(&_padded_weights, &_transformed_weights, FFT2DInfo());
+
+ // Pad input
+ const PaddingList padding_in = { { 0, kernel_size.x() + pad_valid.x() - 1 }, { 0, kernel_size.y() + pad_valid.y() - 1 } };
+ _memory_group.manage(&_padded_input);
+ _pad_input_func.configure(input_to_use, &_padded_input, padding_in);
+ if(_needs_permute)
+ {
+ _permuted_input.allocator()->allocate();
+ }
+
+ // Transform input
+ _memory_group.manage(&_transformed_input);
+ _transform_input_func.configure(&_padded_input, &_transformed_input, FFT2DInfo());
+ _padded_input.allocator()->allocate();
+
+ // Perform product
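+ // By the convolution theorem, multiplying the transformed input and weights
+ // element-wise in the frequency domain corresponds to convolving them in the spatial domain.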
+ _memory_group.manage(&_output_product);
+ _prod_func.configure(&_transformed_input, &_transformed_weights, &_output_product);
+ _transformed_input.allocator()->allocate();
+
+ // Perform reduction
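+ // Summing the products over axis 2 accumulates the contributions of all input
+ // channels, mirroring the channel accumulation of a direct convolution.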
+ _memory_group.manage(&_output_reduced);
+ _reduce_func.configure(&_output_product, &_output_reduced, 2, ReductionOperation::SUM);
+ _output_product.allocator()->allocate();
+
+ // Transform output
+ _memory_group.manage(&_itransformed_output);
+ FFT2DInfo itransform_info;
+ itransform_info.direction = FFTDirection::Inverse;
+ _itransformed_output.allocator()->init(_output_reduced.info()->clone()->set_is_resizable(true).set_num_channels(1).reset_padding());
+ _itransform_output_func.configure(&_output_reduced, &_itransformed_output, itransform_info);
+ _output_reduced.allocator()->allocate();
+
+ // Reshape output
+ TensorShape reshaped_shape = _itransformed_output.info()->tensor_shape();
+ reshaped_shape.remove_dimension(2);
+ _reshaped_output.allocator()->init(_itransformed_output.info()->clone()->set_tensor_shape(reshaped_shape));
+
+ // Extract correct region
+ const int start_left = kernel_size.x() - conv_info.pad_left() - 1;
+ const int start_top = kernel_size.y() - conv_info.pad_top() - 1;
+ const int end_right = _reshaped_output.info()->tensor_shape().x() - (kernel_size.x() - conv_info.pad_right() - 1) - pad_valid.x();
+ const int end_bottom = _reshaped_output.info()->tensor_shape().y() - (kernel_size.y() - conv_info.pad_bottom() - 1) - pad_valid.y();
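+ // These coordinates crop the full convolution result down to the output region implied by
+ // the kernel size and conv_info padding, discarding the pad_valid samples added only for decomposability.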
+ if(_has_bias)
+ {
+ _memory_group.manage(&_bias_output);
+ }
+ else if(_needs_permute)
+ {
+ output_to_use = &_permuted_output;
+ _memory_group.manage(&_permuted_output);
+ }
+ _extract_output_func.configure(&_reshaped_output, output_to_use, Coordinates(start_left, start_top), Coordinates(end_right, end_bottom));
+ _reshaped_output.allocator()->allocate();
+ _itransformed_output.allocator()->allocate();
+
+ // Add bias
+ if(biases != nullptr)
+ {
+ output_to_use = output;
+ if(_needs_permute)
+ {
+ output_to_use = &_permuted_output;
+ _memory_group.manage(&_permuted_output);
+ }
+ auto_init_if_empty(*output_to_use->info(), *_bias_output.info());
+ _bias_add_func.configure(&_bias_output, &_permuted_bias, output_to_use, ConvertPolicy::WRAP);
+ _bias_output.allocator()->allocate();
+ }
+
+ // Permute output
+ if(_needs_permute)
+ {
+ // Configure the function to transform the convolved output from NCHW back to the original NHWC layout
+ _permuted_output.info()->set_data_layout(DataLayout::NCHW);
+ _permute_output_func.configure(&_permuted_output, output, PermutationVector(2U, 0U, 1U));
+
+ // Allocate tensors
+ _permuted_output.allocator()->allocate();
+ }
+
+ // Configure Activation Layer
+ _is_activationlayer_enabled = act_info.enabled();
+ if(_is_activationlayer_enabled)
+ {
+ _activation_layer_func.configure(output, nullptr, act_info);
+ }
+
+ // Setup flip axis data
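+ // Flipping the weights along axes 0 and 1 (width and height) makes the frequency-domain
+ // product equivalent to the cross-correlation a convolution layer is expected to compute.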
+ _flip_axis.allocator()->allocate();
+
+ auto axis_data = reinterpret_cast<uint32_t *>(_flip_axis.buffer());
+ axis_data[0] = 0;
+ axis_data[1] = 1;
+}
+
+Status NEFFTConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
+
+ // Get indices for the width and height
+ const size_t idx_width = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
+ const size_t idx_height = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
+
+ // Input shape, kernel size and output tile
+ const Size2D kernel_size = Size2D(weights->tensor_shape()[idx_width], weights->tensor_shape()[idx_height]);
+
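+ // The checks below restrict the FFT path to equal strides, square kernels and
+ // 'same' padding (kernel_size / 2 on each side).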
+ // Strides
+ const auto strides = conv_info.stride();
+ ARM_COMPUTE_RETURN_ERROR_ON(strides.first != strides.second && strides.first != 1);
+ ARM_COMPUTE_RETURN_ERROR_ON(kernel_size.x() != kernel_size.y());
+ ARM_COMPUTE_RETURN_ERROR_ON(conv_info.pad_left() != (kernel_size.x() / 2) || conv_info.pad_right() != (kernel_size.x() / 2));
+ ARM_COMPUTE_RETURN_ERROR_ON(conv_info.pad_top() != (kernel_size.y() / 2) || conv_info.pad_bottom() != (kernel_size.y() / 2));
+
+ // Validate biases
+ if(biases != nullptr)
+ {
+ const size_t idx_channels = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_channels] != biases->tensor_shape().x());
+ }
+
+ // Checks performed when output is configured
+ if((output != nullptr) && (output->total_size() != 0))
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON((input->tensor_shape()[idx_height] != output->tensor_shape()[idx_height]) || (input->tensor_shape()[idx_width] != output->tensor_shape()[idx_width]));
+
+ // Validate Activation Layer
+ if(act_info.enabled())
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, act_info));
+ }
+ }
+
+ return Status{};
+}
+
+void NEFFTConvolutionLayer::run()
+{
+ prepare();
+
+ MemoryGroupResourceScope scope_mg(_memory_group);
+
+ // Transform input
+ if(_needs_permute)
+ {
+ _permute_input_func.run();
+ }
+ _pad_input_func.run();
+ _transform_input_func.run();
+
+ // Perform operations in the frequency domain
+ _prod_func.run();
+
+ _reduce_func.run();
+
+ // Transform output
+ _itransform_output_func.run();
+ _reshaped_output.allocator()->import_memory(_itransformed_output.buffer());
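+ // _reshaped_output aliases the buffer of _itransformed_output; only the shape metadata
+ // differs, so no copy is performed here.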
+ _extract_output_func.run();
+
+ // Add bias
+ if(_has_bias)
+ {
+ _bias_add_func.run();
+ }
+ if(_needs_permute)
+ {
+ _permute_output_func.run();
+ }
+
+ // Run activation layer
+ if(_is_activationlayer_enabled)
+ {
+ _activation_layer_func.run();
+ }
+}
+
+void NEFFTConvolutionLayer::prepare()
+{
+ if(!_is_prepared)
+ {
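+ // The weight pipeline (permute -> flip -> pad -> FFT) runs only once; each intermediate
+ // buffer is released as soon as the next stage has consumed it.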
+ // Permute bias to NCHW
+ if(_original_bias != nullptr)
+ {
+ _permuted_bias.allocator()->allocate();
+ _permute_bias_func.run();
+ _original_bias->mark_as_unused();
+ }
+
+ const ITensor *cur_weights = _original_weights;
+
+ // Permute weights
+ if(_needs_permute)
+ {
+ ARM_COMPUTE_ERROR_ON(!cur_weights->is_used());
+
+ _permuted_weights.allocator()->allocate();
+ _permute_weights_func.run();
+ cur_weights->mark_as_unused();
+ cur_weights = &_permuted_weights;
+ }
+
+ // Flip weights
+ _flipped_weights.allocator()->allocate();
+ _flip_weights_func.run();
+ cur_weights->mark_as_unused();
+
+ // Pad weights
+ _padded_weights.allocator()->allocate();
+ _pad_weights_func.run();
+ _flipped_weights.mark_as_unused();
+ _flipped_weights.allocator()->free();
+
+ // Transform weights to frequency domain
+ _transformed_weights.allocator()->allocate();
+ _transform_weights_func->run();
+ _transform_weights_func.reset();
+
+ _padded_weights.mark_as_unused();
+ _padded_weights.allocator()->free();
+
+ _is_prepared = true;
+ }
+}
+} // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp b/src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp
index cf6b984717..ef28fe9260 100644
--- a/src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp
+++ b/src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,8 +29,8 @@
#include <utility>
-using namespace arm_compute;
-
+namespace arm_compute
+{
void NEPixelWiseMultiplication::configure(ITensor *input1, ITensor *input2, ITensor *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy)
{
auto k = arm_compute::support::cpp14::make_unique<NEPixelWiseMultiplicationKernel>();
@@ -51,3 +51,27 @@ Status NEPixelWiseMultiplication::validate(const ITensorInfo *input1, const ITen
{
return NEPixelWiseMultiplicationKernel::validate(input1, input2, output, scale, overflow_policy, rounding_policy);
}
+
+void NEComplexPixelWiseMultiplication::configure(ITensor *input1, ITensor *input2, ITensor *output)
+{
+ auto k = arm_compute::support::cpp14::make_unique<NEComplexPixelWiseMultiplicationKernel>();
+ k->configure(input1, input2, output);
+ _kernel = std::move(k);
+
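+ // If one of the inputs is broadcast along the x dimension, replicate its border
+ // so the kernel can safely read a full vector width from it.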
+ if(output->info()->dimension(0) > 1)
+ {
+ ITensor *broadcasted_info = (input1->info()->dimension(0) == 1) ? input1 : input2;
+
+ if(broadcasted_info->info()->dimension(0) == 1)
+ {
+ _border_handler.configure(broadcasted_info, _kernel->border_size(), BorderMode::REPLICATE);
+ }
+ }
+}
+
+Status NEComplexPixelWiseMultiplication::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+{
+ return NEComplexPixelWiseMultiplicationKernel::validate(input1, input2, output);
+}
+
+} // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NEReductionOperation.cpp b/src/runtime/NEON/functions/NEReductionOperation.cpp
index 9f81a403f5..a0aed96521 100644
--- a/src/runtime/NEON/functions/NEReductionOperation.cpp
+++ b/src/runtime/NEON/functions/NEReductionOperation.cpp
@@ -66,7 +66,8 @@ Status NEReductionOperation::validate(const ITensorInfo *input, const ITensorInf
void NEReductionOperation::configure(ITensor *input, ITensor *output, unsigned int axis, ReductionOperation op)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_ERROR_THROW_ON(NEReductionOperation::validate(input->info(), output->info(), axis, op));
// Configure reduction kernel
_reduction_kernel.configure(input, output, axis, op);
diff --git a/tests/benchmark/NEON/ConvolutionLayer.cpp b/tests/benchmark/NEON/ConvolutionLayer.cpp
index ac27e7ad31..2ae893bdec 100644
--- a/tests/benchmark/NEON/ConvolutionLayer.cpp
+++ b/tests/benchmark/NEON/ConvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,17 +24,21 @@
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
#include "tests/benchmark/fixtures/ConvolutionLayerFixture.h"
+#include "tests/benchmark/fixtures/FFTConvolutionLayerFixture.h"
#include "tests/benchmark/fixtures/WinogradConvolutionLayerFixture.h"
+#include "tests/datasets/SmallConvolutionLayerDataset.h"
#include "tests/datasets/system_tests/alexnet/AlexNetConvolutionLayerDataset.h"
#include "tests/datasets/system_tests/googlenet/inceptionv1/GoogLeNetInceptionV1ConvolutionLayerDataset.h"
#include "tests/datasets/system_tests/googlenet/inceptionv4/GoogLeNetInceptionV4ConvolutionLayerDataset.h"
#include "tests/datasets/system_tests/lenet5/LeNet5ConvolutionLayerDataset.h"
#include "tests/datasets/system_tests/mobilenet/MobileNetConvolutionLayerDataset.h"
+#include "tests/datasets/system_tests/resnet12/ResNet12ConvolutionLayerDataset.h"
#include "tests/datasets/system_tests/squeezenet/SqueezeNetConvolutionLayerDataset.h"
#include "tests/datasets/system_tests/vgg/vgg16/VGG16ConvolutionLayerDataset.h"
#include "tests/datasets/system_tests/yolo/v2/YOLOV2ConvolutionLayerDataset.h"
@@ -59,6 +63,7 @@ const auto data_types = framework::dataset::make("DataType", { DataType::F32, Da
} // namespace
using NEGEMMConvolutionLayerFixture = ConvolutionLayerFixture<Tensor, NEGEMMConvolutionLayer, Accessor>;
+using NEFFTConvolutionLayerFixture = FFTConvolutionLayerFixture<Tensor, NEFFTConvolutionLayer, Accessor>;
TEST_SUITE(NEON)
#if defined(__aarch64__)
@@ -89,6 +94,12 @@ REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetWinogradLayer, NEWinogradConvolutionLa
framework::dataset::make("Batches", 1)));
#endif /* __aarch64__ */
+REGISTER_FIXTURE_DATA_TEST_CASE(ResNet12FFTLayer, NEFFTConvolutionLayerFixture, framework::DatasetMode::ALL,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::ResNet12FFTConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ framework::dataset::make("Batches", 1)));
+
REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetConvolutionLayer, NEGEMMConvolutionLayerFixture, framework::DatasetMode::ALL,
framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::AlexNetConvolutionLayerDataset(),
framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
diff --git a/tests/benchmark/NEON/FFT.cpp b/tests/benchmark/NEON/FFT.cpp
new file mode 100644
index 0000000000..c9889fda02
--- /dev/null
+++ b/tests/benchmark/NEON/FFT.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEFFT1D.h"
+#include "arm_compute/runtime/NEON/functions/NEFFT2D.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/benchmark/fixtures/FFTFixture.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "utils/TypePrinter.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace benchmark
+{
+namespace
+{
+const auto data_types = framework::dataset::make("DataType", { DataType::F32 });
+const auto shapes = framework::dataset::make("Shapes", { TensorShape(192U, 128U, 64U), TensorShape(224U, 224U) });
+} // namespace
+
+using NEFFT1DFixture = FFTFixture<Tensor, NEFFT1D, FFT1DInfo, Accessor>;
+using NEFFT2DFixture = FFTFixture<Tensor, NEFFT2D, FFT2DInfo, Accessor>;
+
+TEST_SUITE(NEON)
+
+REGISTER_FIXTURE_DATA_TEST_CASE(FFT1D, NEFFT1DFixture, framework::DatasetMode::ALL,
+ framework::dataset::combine(shapes, data_types));
+
+REGISTER_FIXTURE_DATA_TEST_CASE(FFT2D, NEFFT2DFixture, framework::DatasetMode::ALL,
+ framework::dataset::combine(shapes, data_types));
+
+TEST_SUITE_END() // NEON
+} // namespace benchmark
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/NEON/FFT.cpp b/tests/validation/NEON/FFT.cpp
index 598c8bb10d..d762630d30 100644
--- a/tests/validation/NEON/FFT.cpp
+++ b/tests/validation/NEON/FFT.cpp
@@ -24,8 +24,10 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEFFT1D.h"
#include "arm_compute/runtime/NEON/functions/NEFFT2D.h"
+#include "arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "tests/NEON/Accessor.h"
+#include "tests/datasets/SmallConvolutionLayerDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
@@ -201,6 +203,25 @@ TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
TEST_SUITE_END() // FFT2D
+TEST_SUITE(FFTConvolutionLayer)
+
+template <typename T>
+using NEFFTConvolutionLayerFixture = FFTConvolutionValidationFixture<Tensor, Accessor, NEFFTConvolutionLayer, T>;
+
+TEST_SUITE(Float)
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEFFTConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallFFTConvolutionLayerDataset(),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ ActivationFunctionsSmallDataset))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_f32, tolerance_num);
+}
+TEST_SUITE_END() // FP32
+TEST_SUITE_END() // Float
+TEST_SUITE_END() // FFTConvolutionLayer
+
TEST_SUITE_END() // NEON
} // namespace validation
} // namespace test