Diffstat (limited to 'src')
-rw-r--r--  src/core/CL/cl_kernels/helpers_asymm.h  6
-rw-r--r--  src/core/NEON/INEKernel.h  4
-rw-r--r--  src/core/NEON/INESimpleKernel.h  4
-rw-r--r--  src/core/NEON/NEKernels.h  2
-rw-r--r--  src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp  4
-rw-r--r--  src/core/NEON/kernels/NEBox3x3Kernel.h  8
-rw-r--r--  src/core/NEON/kernels/NECannyEdgeKernel.h  6
-rw-r--r--  src/core/NEON/kernels/NEChannelShuffleLayerKernel.cpp  4
-rw-r--r--  src/core/NEON/kernels/NECol2ImKernel.cpp  4
-rw-r--r--  src/core/NEON/kernels/NECol2ImKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.cpp  4
-rw-r--r--  src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h  2
-rw-r--r--  src/core/NEON/kernels/NEConvolutionKernel.cpp  4
-rw-r--r--  src/core/NEON/kernels/NEDerivativeKernel.cpp  6
-rw-r--r--  src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp  6
-rw-r--r--  src/core/NEON/kernels/NEDirectConvolutionLayerKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.cpp  4
-rw-r--r--  src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEFastCornersKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEFillBorderKernel.cpp  2
-rw-r--r--  src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp  4
-rw-r--r--  src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ScaleKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpReductionKernel.h  8
-rw-r--r--  src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp  4
-rw-r--r--  src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEGatherKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEGaussian3x3Kernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEGaussian5x5Kernel.h  6
-rw-r--r--  src/core/NEON/kernels/NEGaussianPyramidKernel.h  6
-rw-r--r--  src/core/NEON/kernels/NEHOGDescriptorKernel.h  6
-rw-r--r--  src/core/NEON/kernels/NEHOGDetectorKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEHarrisCornersKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEIm2ColKernel.cpp  2
-rw-r--r--  src/core/NEON/kernels/NEInstanceNormalizationLayerKernel.cpp  4
-rw-r--r--  src/core/NEON/kernels/NELKTrackerKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NENonMaximaSuppression3x3Kernel.h  8
-rw-r--r--  src/core/NEON/kernels/NENormalizationLayerKernel.cpp  6
-rw-r--r--  src/core/NEON/kernels/NEPadLayerKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NERangeKernel.cpp  6
-rw-r--r--  src/core/NEON/kernels/NEReductionOperationKernel.cpp  8
-rw-r--r--  src/core/NEON/kernels/NEReductionOperationKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NERemapKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEReorgLayerKernel.cpp  4
-rw-r--r--  src/core/NEON/kernels/NEReverseKernel.cpp  6
-rw-r--r--  src/core/NEON/kernels/NEScaleKernel.h  2
-rw-r--r--  src/core/NEON/kernels/NEStackLayerKernel.cpp  4
-rw-r--r--  src/core/NEON/kernels/NEStackLayerKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEThresholdKernel.cpp  6
-rw-r--r--  src/core/NEON/kernels/NETileKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NETransposeKernel.cpp  10
-rw-r--r--  src/core/NEON/kernels/NETransposeKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEWeightsReshapeKernel.cpp  4
-rw-r--r--  src/core/NEON/kernels/NEWeightsReshapeKernel.h  4
-rw-r--r--  src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h  16
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp  2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/interleave_indirect.cpp  24
-rw-r--r--  src/core/NEON/kernels/batchnormalization/impl/NEON/fp16.cpp  4
-rw-r--r--  src/core/NEON/kernels/batchnormalization/impl/NEON/fp32.cpp  2
-rw-r--r--  src/core/NEON/kernels/detail/NEActivationFunctionDetail.h  28
-rw-r--r--  src/core/NEON/wrapper/traits.h  4
-rw-r--r--  src/core/cpu/kernels/CpuConcatenateBatchKernel.cpp  2
-rw-r--r--  src/core/cpu/kernels/CpuConcatenateDepthKernel.cpp  2
-rw-r--r--  src/core/cpu/kernels/CpuConcatenateHeightKernel.cpp  2
-rw-r--r--  src/core/cpu/kernels/CpuConcatenateWidthKernel.cpp  2
-rw-r--r--  src/core/cpu/kernels/CpuReshapeKernel.cpp  2
-rw-r--r--  src/core/cpu/kernels/activation/NEON/fp16.cpp  4
-rw-r--r--  src/core/cpu/kernels/activation/NEON/fp32.cpp  2
-rw-r--r--  src/core/cpu/kernels/add/neon/list.h  4
-rw-r--r--  src/core/cpu/kernels/pooling/neon/quantized.h  8
-rw-r--r--  src/core/cpu/kernels/softmax/impl/NEON/list.h  4
-rw-r--r--  src/core/cpu/kernels/sub/neon/list.h  4
-rw-r--r--  src/graph/backends/NEON/NEDeviceBackend.cpp  8
-rw-r--r--  src/graph/backends/NEON/NEFunctionFactory.cpp  8
-rw-r--r--  src/graph/backends/NEON/NENodeValidator.cpp  4
-rw-r--r--  src/runtime/NEON/functions/NEConvolutionLayer.cpp  4
-rw-r--r--  src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp  2
-rw-r--r--  src/runtime/NEON/functions/NEGEMMConv2d.cpp  4
-rw-r--r--  src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp  4
-rw-r--r--  src/runtime/cpu/operators/CpuPooling.h  2
89 files changed, 219 insertions, 219 deletions
diff --git a/src/core/CL/cl_kernels/helpers_asymm.h b/src/core/CL/cl_kernels/helpers_asymm.h
index 59c8fa606d..eea4458170 100644
--- a/src/core/CL/cl_kernels/helpers_asymm.h
+++ b/src/core/CL/cl_kernels/helpers_asymm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -192,7 +192,7 @@ inline float dequantize_qasymm8_signed(char input, float offset, float scale)
/** Each bit of the result is set to the corresponding bit of either then_val or
* else_val depending on whether the corresponding bit of if_mask is set.
- * Equivalent to the VBSL instruction in ARM NEON.
+ * Equivalent to the VBSL instruction in Arm Neon.
*
* @param[in] size Size of vector.
*
@@ -320,7 +320,7 @@ inline float dequantize_qasymm8_signed(char input, float offset, float scale)
}
/** Calculates (a+b)/2, rounded to the nearest integer.
- * Equivalent to VRHADD in the ARM NEON instruction set.
+ * Equivalent to VRHADD in the Arm Neon instruction set.
*
* @param[in] size Size of vector.
*
diff --git a/src/core/NEON/INEKernel.h b/src/core/NEON/INEKernel.h
index 7ad20166d8..dde8310ecf 100644
--- a/src/core/NEON/INEKernel.h
+++ b/src/core/NEON/INEKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -28,7 +28,7 @@
namespace arm_compute
{
-/** Common interface for all kernels implemented in NEON. */
+/** Common interface for all kernels implemented in Neon. */
using INEKernel = ICPPKernel;
} // namespace arm_compute
#endif /*ARM_COMPUTE_INEKERNEL_H */
diff --git a/src/core/NEON/INESimpleKernel.h b/src/core/NEON/INESimpleKernel.h
index da32d6619e..d2b6de427b 100644
--- a/src/core/NEON/INESimpleKernel.h
+++ b/src/core/NEON/INESimpleKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -28,7 +28,7 @@
namespace arm_compute
{
-/** Interface for simple NEON kernels having 1 tensor input and 1 tensor output */
+/** Interface for simple Neon kernels having 1 tensor input and 1 tensor output */
using INESimpleKernel = ICPPSimpleKernel;
} // namespace arm_compute
#endif /*ARM_COMPUTE_INESIMPLEKERNEL_H */
diff --git a/src/core/NEON/NEKernels.h b/src/core/NEON/NEKernels.h
index 66309f9296..b962c9eeee 100644
--- a/src/core/NEON/NEKernels.h
+++ b/src/core/NEON/NEKernels.h
@@ -24,7 +24,7 @@
#ifndef ARM_COMPUTE_NEKERNELS_H
#define ARM_COMPUTE_NEKERNELS_H
-/* Header regrouping all the NEON kernels */
+/* Header regrouping all the Neon kernels */
#include "src/core/NEON/kernels/NEAbsoluteDifferenceKernel.h"
#include "src/core/NEON/kernels/NEAccumulateKernel.h"
#include "src/core/NEON/kernels/NEBatchNormalizationLayerKernel.h"
diff --git a/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp b/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp
index 6f2c72a970..7f707d7f1a 100644
--- a/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp
@@ -148,7 +148,7 @@ validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const IT
template <typename T, bool fused_activation, typename F>
void NEBatchNormalizationLayerKernel::batch_normalization_nchw(const Window &window)
{
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
const int window_step_x = 16 / sizeof(T);
@@ -164,7 +164,7 @@ void NEBatchNormalizationLayerKernel::batch_normalization_nchw(const Window &win
F activation_functor(_act_info);
// Hold information about the current feature map we are iterating.
- // Only compute denominator and NEON vectors once per feature map.
+ // Only compute denominator and Neon vectors once per feature map.
int slice = -1;
const auto input_mean = reinterpret_cast<const T *>(_mean->ptr_to_element(Coordinates(0, 0)));
diff --git a/src/core/NEON/kernels/NEBox3x3Kernel.h b/src/core/NEON/kernels/NEBox3x3Kernel.h
index f6a64a7bb4..4f9ac18219 100644
--- a/src/core/NEON/kernels/NEBox3x3Kernel.h
+++ b/src/core/NEON/kernels/NEBox3x3Kernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel to perform a Box 3x3 filter */
+/** Neon kernel to perform a Box 3x3 filter */
class NEBox3x3Kernel : public INESimpleKernel
{
public:
@@ -63,7 +63,7 @@ public:
};
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-/** NEON kernel to perform a Box 3x3 filter for FP16 datatype
+/** Neon kernel to perform a Box 3x3 filter for FP16 datatype
*/
class NEBox3x3FP16Kernel : public NEBox3x3Kernel
{
@@ -88,7 +88,7 @@ public:
void run(const Window &window, const ThreadInfo &info) override;
};
#else /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-/** NEON kernel to perform a Box 3x3 filter for FP16 datatype */
+/** Neon kernel to perform a Box 3x3 filter for FP16 datatype */
using NEBox3x3FP16Kernel = NEBox3x3Kernel;
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
} // namespace arm_compute
diff --git a/src/core/NEON/kernels/NECannyEdgeKernel.h b/src/core/NEON/kernels/NECannyEdgeKernel.h
index eff735259d..f1d24410f7 100644
--- a/src/core/NEON/kernels/NECannyEdgeKernel.h
+++ b/src/core/NEON/kernels/NECannyEdgeKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -85,7 +85,7 @@ protected:
ITensor *_phase; /**< Destination tensor - Quantized phase */
};
-/** NEON kernel to perform Non-Maxima suppression for Canny Edge.
+/** Neon kernel to perform Non-Maxima suppression for Canny Edge.
*
* @note This kernel is meant to be used alongside CannyEdge and performs a non-maxima suppression using magnitude and phase of input
* to characterize points as possible edges. Thus, at the end, each point will be set to EDGE, NO_EDGE or MAYBE.
@@ -148,7 +148,7 @@ private:
int32_t _upper_thr; /**< Upper threshold used for the hysteresis */
};
-/** NEON kernel to perform Edge tracing */
+/** Neon kernel to perform Edge tracing */
class NEEdgeTraceKernel : public INEKernel
{
public:
diff --git a/src/core/NEON/kernels/NEChannelShuffleLayerKernel.cpp b/src/core/NEON/kernels/NEChannelShuffleLayerKernel.cpp
index 6e16f24956..4a2213a30b 100644
--- a/src/core/NEON/kernels/NEChannelShuffleLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEChannelShuffleLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,7 +40,7 @@ namespace
{
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, unsigned int num_groups)
{
- // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use NEON FP16 instructions.
+ // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use Neon FP16 instructions.
ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(input, DataLayout::NCHW, DataLayout::NHWC);
diff --git a/src/core/NEON/kernels/NECol2ImKernel.cpp b/src/core/NEON/kernels/NECol2ImKernel.cpp
index 97b68d1321..bbc86a9a05 100644
--- a/src/core/NEON/kernels/NECol2ImKernel.cpp
+++ b/src/core/NEON/kernels/NECol2ImKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,7 +44,7 @@ namespace
{
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const Size2D &convolved_dims)
{
- //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use NEON FP16 instructions.
+ //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use Neon FP16 instructions.
ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
// Validate configured output
diff --git a/src/core/NEON/kernels/NECol2ImKernel.h b/src/core/NEON/kernels/NECol2ImKernel.h
index 59d1d741b6..00a519d229 100644
--- a/src/core/NEON/kernels/NECol2ImKernel.h
+++ b/src/core/NEON/kernels/NECol2ImKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,7 +32,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel to perform col2im reshaping.
+/** Neon kernel to perform col2im reshaping.
*
* Rearranges each matrix column into image blocks. It's the inverse operation of @ref NEIm2ColKernel.
*
diff --git a/src/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.cpp b/src/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.cpp
index 597c283a9c..101d1384d0 100644
--- a/src/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.cpp
+++ b/src/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,7 +69,7 @@ Status NEConvertFullyConnectedWeightsKernel::validate(const ITensorInfo *input,
DataLayout data_layout)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
- //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use NEON FP16 instructions.
+ //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use Neon FP16 instructions.
ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() != 2);
ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(1) != original_input_shape.total_size_lower(3));
diff --git a/src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h b/src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h
index 39e5300238..0be1fbe5aa 100644
--- a/src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h
+++ b/src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h
@@ -32,7 +32,7 @@ namespace arm_compute
// Forward declarations
class ITensor;
-/** NEON kernel to convert asymmetric signed to asymmetric signed and vice-versa */
+/** Neon kernel to convert asymmetric signed to asymmetric signed and vice-versa */
class NEConvertQuantizedSignednessKernel : public INEKernel
{
public:
diff --git a/src/core/NEON/kernels/NEConvolutionKernel.cpp b/src/core/NEON/kernels/NEConvolutionKernel.cpp
index bac27430f9..075de41203 100644
--- a/src/core/NEON/kernels/NEConvolutionKernel.cpp
+++ b/src/core/NEON/kernels/NEConvolutionKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -374,7 +374,7 @@ void NEConvolutionKernel<3>::convolution(const Window &win)
Iterator input(_input, win);
Iterator output(_output, win);
- // Load the matrix's coefficients into NEON registers:
+ // Load the matrix's coefficients into Neon registers:
const int16x4_t mat00 = vld1_dup_s16(_convolution.data());
const int16x4_t mat01 = vld1_dup_s16(_convolution.data() + 1);
const int16x4_t mat02 = vld1_dup_s16(_convolution.data() + 2);
diff --git a/src/core/NEON/kernels/NEDerivativeKernel.cpp b/src/core/NEON/kernels/NEDerivativeKernel.cpp
index 8d641a33b9..e5780ea264 100644
--- a/src/core/NEON/kernels/NEDerivativeKernel.cpp
+++ b/src/core/NEON/kernels/NEDerivativeKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -83,11 +83,11 @@ void NEDerivativeKernel::configure(const ITensor *input, ITensor *output_x, ITen
AccessWindowHorizontal out_x_access(output_x == nullptr ? nullptr : output_x->info(), 0, num_elems_processed_per_iteration);
AccessWindowHorizontal out_y_access(output_y == nullptr ? nullptr : output_y->info(), 0, num_elems_processed_per_iteration);
- // TODO(COMPMID-1503) Fix x-access input bug in NEON kernel instead of '+2'
+ // TODO(COMPMID-1503) Fix x-access input bug in Neon kernel instead of '+2'
AccessWindowHorizontal in_x_access(input->info(), -border_size().left, num_elems_processed_per_iteration + 2);
AccessWindowRectangle in_y_access(input->info(), 0, -border_size().left, num_elems_processed_per_iteration, num_rows_read_per_iteration);
- // TODO(COMPMID-1503) Fix x-access input bug in NEON kernel instead of '+2'
+ // TODO(COMPMID-1503) Fix x-access input bug in Neon kernel instead of '+2'
AccessWindowRectangle in_xy_access(input->info(), -border_size().left, -border_size().top, num_elems_processed_per_iteration + 2, num_rows_read_per_iteration);
if(run_der_x && run_der_y)
diff --git a/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp b/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp
index 87b9fb1bf1..77742163fa 100644
--- a/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -149,7 +149,7 @@ inline bool run_optim_small_tensor(const ITensor *t)
// Optimized convolver for 1x1 kernels used only where input width and height are both <= 8
// For big Z as in Input=7x7x832, this implementation is faster than the general code because it doesn't need to
-// store intermediate results in memory. Temporary results are stored in NEON registers directly and then written to the output buffer.
+// store intermediate results in memory. Temporary results are stored in Neon registers directly and then written to the output buffer.
template <unsigned int stridex>
class convolver_w1x1_i8x8_f32
{
@@ -1381,4 +1381,4 @@ void NEDirectConvolutionLayerKernel::run(const Window &window, const ThreadInfo
}
}
}
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
diff --git a/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.h b/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.h
index 94c97cf521..58d385a138 100644
--- a/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.h
+++ b/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON interface for Direct Convolution Layer kernel */
+/** Neon interface for Direct Convolution Layer kernel */
class NEDirectConvolutionLayerKernel : public INEKernel
{
public:
diff --git a/src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.cpp b/src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.cpp
index de5a88e812..f072851240 100644
--- a/src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.cpp
+++ b/src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -93,7 +93,7 @@ typename std::enable_if<arm_compute::utils::traits::is_floating_point<T>::value,
output_stage_nchw(ITensor *input, const ITensor *bias, const Window &window, ITensor *output,
int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift, bool has_bias)
{
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
ARM_COMPUTE_ERROR_ON(input->info()->data_layout() == DataLayout::UNKNOWN);
diff --git a/src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h b/src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h
index b1b88103bf..cd0710d0c1 100644
--- a/src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h
+++ b/src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,7 @@
namespace arm_compute
{
class ITensor;
-/** NEON kernel to accumulate the biases, if provided, or downscale in case of quantized input.
+/** Neon kernel to accumulate the biases, if provided, or downscale in case of quantized input.
*
* @note We assume bias to be shared
* @note For quantized computations (i.e. @p input of S32 type) the output data type for auto-initialization must be passed as part
diff --git a/src/core/NEON/kernels/NEFastCornersKernel.h b/src/core/NEON/kernels/NEFastCornersKernel.h
index a4086afb0c..f981d72a03 100644
--- a/src/core/NEON/kernels/NEFastCornersKernel.h
+++ b/src/core/NEON/kernels/NEFastCornersKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,7 +34,7 @@ namespace arm_compute
class ITensor;
using IImage = ITensor;
-/** NEON kernel to perform fast corners */
+/** Neon kernel to perform fast corners */
class NEFastCornersKernel : public INEKernel
{
public:
diff --git a/src/core/NEON/kernels/NEFillBorderKernel.cpp b/src/core/NEON/kernels/NEFillBorderKernel.cpp
index 10384d4176..70178dffc0 100644
--- a/src/core/NEON/kernels/NEFillBorderKernel.cpp
+++ b/src/core/NEON/kernels/NEFillBorderKernel.cpp
@@ -103,7 +103,7 @@ void NEFillBorderKernel::configure(ITensor *tensor, BorderSize border_size, Bord
void NEFillBorderKernel::configure(ITensorInfo *tensor, BorderSize border_size, BorderMode border_mode, const PixelValue &constant_border_value)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(tensor);
- //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use NEON FP16 instructions.
+ //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use Neon FP16 instructions.
ARM_COMPUTE_ERROR_ON(tensor->data_type() == DataType::UNKNOWN);
_border_size = border_size;
diff --git a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp b/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp
index 5d178ea85b..6b2383f043 100644
--- a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,7 +47,7 @@ namespace
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
- //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use NEON FP16 instructions.
+ //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use Neon FP16 instructions.
ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
diff --git a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h b/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h
index 85939ebae9..92fbd12a54 100644
--- a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h
+++ b/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel to interleave the elements of a matrix
+/** Neon kernel to interleave the elements of a matrix
*
* This function puts the values in a 4x4 block of Matrix A on the same row (Interleaved values)
*
diff --git a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
index 14d03fe3eb..dfdb7b3236 100644
--- a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
+++ b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel to multiply matrices
+/** Neon kernel to multiply matrices
*
* @note @ref NEGEMMLowpMatrixMultiplyKernel low precision matrix product kernel
* This kernel performs the following computation:
diff --git a/src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h b/src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h
index 0f37e584b9..9911ffc0f4 100644
--- a/src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h
+++ b/src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel used to add the offset contribution after @ref NEGEMMLowpMatrixMultiplyKernel. The computation is performed in-place
+/** Neon kernel used to add the offset contribution after @ref NEGEMMLowpMatrixMultiplyKernel. The computation is performed in-place
*
* This kernel takes a final int32 accumulator value (the output of @ref NEGEMMLowpMatrixMultiplyKernel),
* and adds to it the offset contribution of matrix A and matrix B in-place.
diff --git a/src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h b/src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h
index 4c68fb0943..39fbd8eb0e 100644
--- a/src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h
+++ b/src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel used to add the offset contribution and perform the output stage after @ref NEGEMMLowpMatrixMultiplyKernel.
+/** Neon kernel used to add the offset contribution and perform the output stage after @ref NEGEMMLowpMatrixMultiplyKernel.
*
* The computation is performed in-place
*
diff --git a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ScaleKernel.h b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ScaleKernel.h
index 42ef570f77..63d80aaf1b 100644
--- a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ScaleKernel.h
+++ b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ScaleKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED
+/** Neon kernel used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED
*
* This kernel takes a final int32 accumulator value (the output of @ref NEGEMMLowpMatrixMultiplyKernel), and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value.
* The following computations will be performed by the kernel:
diff --git a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h
index d04e713cb8..8e92ba6eca 100644
--- a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h
+++ b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel used to quantize down the int32 accumulator values of GEMMLowp to QSYMM16
+/** Neon kernel used to quantize down the int32 accumulator values of GEMMLowp to QSYMM16
*
* This kernel takes a final int32 accumulator value (the output of @ref NEGEMMLowpMatrixMultiplyKernel), and processes it to obtain the final QSYMM16 value.
* The following computations will be performed by the kernel:
diff --git a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h
index 55c07fbb5a..9b51a3ba84 100644
--- a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h
+++ b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8_SIGNED
+/** Neon kernel used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8_SIGNED
*
* This kernel takes a final int32 accumulator value (the output of @ref NEGEMMLowpMatrixMultiplyKernel), and processes it to obtain the final QASYMM8_SIGNED value.
* The following computations will be performed by the kernel:
diff --git a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h
index 1a8de1c441..4d43afaab2 100644
--- a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h
+++ b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8
+/** Neon kernel used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8
*
* This kernel takes a final int32 accumulator value (the output of @ref NEGEMMLowpMatrixMultiplyKernel), and processes it to obtain the final QASYMM8 value.
* The following computations will be performed by the kernel:
diff --git a/src/core/NEON/kernels/NEGEMMLowpReductionKernel.h b/src/core/NEON/kernels/NEGEMMLowpReductionKernel.h
index 655658cb6c..521adbfca4 100644
--- a/src/core/NEON/kernels/NEGEMMLowpReductionKernel.h
+++ b/src/core/NEON/kernels/NEGEMMLowpReductionKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,7 +32,7 @@ namespace arm_compute
class ITensor;
struct GEMMLowpReductionKernelInfo;
-/** Common interface for all NEON reduction kernels */
+/** Common interface for all Neon reduction kernels */
class INEGEMMLowpReductionKernel : public INEKernel
{
public:
@@ -69,7 +69,7 @@ protected:
bool _mul_by_scalar;
};
-/** NEON kernel used to compute the row-vectors of sums of all the entries in each row of Matrix A.
+/** Neon kernel used to compute the row-vectors of sums of all the entries in each row of Matrix A.
*
* @note This stage is needed to handle the offset of matrix product
* https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
@@ -130,7 +130,7 @@ private:
void run_internal(const Window &window);
};
-/** NEON kernel used to compute the row-vectors of sums of all the entries in each column of Matrix B.
+/** Neon kernel used to compute the row-vectors of sums of all the entries in each column of Matrix B.
*
* @note This stage is needed to handle the offset of matrix product
* https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
diff --git a/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h b/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h
index 48377838d2..f9ff143e07 100644
--- a/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h
+++ b/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel to perform the in-place matrix addition between 2 matrices taking into account that the second matrix might be weighted by a scalar value beta:
+/** Neon kernel to perform the in-place matrix addition between 2 matrices taking into account that the second matrix might be weighted by a scalar value beta:
*
* @note [ MTX_OUT = MTX_0 + beta * MTX_1 ] with MTX_0 and MTX_1 of the same size
*
diff --git a/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h b/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h
index 1ea948de63..e2945ee117 100644
--- a/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h
+++ b/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel to multiply two input matrices "A" and "B". All elements of the output matrix/vector will be multiplied by alpha after the matrix multiplication
+/** Neon kernel to multiply two input matrices "A" and "B". All elements of the output matrix/vector will be multiplied by alpha after the matrix multiplication
*
* @note If the output tensor is a matrix, the implementation assumes that the input tensors @p input0 and @p input1 are both matrices and reshaped respectively with @ref NEGEMMInterleave4x4Kernel" and @ref NEGEMMTranspose1xWKernel
* @note If the output tensor is a vector and the data type is F32, the implementation assumes that the first input tensor @p input0 is a vector and the second input tensor @p input1 a matrix. The implementation also assumes that both tensors have not been reshaped
diff --git a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp b/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp
index 6d9f921b02..cea95749f8 100644
--- a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -51,7 +51,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
- //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use NEON FP16 instructions.
+ //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use Neon FP16 instructions.
if(output->total_size() != 0)
{
diff --git a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h b/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h
index 7120943a90..583588a1c1 100644
--- a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h
+++ b/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -31,7 +31,7 @@ namespace arm_compute
// Forward declarations
class ITensor;
-/** NEON kernel which transposes the elements of a matrix in chunks of 1xW, where W is equal to (16 / element size of the tensor)
+/** Neon kernel which transposes the elements of a matrix in chunks of 1xW, where W is equal to (16 / element size of the tensor)
*
* Following an example of how the transposition1xW works when the input data is F32
*
diff --git a/src/core/NEON/kernels/NEGatherKernel.h b/src/core/NEON/kernels/NEGatherKernel.h
index d81e34c39c..46b41b28e3 100644
--- a/src/core/NEON/kernels/NEGatherKernel.h
+++ b/src/core/NEON/kernels/NEGatherKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,7 +33,7 @@ namespace arm_compute
// Forward declarations
class ITensor;
-/** Kernel to perform other operation on NEON */
+/** Kernel to perform other operation on Neon */
class NEGatherKernel : public INEKernel
{
public:
diff --git a/src/core/NEON/kernels/NEGaussian3x3Kernel.h b/src/core/NEON/kernels/NEGaussian3x3Kernel.h
index 8973b48e7a..7ceea2e7c1 100644
--- a/src/core/NEON/kernels/NEGaussian3x3Kernel.h
+++ b/src/core/NEON/kernels/NEGaussian3x3Kernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel to perform a Gaussian 3x3 filter */
+/** Neon kernel to perform a Gaussian 3x3 filter */
class NEGaussian3x3Kernel : public INESimpleKernel
{
public:
diff --git a/src/core/NEON/kernels/NEGaussian5x5Kernel.h b/src/core/NEON/kernels/NEGaussian5x5Kernel.h
index f4bca55637..2c7262f827 100644
--- a/src/core/NEON/kernels/NEGaussian5x5Kernel.h
+++ b/src/core/NEON/kernels/NEGaussian5x5Kernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel to perform a Gaussian 5x5 filter (horizontal pass) */
+/** Neon kernel to perform a Gaussian 5x5 filter (horizontal pass) */
class NEGaussian5x5HorKernel : public INESimpleKernel
{
public:
@@ -67,7 +67,7 @@ private:
BorderSize _border_size;
};
-/** NEON kernel to perform a Gaussian 5x5 filter (vertical pass) */
+/** Neon kernel to perform a Gaussian 5x5 filter (vertical pass) */
class NEGaussian5x5VertKernel : public INESimpleKernel
{
public:
diff --git a/src/core/NEON/kernels/NEGaussianPyramidKernel.h b/src/core/NEON/kernels/NEGaussianPyramidKernel.h
index e852db2699..d943990535 100644
--- a/src/core/NEON/kernels/NEGaussianPyramidKernel.h
+++ b/src/core/NEON/kernels/NEGaussianPyramidKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel to perform a GaussianPyramid (horizontal pass) */
+/** Neon kernel to perform a GaussianPyramid (horizontal pass) */
class NEGaussianPyramidHorKernel : public INESimpleKernel
{
public:
@@ -66,7 +66,7 @@ private:
int _l2_load_offset;
};
-/** NEON kernel to perform a GaussianPyramid (vertical pass) */
+/** Neon kernel to perform a GaussianPyramid (vertical pass) */
class NEGaussianPyramidVertKernel : public INESimpleKernel
{
public:
diff --git a/src/core/NEON/kernels/NEHOGDescriptorKernel.h b/src/core/NEON/kernels/NEHOGDescriptorKernel.h
index 7845bc2cdf..e9cd47b099 100644
--- a/src/core/NEON/kernels/NEHOGDescriptorKernel.h
+++ b/src/core/NEON/kernels/NEHOGDescriptorKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,7 +32,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel to perform HOG Orientation Binning */
+/** Neon kernel to perform HOG Orientation Binning */
class NEHOGOrientationBinningKernel : public INEKernel
{
public:
@@ -91,7 +91,7 @@ private:
float _phase_scale;
};
-/** NEON kernel to perform HOG block normalization */
+/** Neon kernel to perform HOG block normalization */
class NEHOGBlockNormalizationKernel : public INEKernel
{
public:
diff --git a/src/core/NEON/kernels/NEHOGDetectorKernel.h b/src/core/NEON/kernels/NEHOGDetectorKernel.h
index 45c28099c8..e4c699fbfb 100644
--- a/src/core/NEON/kernels/NEHOGDetectorKernel.h
+++ b/src/core/NEON/kernels/NEHOGDetectorKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,7 +33,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel to perform HOG detector kernel using linear SVM */
+/** Neon kernel to perform HOG detector kernel using linear SVM */
class NEHOGDetectorKernel : public INEKernel
{
public:
diff --git a/src/core/NEON/kernels/NEHarrisCornersKernel.h b/src/core/NEON/kernels/NEHarrisCornersKernel.h
index 4b794107a2..85f80878cc 100644
--- a/src/core/NEON/kernels/NEHarrisCornersKernel.h
+++ b/src/core/NEON/kernels/NEHarrisCornersKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -76,7 +76,7 @@ protected:
BorderSize _border_size; /**< Border size */
};
-/** Template NEON kernel to perform Harris Score.
+/** Template Neon kernel to perform Harris Score.
* The implementation supports 3, 5, and 7 for the block_size
*/
template <int32_t block_size>
diff --git a/src/core/NEON/kernels/NEIm2ColKernel.cpp b/src/core/NEON/kernels/NEIm2ColKernel.cpp
index 72821c890d..728cecd074 100644
--- a/src/core/NEON/kernels/NEIm2ColKernel.cpp
+++ b/src/core/NEON/kernels/NEIm2ColKernel.cpp
@@ -55,7 +55,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::BFLOAT16, DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized(input->data_type()) && has_bias);
ARM_COMPUTE_RETURN_ERROR_ON((dilation.x() < 1) || (dilation.y() < 1));
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups > 1, "Number of groups greater than one are not supported on NEON");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups > 1, "Number of groups greater than one are not supported on Neon");
// Since there's no implicit padding added, check the total input spatial dimensions (with conv paddings) are big enough for the kernel dimensions
const unsigned int width_idx = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
diff --git a/src/core/NEON/kernels/NEInstanceNormalizationLayerKernel.cpp b/src/core/NEON/kernels/NEInstanceNormalizationLayerKernel.cpp
index 08bf6f0e76..a3f7d8d634 100644
--- a/src/core/NEON/kernels/NEInstanceNormalizationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEInstanceNormalizationLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -82,7 +82,7 @@ inline float16x8_t vector_float_norm(const float16x8_t &inputs, const float32x4_
template <typename T, typename AccType = T>
void instance_normalization_nchw(ITensor *input, ITensor *output, float gamma, float beta, float epsilon, const Window &window)
{
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
// Clear X/Y dimensions on execution window as we handle the planes manually
diff --git a/src/core/NEON/kernels/NELKTrackerKernel.h b/src/core/NEON/kernels/NELKTrackerKernel.h
index c24166c042..bc4f6ce296 100644
--- a/src/core/NEON/kernels/NELKTrackerKernel.h
+++ b/src/core/NEON/kernels/NELKTrackerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,7 +37,7 @@ namespace arm_compute
{
class ITensor;
-/** Interface for NEON Array of Internal Key Points. */
+/** Interface for Neon Array of Internal Key Points. */
using INELKInternalKeypointArray = IArray<NELKInternalKeypoint>;
/** Interface for the Lucas-Kanade tracker kernel */
diff --git a/src/core/NEON/kernels/NENonMaximaSuppression3x3Kernel.h b/src/core/NEON/kernels/NENonMaximaSuppression3x3Kernel.h
index d32dfecfeb..7b0bc0c720 100644
--- a/src/core/NEON/kernels/NENonMaximaSuppression3x3Kernel.h
+++ b/src/core/NEON/kernels/NENonMaximaSuppression3x3Kernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,7 +32,7 @@ namespace arm_compute
{
class ITensor;
-/** Interface to perform Non-Maxima suppression over a 3x3 window using NEON
+/** Interface to perform Non-Maxima suppression over a 3x3 window using Neon
*
* @note Used by @ref NEFastCorners and @ref NEHarrisCorners
*/
@@ -83,7 +83,7 @@ protected:
};
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-/** NEON kernel to perform Non-Maxima suppression 3x3 with intermediate results in FP16 if the input data type is FP32
+/** Neon kernel to perform Non-Maxima suppression 3x3 with intermediate results in FP16 if the input data type is FP32
*/
class NENonMaximaSuppression3x3FP16Kernel : public NENonMaximaSuppression3x3Kernel
{
@@ -101,7 +101,7 @@ public:
void configure(const ITensor *input, ITensor *output, bool border_undefined);
};
#else /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-/** NEON kernel to perform Non-Maxima suppression 3x3 with intermediate results in FP16 if the input data type is FP32 */
+/** Neon kernel to perform Non-Maxima suppression 3x3 with intermediate results in FP16 if the input data type is FP32 */
using NENonMaximaSuppression3x3FP16Kernel = NENonMaximaSuppression3x3Kernel;
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
} // namespace arm_compute
diff --git a/src/core/NEON/kernels/NENormalizationLayerKernel.cpp b/src/core/NEON/kernels/NENormalizationLayerKernel.cpp
index 27464d5b42..a7900ee074 100644
--- a/src/core/NEON/kernels/NENormalizationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NENormalizationLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -173,7 +173,7 @@ void NENormalizationLayerKernel::configure(const ITensor *input, const ITensor *
template <typename T, unsigned int S, unsigned int dim, bool do_2D_norm>
void NENormalizationLayerKernel::normalize_float(const Window &window)
{
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
Window win(window);
@@ -295,4 +295,4 @@ void NENormalizationLayerKernel::run(const Window &window, const ThreadInfo &inf
// Run function
(this->*_func)(window);
}
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
diff --git a/src/core/NEON/kernels/NEPadLayerKernel.h b/src/core/NEON/kernels/NEPadLayerKernel.h
index ec4bdffdcd..af0dbfdc64 100644
--- a/src/core/NEON/kernels/NEPadLayerKernel.h
+++ b/src/core/NEON/kernels/NEPadLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel to add padding to a tensor
+/** Neon kernel to add padding to a tensor
*
* Add padding given padding information
*/
diff --git a/src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.h b/src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.h
index ba68171a59..5522ae889a 100644
--- a/src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.h
+++ b/src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -31,7 +31,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel to perform layer normalization */
+/** Neon kernel to perform layer normalization */
class NEQLSTMLayerNormalizationKernel : public INEKernel
{
public:
diff --git a/src/core/NEON/kernels/NERangeKernel.cpp b/src/core/NEON/kernels/NERangeKernel.cpp
index 8d11122ab2..f62c868dff 100644
--- a/src/core/NEON/kernels/NERangeKernel.cpp
+++ b/src/core/NEON/kernels/NERangeKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,7 +43,7 @@ namespace
template <typename T>
void range_function(ITensor *output, float start, float step, const Window &window)
{
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using ExactTagType = typename wrapper::traits::neon_bitvector<T, wrapper::traits::BitWidth::W128>::tag_type;
const auto step_vec = wrapper::vdup_n(static_cast<T>(step), ExactTagType{});
@@ -189,4 +189,4 @@ void NERangeKernel::run(const Window &window, const ThreadInfo &info)
(*_func)(_output, _start, _step, window);
}
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
diff --git a/src/core/NEON/kernels/NEReductionOperationKernel.cpp b/src/core/NEON/kernels/NEReductionOperationKernel.cpp
index 3d105cc60d..001025bdf5 100644
--- a/src/core/NEON/kernels/NEReductionOperationKernel.cpp
+++ b/src/core/NEON/kernels/NEReductionOperationKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -386,7 +386,7 @@ public:
template <typename T, int S>
struct RedOpX
{
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
inline void operator()(const Window &in_window, Window &out_window, const ITensor *in, ITensor *out, const ReductionOperation op)
@@ -859,7 +859,7 @@ struct RedOpX_quantized
template <typename T, int S>
struct RedOpYZW
{
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
using neon_vector = typename wrapper::traits::neon_vector<T, S>::type;
@@ -1078,7 +1078,7 @@ struct RedOpYZW
template <typename T, int S, int axis, ReductionOperation op>
struct RedOpYZW_complex
{
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
using neon_vector = typename wrapper::traits::neon_vector<T, S>::type;
diff --git a/src/core/NEON/kernels/NEReductionOperationKernel.h b/src/core/NEON/kernels/NEReductionOperationKernel.h
index dfc105adae..c7ed0070be 100644
--- a/src/core/NEON/kernels/NEReductionOperationKernel.h
+++ b/src/core/NEON/kernels/NEReductionOperationKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel to perform a reduction operation
+/** Neon kernel to perform a reduction operation
*
* @note For ARG_MIN/ARG_MAX reduction, the default data type for an uninitialized
* output tensor is signed 32-bit integer (S32). It is the user's responsibility
diff --git a/src/core/NEON/kernels/NERemapKernel.h b/src/core/NEON/kernels/NERemapKernel.h
index 8fe1ba5855..adc7f4bdd5 100644
--- a/src/core/NEON/kernels/NERemapKernel.h
+++ b/src/core/NEON/kernels/NERemapKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -31,7 +31,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel to perform a remap on a tensor */
+/** Neon kernel to perform a remap on a tensor */
class NERemapKernel : public INEKernel
{
public:
diff --git a/src/core/NEON/kernels/NEReorgLayerKernel.cpp b/src/core/NEON/kernels/NEReorgLayerKernel.cpp
index 0dcb439665..215debe10f 100644
--- a/src/core/NEON/kernels/NEReorgLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEReorgLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,7 +42,7 @@ namespace
{
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, int32_t stride)
{
- //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use NEON FP16 instructions.
+ //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use Neon FP16 instructions.
ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() == DataLayout::UNKNOWN);
diff --git a/src/core/NEON/kernels/NEReverseKernel.cpp b/src/core/NEON/kernels/NEReverseKernel.cpp
index 21c758053a..b2fce0f56d 100644
--- a/src/core/NEON/kernels/NEReverseKernel.cpp
+++ b/src/core/NEON/kernels/NEReverseKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,7 +37,7 @@ namespace
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *axis)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output, axis);
- //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use NEON FP16 instructions.
+ //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use Neon FP16 instructions.
ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(axis, 1, DataType::U32);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis->num_dimensions() > 1, "Axis must be a 1D tensor");
@@ -163,4 +163,4 @@ void NEReverseKernel::run(const Window &window, const ThreadInfo &info)
ARM_COMPUTE_ERROR("Element size not supported");
}
}
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
diff --git a/src/core/NEON/kernels/NEScaleKernel.h b/src/core/NEON/kernels/NEScaleKernel.h
index f6ee3fa4c5..32fa8d7fb2 100644
--- a/src/core/NEON/kernels/NEScaleKernel.h
+++ b/src/core/NEON/kernels/NEScaleKernel.h
@@ -31,7 +31,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel to perform scaling on a tensor */
+/** Neon kernel to perform scaling on a tensor */
class NEScaleKernel : public INEKernel
{
public:
diff --git a/src/core/NEON/kernels/NEStackLayerKernel.cpp b/src/core/NEON/kernels/NEStackLayerKernel.cpp
index 55170a169a..64310e7f7f 100644
--- a/src/core/NEON/kernels/NEStackLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEStackLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,7 +44,7 @@ namespace
Status validate_arguments(const ITensorInfo *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use NEON FP16 instructions.
+ // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use Neon FP16 instructions.
ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
ARM_COMPUTE_RETURN_ERROR_ON(idx_input >= num_tensors);
ARM_COMPUTE_RETURN_ERROR_ON(axis > input->num_dimensions());
diff --git a/src/core/NEON/kernels/NEStackLayerKernel.h b/src/core/NEON/kernels/NEStackLayerKernel.h
index 9b0a039b88..dc5c7d77ad 100644
--- a/src/core/NEON/kernels/NEStackLayerKernel.h
+++ b/src/core/NEON/kernels/NEStackLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,7 +32,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel to stack a rank-R tensor into one with rank-(R+1) along the axis dimension.*/
+/** Neon kernel to stack a rank-R tensor into one with rank-(R+1) along the axis dimension.*/
class NEStackLayerKernel : public INEKernel
{
public:
diff --git a/src/core/NEON/kernels/NEThresholdKernel.cpp b/src/core/NEON/kernels/NEThresholdKernel.cpp
index 183bb8db5c..108f29f377 100644
--- a/src/core/NEON/kernels/NEThresholdKernel.cpp
+++ b/src/core/NEON/kernels/NEThresholdKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -112,7 +112,7 @@ Status NEThresholdKernel::validate(const ITensorInfo *input, const ITensorInfo *
inline void NEThresholdKernel::run_binary(const Window &window)
{
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using Type = uint8_t;
using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<Type, wrapper::traits::BitWidth::W128>;
@@ -158,7 +158,7 @@ inline void NEThresholdKernel::run_binary(const Window &window)
inline void NEThresholdKernel::run_range(const Window &window)
{
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using Type = uint8_t;
using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<Type, wrapper::traits::BitWidth::W128>;
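The two run paths above differ only in the comparison they vectorize. A minimal sketch of the binary path using raw Neon intrinsics (illustrative names only, not the kernel's actual code; the real loop walks a Window rather than a flat array):

    #include <arm_neon.h>
    #include <cstdint>

    // dst[i] = (src[i] > threshold) ? true_value : false_value, 16 lanes at a time.
    // vcgtq_u8 sets a lane to all-ones where the compare holds, so a bitwise
    // select (vbslq_u8) picks true_value or false_value per lane.
    void threshold_binary_u8(const uint8_t *src, uint8_t *dst, int n,
                             uint8_t threshold, uint8_t true_value, uint8_t false_value)
    {
        const uint8x16_t vthr   = vdupq_n_u8(threshold);
        const uint8x16_t vtrue  = vdupq_n_u8(true_value);
        const uint8x16_t vfalse = vdupq_n_u8(false_value);
        int i = 0;
        for(; i <= n - 16; i += 16)
        {
            const uint8x16_t mask = vcgtq_u8(vld1q_u8(src + i), vthr);
            vst1q_u8(dst + i, vbslq_u8(mask, vtrue, vfalse));
        }
        for(; i < n; ++i) // scalar leftover, as in the kernel's tail handling
        {
            dst[i] = (src[i] > threshold) ? true_value : false_value;
        }
    }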
diff --git a/src/core/NEON/kernels/NETileKernel.h b/src/core/NEON/kernels/NETileKernel.h
index 8dfea8bc2f..e6ce9534e7 100644
--- a/src/core/NEON/kernels/NETileKernel.h
+++ b/src/core/NEON/kernels/NETileKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel to perform a tile operation */
+/** Neon kernel to perform a tile operation */
class NETileKernel : public INEKernel
{
public:
diff --git a/src/core/NEON/kernels/NETransposeKernel.cpp b/src/core/NEON/kernels/NETransposeKernel.cpp
index 134831be4c..980d90b476 100644
--- a/src/core/NEON/kernels/NETransposeKernel.cpp
+++ b/src/core/NEON/kernels/NETransposeKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -59,7 +59,7 @@ TensorShape transposed_tensor_shape(const TensorShape &in)
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
- //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use NEON FP16 instructions.
+ //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use Neon FP16 instructions.
ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
if(output->total_size() != 0)
@@ -125,7 +125,7 @@ void transpose_8bit_elements(const ITensor *in, ITensor *out, const Window &wind
Iterator output(out, window_out);
- // Run the NEON path if and only if the input is not a row-vector
+ // Run the Neon path if and only if the input is not a row-vector
if(in->info()->dimension(1) != 1)
{
Iterator input(in, window_in);
@@ -264,7 +264,7 @@ void transpose_16bit_elements(const ITensor *in, ITensor *out, const Window &win
Iterator output(out, window_out);
- // Run the NEON path if and only if the input is not a row-vector
+ // Run the Neon path if and only if the input is not a row-vector
if(in->info()->dimension(1) != 1)
{
Iterator input(in, window_in);
@@ -377,7 +377,7 @@ void transpose_32bit_elements(const ITensor *in, ITensor *out, const Window &win
Iterator output(out, window_out);
- // Run the NEON path if and only if the input is not a row-vector
+ // Run the Neon path if and only if the input is not a row-vector
if(in->info()->dimension(1) != 1)
{
Iterator input(in, window_in);
diff --git a/src/core/NEON/kernels/NETransposeKernel.h b/src/core/NEON/kernels/NETransposeKernel.h
index 73d2098fb3..88ece547e1 100644
--- a/src/core/NEON/kernels/NETransposeKernel.h
+++ b/src/core/NEON/kernels/NETransposeKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel which transposes the elements of a matrix.
+/** Neon kernel which transposes the elements of a matrix.
*
* [width, height, batch] -> [height, width, batch]
*
diff --git a/src/core/NEON/kernels/NEWeightsReshapeKernel.cpp b/src/core/NEON/kernels/NEWeightsReshapeKernel.cpp
index 118655b755..109652fb97 100644
--- a/src/core/NEON/kernels/NEWeightsReshapeKernel.cpp
+++ b/src/core/NEON/kernels/NEWeightsReshapeKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,7 +47,7 @@ TensorShape get_output_shape(const ITensorInfo *input, bool has_bias)
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *biases, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use NEON FP16 instructions.
+ //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use Neon FP16 instructions.
ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
if(biases != nullptr)
diff --git a/src/core/NEON/kernels/NEWeightsReshapeKernel.h b/src/core/NEON/kernels/NEWeightsReshapeKernel.h
index 9678b79fda..a4a9e28763 100644
--- a/src/core/NEON/kernels/NEWeightsReshapeKernel.h
+++ b/src/core/NEON/kernels/NEWeightsReshapeKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,7 @@ namespace arm_compute
{
class ITensor;
-/** NEON kernel to perform reshaping on the weights used by convolution and locally connected layer
+/** Neon kernel to perform reshaping on the weights used by convolution and locally connected layers
*
* Rearranges each 3-dimensional kernel to a single row leading to a matrix with linearized kernels.
* In combination with the @ref NEIm2ColKernel can transform a convolution to a matrix multiplication.
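To make the linearization concrete (illustrative numbers, not taken from the patch): for 3x3 kernels over C = 64 input channels and K = 128 output maps, each [3, 3, 64] kernel flattens to one row of 3 * 3 * 64 = 576 weights (577 if a bias term is appended), so the reshaped weights form a 128 x 577 matrix. NEIm2ColKernel emits a matching 577-tall column per output position, and the convolution reduces to a single matrix multiplication.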
diff --git a/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h b/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h
index 2b87e512dc..3583735482 100644
--- a/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h
+++ b/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -35,7 +35,7 @@ namespace arm_compute
// Forward declarations
class ITensor;
-/** Interface for the NEON kernel to perform Winograd input transform. */
+/** Interface for the Neon kernel to perform Winograd input transform. */
class INEWinogradLayerTransformInputKernel : public INEKernel
{
public:
@@ -96,7 +96,7 @@ public:
}
};
-/** NEON kernel to perform Winograd input transform. */
+/** Neon kernel to perform Winograd input transform. */
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
class NEWinogradLayerTransformInputKernel : public INEWinogradLayerTransformInputKernel
{
@@ -227,7 +227,7 @@ private:
ITensor *_workspace;
};
-/** Interface for the NEON kernel to perform Winograd output transform. */
+/** Interface for the Neon kernel to perform Winograd output transform. */
class INEWinogradLayerTransformOutputKernel : public INEKernel
{
public:
@@ -310,7 +310,7 @@ public:
}
};
-/** NEON kernel to perform Winograd output transform. */
+/** Neon kernel to perform Winograd output transform. */
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
class NEWinogradLayerTransformOutputKernel : public INEWinogradLayerTransformOutputKernel
{
@@ -437,7 +437,7 @@ private:
int _num_channels;
};
-/** Interface for the NEON kernel to perform Winograd weights transform. */
+/** Interface for the Neon kernel to perform Winograd weights transform. */
class INEWinogradLayerTransformWeightsKernel : public INEKernel
{
public:
@@ -495,7 +495,7 @@ public:
static Status validate(const ITensorInfo *input, const ITensorInfo *weights);
};
-/** NEON kernel to perform Winograd weights transform. */
+/** Neon kernel to perform Winograd weights transform. */
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
class NEWinogradLayerTransformWeightsKernel final : public INEWinogradLayerTransformWeightsKernel
{
@@ -577,7 +577,7 @@ private:
int _num_input_channels;
};
-/** NEON kernel to perform Winograd. */
+/** Neon kernel to perform Winograd. */
template <typename TIn, typename TOut, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
class NEWinogradLayerConfiguration
{
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
index c1e34d12f7..b0e912d188 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
@@ -111,7 +111,7 @@ static const GemmImplementation<float, float> gemm_fp32_methods[] =
},
#endif // __ARM_FEATURE_SVE
-// NEON hybrid methods
+// Neon hybrid methods
{
GemmMethod::GEMM_HYBRID,
"a64_smallK_hybrid_fp32_mla_8x4",
diff --git a/src/core/NEON/kernels/arm_gemm/interleave_indirect.cpp b/src/core/NEON/kernels/arm_gemm/interleave_indirect.cpp
index 02d9486cc6..bb86d9e41d 100644
--- a/src/core/NEON/kernels/arm_gemm/interleave_indirect.cpp
+++ b/src/core/NEON/kernels/arm_gemm/interleave_indirect.cpp
@@ -292,21 +292,21 @@ void Interleave(TOut *out, const TIn *in, size_t in_stride, const unsigned int y
/* AArch32 */
#ifdef __arm__
/* FP32 */
-/* NEON implementation (height 6) */
+/* Neon implementation (height 6) */
template void IndirectInterleave<6, 1, VLType::None>(float *, const float * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void ConvolutionInterleave<6, 1, VLType::None>(float *, const float *, size_t, const convolver<float> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void Interleave<6, 1, VLType::None>(float *, const float *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
/* FP16 */
#if __ARM_FP16_ARGS
-/* NEON implementation using FP32 kernel (height 6) */
+/* Neon implementation using FP32 kernel (height 6) */
template void IndirectInterleave<6, 1, VLType::None>(float *, const __fp16 * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void ConvolutionInterleave<6, 1, VLType::None>(float *, const __fp16 *, size_t, const convolver<__fp16> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void Interleave<6, 1, VLType::None>(float *, const __fp16 *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
#endif /* __ARM_FP16_ARGS */
/* BF16 */
-/* NEON implementation using FP32 kernel */
+/* Neon implementation using FP32 kernel */
template void IndirectInterleave<6, 1, VLType::None>(float *, const bfloat16 * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void ConvolutionInterleave<6, 1, VLType::None>(float *, const bfloat16 *, size_t, const convolver<bfloat16> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void Interleave<6, 1, VLType::None>(float *, const bfloat16 *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
@@ -315,7 +315,7 @@ template void Interleave<6, 1, VLType::None>(float *, const bfloat16 *, size_t,
/* AArch64 */
#ifdef __aarch64__
/* FP32 */
-/* NEON/SVE implementation (height 8) */
+/* Neon/SVE implementation (height 8) */
template void IndirectInterleave<8, 1, VLType::None>(float *, const float * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void ConvolutionInterleave<8, 1, VLType::None>(float *, const float *, size_t, const convolver<float> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void Interleave<8, 1, VLType::None>(float *, const float *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
@@ -339,7 +339,7 @@ template void ConvolutionInterleave<8, 1, VLType::None>(float *, const __fp16 *,
template void Interleave<8, 1, VLType::None>(float *, const __fp16 *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
/* BF16 */
-/* NEON/SVE BFDOT */
+/* Neon/SVE BFDOT */
#ifdef V8P6_BF
template void IndirectInterleave<8, 2, VLType::None>(bfloat16 *, const bfloat16 * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void ConvolutionInterleave<8, 2, VLType::None>(bfloat16 *, const bfloat16 *, size_t, const convolver<bfloat16> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
@@ -350,7 +350,7 @@ template void ConvolutionInterleave<8, 4, VLType::None>(bfloat16 *, const bfloat
template void Interleave<8, 4, VLType::None>(bfloat16 *, const bfloat16 *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
#endif // V8P6_BF
-/* NEON/SVE using FP32 kernel */
+/* Neon/SVE using FP32 kernel */
template void IndirectInterleave<8, 1, VLType::None>(float *, const bfloat16 * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void ConvolutionInterleave<8, 1, VLType::None>(float *, const bfloat16 *, size_t, const convolver<bfloat16> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void Interleave<8, 1, VLType::None>(float *, const bfloat16 *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
@@ -365,12 +365,12 @@ template void ConvolutionInterleave<8, 1, VLType::None>(uint16_t *, const uint16
template void Interleave<8, 1, VLType::None>(uint16_t *, const uint16_t *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
/* INT8 */
-/* NEON SMLA/SMLAL (height 4, block 16) */
+/* Neon SMLA/SMLAL (height 4, block 16) */
template void IndirectInterleave<4, 16, VLType::None>(int8_t *, const int8_t * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void ConvolutionInterleave<4, 16, VLType::None>(int8_t *, const int8_t *, size_t, const convolver<int8_t> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void Interleave<4, 16, VLType::None>(int8_t *, const int8_t *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
-/* NEON SDOT (height 8, block 4) */
+/* Neon SDOT (height 8, block 4) */
template void IndirectInterleave<8, 4, VLType::None>(int8_t *, const int8_t * const * const *, unsigned int, unsigned int, unsigned int y0, unsigned int ymax, unsigned int k0, unsigned int kmax, bool, int32_t);
template void ConvolutionInterleave<8, 4, VLType::None>(int8_t *, const int8_t *, size_t, const convolver<int8_t> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void Interleave<8, 4, VLType::None>(int8_t *, const int8_t *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
@@ -382,17 +382,17 @@ template void ConvolutionInterleave<8, 8, VLType::None>(int8_t *, const int8_t *
template void Interleave<8, 8, VLType::None>(int8_t *, const int8_t *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
#endif // MMLA_INT8
-/* NEON SDOT (height 8, block 1) */
+/* Neon SDOT (height 8, block 1) */
template void IndirectInterleave<8, 1, VLType::None>(int16_t *, const int8_t * const * const *, unsigned int, unsigned int, unsigned int y0, unsigned int ymax, unsigned int k0, unsigned int kmax, bool, int32_t);
template void ConvolutionInterleave<8, 1, VLType::None>(int16_t *, const int8_t *, size_t, const convolver<int8_t> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void Interleave<8, 1, VLType::None>(int16_t *, const int8_t *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
-/* NEON SMLA/SMLAL (height 4, block 16) */
+/* Neon SMLA/SMLAL (height 4, block 16) */
template void IndirectInterleave<4, 16, VLType::None>(uint8_t *, const uint8_t * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void ConvolutionInterleave<4, 16, VLType::None>(uint8_t *, const uint8_t *, size_t, const convolver<uint8_t> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void Interleave<4, 16, VLType::None>(uint8_t *, const uint8_t *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
-/* NEON SDOT (height 8, block 4) */
+/* Neon SDOT (height 8, block 4) */
template void IndirectInterleave<8, 4, VLType::None>(uint8_t *, const uint8_t * const * const *, unsigned int, unsigned int, unsigned int y0, unsigned int ymax, unsigned int k0, unsigned int kmax, bool, int32_t);
template void ConvolutionInterleave<8, 4, VLType::None>(uint8_t *, const uint8_t *, size_t, const convolver<uint8_t> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void Interleave<8, 4, VLType::None>(uint8_t *, const uint8_t *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
@@ -404,7 +404,7 @@ template void ConvolutionInterleave<8, 8, VLType::None>(uint8_t *, const uint8_t
template void Interleave<8, 8, VLType::None>(uint8_t *, const uint8_t *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
#endif // MMLA_INT8
-/* NEON 16-bit (height 8, block 1) */
+/* Neon 16-bit (height 8, block 1) */
template void IndirectInterleave<8, 1, VLType::None>(uint16_t *, const uint8_t * const * const *, unsigned int, unsigned int, unsigned int y0, unsigned int ymax, unsigned int k0, unsigned int kmax, bool, int32_t);
template void ConvolutionInterleave<8, 1, VLType::None>(uint16_t *, const uint8_t *, size_t, const convolver<uint8_t> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void Interleave<8, 1, VLType::None>(uint16_t *, const uint8_t *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
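The <height, block> template parameters describe the packing order. A scalar reference model of height-H, block-B interleaving (a plain sketch, not the optimized routine; the real code also restricts itself to the y0/ymax and k0/kmax sub-ranges):

    #include <cstddef>

    // Rows are walked in groups of H; each group is emitted B consecutive
    // elements per row at a time, so the micro-kernel (e.g. SDOT with
    // height 8, block 4) streams contiguous H x B tiles. Ragged edges are
    // zero-padded.
    template <int H, int B, typename T>
    void interleave_ref(T *out, const T *in, size_t ld_in, int rows, int cols)
    {
        for(int r0 = 0; r0 < rows; r0 += H)
        {
            for(int k0 = 0; k0 < cols; k0 += B)
            {
                for(int r = r0; r < r0 + H; ++r)
                {
                    for(int k = k0; k < k0 + B; ++k)
                    {
                        *out++ = (r < rows && k < cols) ? in[r * ld_in + k] : T(0);
                    }
                }
            }
        }
    }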
diff --git a/src/core/NEON/kernels/batchnormalization/impl/NEON/fp16.cpp b/src/core/NEON/kernels/batchnormalization/impl/NEON/fp16.cpp
index 704f003521..4108d79a5a 100644
--- a/src/core/NEON/kernels/batchnormalization/impl/NEON/fp16.cpp
+++ b/src/core/NEON/kernels/batchnormalization/impl/NEON/fp16.cpp
@@ -45,7 +45,7 @@ template <typename T>
void batch_normalization(ITensor *src, ITensor *dst, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma,
float epsilon, ActivationLayerInfo &act_info, const Window &window)
{
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<float16_t, wrapper::traits::BitWidth::W128>;
const int window_step_x = 8;
@@ -149,4 +149,4 @@ void fp16_neon_batch_normalization(ITensor *src, ITensor *dst, const ITensor *me
} // namespace cpu
} // namespace arm_compute
-#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */ \ No newline at end of file
+#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */
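The computation being vectorized here is ordinary inference-mode batch normalization; as a scalar reference (before the optional fused activation):

    #include <cmath>

    // out = gamma * (x - mean) / sqrt(var + epsilon) + beta
    inline float batch_norm_ref(float x, float mean, float var,
                                float beta, float gamma, float epsilon)
    {
        return gamma * (x - mean) / std::sqrt(var + epsilon) + beta;
    }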
diff --git a/src/core/NEON/kernels/batchnormalization/impl/NEON/fp32.cpp b/src/core/NEON/kernels/batchnormalization/impl/NEON/fp32.cpp
index 76a71ed738..1fdc5bd5b1 100644
--- a/src/core/NEON/kernels/batchnormalization/impl/NEON/fp32.cpp
+++ b/src/core/NEON/kernels/batchnormalization/impl/NEON/fp32.cpp
@@ -44,7 +44,7 @@ template <typename T>
void batch_normalization(ITensor *src, ITensor *dst, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma,
float epsilon, ActivationLayerInfo &act_info, const Window &window)
{
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<float, wrapper::traits::BitWidth::W128>;
const int window_step_x = 4;
diff --git a/src/core/NEON/kernels/detail/NEActivationFunctionDetail.h b/src/core/NEON/kernels/detail/NEActivationFunctionDetail.h
index eef1be06eb..e68f1117e8 100644
--- a/src/core/NEON/kernels/detail/NEActivationFunctionDetail.h
+++ b/src/core/NEON/kernels/detail/NEActivationFunctionDetail.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,7 +34,7 @@ namespace detail
template <typename T, int S>
struct dummy
{
- /** NEON vector type. */
+ /** Neon vector type. */
using ExactType = typename wrapper::traits::neon_vector<T, S>::type;
/** Construct a dummy activation object.
@@ -68,9 +68,9 @@ struct dummy
template <typename T, int S>
struct linear
{
- /** NEON vector type. */
+ /** Neon vector type. */
using ExactType = typename wrapper::traits::neon_vector<T, S>::type;
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
/** Construct a Linear activation object.
@@ -112,9 +112,9 @@ struct linear
template <typename T, int S>
struct square
{
- /** NEON vector type. */
+ /** Neon vector type. */
using ExactType = typename wrapper::traits::neon_vector<T, S>::type;
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
/** Construct a Square activation object.
@@ -148,9 +148,9 @@ struct square
template <typename T, int S>
struct logistic
{
- /** NEON vector type. */
+ /** Neon vector type. */
using ExactType = typename wrapper::traits::neon_vector<T, S>::type;
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
/** Construct a Logistic activation object.
@@ -188,9 +188,9 @@ struct logistic
template <typename T, int S>
struct relu
{
- /** NEON vector type. */
+ /** Neon vector type. */
using ExactType = typename wrapper::traits::neon_vector<T, S>::type;
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
/** Construct a RELU activation object.
@@ -228,9 +228,9 @@ struct relu
template <typename T, int S>
struct brelu
{
- /** NEON vector type. */
+ /** Neon vector type. */
using ExactType = typename wrapper::traits::neon_vector<T, S>::type;
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
/** Construct a bounded RELU activation object.
@@ -270,9 +270,9 @@ struct brelu
template <typename T, int S>
struct lubrelu
{
- /** NEON vector type. */
+ /** Neon vector type. */
using ExactType = typename wrapper::traits::neon_vector<T, S>::type;
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
/** Construct a lower-upper bounded RELU activation object.
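Each of these structs is a small functor over one Neon register. A hedged sketch of the shape they share, written against plain float32x4_t rather than the wrapper traits types:

    #include <arm_neon.h>

    struct relu_f32x4
    {
        void operator()(float32x4_t &v) const
        {
            v = vmaxq_f32(vdupq_n_f32(0.f), v); // max(0, x)
        }
    };

    struct brelu_f32x4 // bounded RELU: min(alpha, max(0, x))
    {
        explicit brelu_f32x4(float alpha) : _alpha(vdupq_n_f32(alpha)) {}
        void operator()(float32x4_t &v) const
        {
            v = vminq_f32(_alpha, vmaxq_f32(vdupq_n_f32(0.f), v));
        }
        float32x4_t _alpha;
    };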
diff --git a/src/core/NEON/wrapper/traits.h b/src/core/NEON/wrapper/traits.h
index 0bb55afe02..b786e44bc7 100644
--- a/src/core/NEON/wrapper/traits.h
+++ b/src/core/NEON/wrapper/traits.h
@@ -44,7 +44,7 @@ struct vector_64_tag {};
/** 128-bit vector tag */
struct vector_128_tag {};
-/** Create the appropriate NEON vector given its type and size in terms of elements */
+/** Create the appropriate Neon vector given its type and size in terms of elements */
template <typename T, int S> struct neon_vector;
// Specializations
@@ -88,7 +88,7 @@ enum class BitWidth
W128, /**< 128-bit width */
};
-/** Create the appropriate NEON vector given its type and size in terms of bits */
+/** Create the appropriate Neon vector given its type and size in terms of bits */
template <typename T, BitWidth BW> struct neon_bitvector;
// Specializations
#ifndef DOXYGEN_SKIP_THIS
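Illustratively (assuming the specializations follow the usual intrinsic types), neon_vector<float, 4>::type is float32x4_t, neon_vector<uint8_t, 8>::type is uint8x8_t, and neon_bitvector<float, BitWidth::W128>::type is again float32x4_t. The tag types exist so generic wrappers can overload on register width; a minimal sketch with local stand-in tags:

    #include <arm_neon.h>

    struct vector_64_tag {};   // local stand-ins mirroring the tags above
    struct vector_128_tag {};

    inline float32x2_t vdup_n(float v, vector_64_tag)  { return vdup_n_f32(v); }
    inline float32x4_t vdup_n(float v, vector_128_tag) { return vdupq_n_f32(v); }

    // A kernel templated on <T, S> can then write
    //   auto k = vdup_n(scalar, typename neon_vector<T, S>::tag_type{});
    // and the overload set picks the 64- or 128-bit intrinsic at compile time.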
diff --git a/src/core/cpu/kernels/CpuConcatenateBatchKernel.cpp b/src/core/cpu/kernels/CpuConcatenateBatchKernel.cpp
index e51c341851..48eac13041 100644
--- a/src/core/cpu/kernels/CpuConcatenateBatchKernel.cpp
+++ b/src/core/cpu/kernels/CpuConcatenateBatchKernel.cpp
@@ -133,7 +133,7 @@ void batch_concat(const ITensor *src, ITensor *dst, unsigned int batch_offset, c
Status validate_arguments(const ITensorInfo *src, unsigned int batch_offset, const ITensorInfo *dst)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use NEON FP16 instructions.
+ //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use Neon FP16 instructions.
ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
diff --git a/src/core/cpu/kernels/CpuConcatenateDepthKernel.cpp b/src/core/cpu/kernels/CpuConcatenateDepthKernel.cpp
index dee0283a2c..f64c282ae4 100644
--- a/src/core/cpu/kernels/CpuConcatenateDepthKernel.cpp
+++ b/src/core/cpu/kernels/CpuConcatenateDepthKernel.cpp
@@ -134,7 +134,7 @@ void depth_concat(const ITensor *src, ITensor *dst, unsigned int depth_offset, c
Status validate_arguments(const ITensorInfo *input, unsigned int depth_offset, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use NEON FP16 instructions.
+ //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use Neon FP16 instructions.
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
diff --git a/src/core/cpu/kernels/CpuConcatenateHeightKernel.cpp b/src/core/cpu/kernels/CpuConcatenateHeightKernel.cpp
index 8522c93340..c6e224970a 100644
--- a/src/core/cpu/kernels/CpuConcatenateHeightKernel.cpp
+++ b/src/core/cpu/kernels/CpuConcatenateHeightKernel.cpp
@@ -49,7 +49,7 @@ namespace
Status validate_arguments(const ITensorInfo *src, unsigned int height_offset, const ITensorInfo *dst)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use NEON FP16 instructions.
+ // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use Neon FP16 instructions.
ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(Window::DimX) != dst->dimension(Window::DimX));
diff --git a/src/core/cpu/kernels/CpuConcatenateWidthKernel.cpp b/src/core/cpu/kernels/CpuConcatenateWidthKernel.cpp
index 27ded05aff..e707e8d5a4 100644
--- a/src/core/cpu/kernels/CpuConcatenateWidthKernel.cpp
+++ b/src/core/cpu/kernels/CpuConcatenateWidthKernel.cpp
@@ -49,7 +49,7 @@ namespace
Status validate_arguments(const ITensorInfo *src, unsigned int width_offset, const ITensorInfo *dst)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use NEON FP16 instructions.
+ // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use Neon FP16 instructions.
ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) + width_offset > dst->dimension(0));
diff --git a/src/core/cpu/kernels/CpuReshapeKernel.cpp b/src/core/cpu/kernels/CpuReshapeKernel.cpp
index 068f5d025b..41ff8bd390 100644
--- a/src/core/cpu/kernels/CpuReshapeKernel.cpp
+++ b/src/core/cpu/kernels/CpuReshapeKernel.cpp
@@ -50,7 +50,7 @@ namespace
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use NEON FP16 instructions.
+ // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use Neon FP16 instructions.
ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
diff --git a/src/core/cpu/kernels/activation/NEON/fp16.cpp b/src/core/cpu/kernels/activation/NEON/fp16.cpp
index bd459e9e77..7fe4ab3f63 100644
--- a/src/core/cpu/kernels/activation/NEON/fp16.cpp
+++ b/src/core/cpu/kernels/activation/NEON/fp16.cpp
@@ -51,7 +51,7 @@ inline float16x8_t mask_float_vector(const float16x8_t &in, const uint16x8_t &ma
void fp16_neon_activation(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
{
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<float16_t, wrapper::traits::BitWidth::W128>;
const ActivationLayerInfo::ActivationFunction act = act_info.activation();
@@ -215,4 +215,4 @@ void fp16_neon_activation(const ITensor *src, ITensor *dst, const ActivationLaye
} // namespace cpu
} // namespace arm_compute
-#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */ \ No newline at end of file
+#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */
diff --git a/src/core/cpu/kernels/activation/NEON/fp32.cpp b/src/core/cpu/kernels/activation/NEON/fp32.cpp
index c76035b5d2..f1f2753813 100644
--- a/src/core/cpu/kernels/activation/NEON/fp32.cpp
+++ b/src/core/cpu/kernels/activation/NEON/fp32.cpp
@@ -49,7 +49,7 @@ inline float32x4_t mask_float_vector(const float32x4_t &in, const uint32x4_t &ma
void fp32_neon_activation(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
{
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using ExactTagType = typename arm_compute::wrapper::traits::neon_bitvector_tag_t<float, wrapper::traits::BitWidth::W128>;
constexpr int window_step_x = 4;
diff --git a/src/core/cpu/kernels/add/neon/list.h b/src/core/cpu/kernels/add/neon/list.h
index 53ea81e284..964bdccca3 100644
--- a/src/core/cpu/kernels/add/neon/list.h
+++ b/src/core/cpu/kernels/add/neon/list.h
@@ -47,7 +47,7 @@ DECLARE_ADD_KERNEL(add_u8_u8_s16_neon);
template <typename ScalarType>
void add_same_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<ScalarType, wrapper::traits::BitWidth::W128>;
// Create input windows
@@ -143,4 +143,4 @@ void add_same_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const
}
} // namespace cpu
} // namespace arm_compute
-#endif // SRC_CORE_NEON_KERNELS_ADD_LIST_H \ No newline at end of file
+#endif // SRC_CORE_NEON_KERNELS_ADD_LIST_H
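Stripped of the Window and broadcast handling, the non-saturating float instantiation of add_same_neon boils down to this kind of loop (a minimal sketch, not the kernel itself):

    #include <arm_neon.h>

    void add_f32(const float *a, const float *b, float *dst, int n)
    {
        int i = 0;
        for(; i <= n - 4; i += 4) // 4 floats per 128-bit register
        {
            vst1q_f32(dst + i, vaddq_f32(vld1q_f32(a + i), vld1q_f32(b + i)));
        }
        for(; i < n; ++i) // leftover elements, as in the kernel's x-loop tail
        {
            dst[i] = a[i] + b[i];
        }
    }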
diff --git a/src/core/cpu/kernels/pooling/neon/quantized.h b/src/core/cpu/kernels/pooling/neon/quantized.h
index 81fb777ef2..535fb53d87 100644
--- a/src/core/cpu/kernels/pooling/neon/quantized.h
+++ b/src/core/cpu/kernels/pooling/neon/quantized.h
@@ -473,7 +473,7 @@ void pooling2_quantized_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *ds
Iterator in(src, window_src);
Iterator out(dst0, window);
- /** NEON vector types */
+ /** Neon vector types */
using q8x8_t = typename wrapper::traits::neon_vector<T, 8>::type;
using q8x16_t = typename wrapper::traits::neon_vector<T, 16>::type;
using q8x8x2_t = typename std::conditional<std::is_same<T, uint8_t>::value, uint8x8x2_t, int8x8x2_t>::type;
@@ -602,7 +602,7 @@ void pooling3_quantized_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *ds
Iterator in(src, window_src);
Iterator out(dst0, window);
- /** NEON vector types */
+ /** Neon vector types */
using q8x8_t = typename wrapper::traits::neon_vector<T, 8>::type;
using q8x16_t = typename wrapper::traits::neon_vector<T, 16>::type;
using q8x8x2_t = typename std::conditional<std::is_same<T, uint8_t>::value, uint8x8x2_t, int8x8x2_t>::type;
@@ -756,7 +756,7 @@ void poolingMxN_quantized_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *
Iterator in(src, window_src);
Iterator out(dst0, window);
- /** NEON vector types */
+ /** Neon vector types */
using q8x8_t = typename wrapper::traits::neon_vector<T, 8>::type;
using q16_t = typename wrapper::traits::promote_t<T>;
using q16x8_t = typename wrapper::traits::neon_vector<q16_t, 8>::type;
@@ -860,4 +860,4 @@ void poolingMxN_quantized_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *
} // namespace cpu
} // namespace arm_compute
-#endif // SRC_CORE_NEON_KERNELS_QUANTIZED_H \ No newline at end of file
+#endif // SRC_CORE_NEON_KERNELS_QUANTIZED_H
diff --git a/src/core/cpu/kernels/softmax/impl/NEON/list.h b/src/core/cpu/kernels/softmax/impl/NEON/list.h
index 1aa7e8fac7..3f9438e0c7 100644
--- a/src/core/cpu/kernels/softmax/impl/NEON/list.h
+++ b/src/core/cpu/kernels/softmax/impl/NEON/list.h
@@ -73,7 +73,7 @@ float32x4x4_t convert_int_to_float<float32x4x4_t, int8x16_t>(const int8x16_t &in
template <typename T>
void neon_logits_1d_max(const ITensor *in, ITensor *out, const Window &window)
{
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
constexpr int window_step_x = 16 / sizeof(T);
@@ -304,7 +304,7 @@ void neon_softmax_logits_1d_float(const ITensor *in, const ITensor *max, void *c
Iterator max_it(max, window);
Iterator out_it(out, window);
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
constexpr int vec_size = 16 / sizeof(T);
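neon_logits_1d_max is a row-wise max reduction; a sketch of the float flavour (assuming n >= 1; the real code works over Window iterators and leftover lanes):

    #include <arm_neon.h>
    #include <algorithm>

    float row_max_f32(const float *x, int n)
    {
        float32x4_t vmax = vdupq_n_f32(x[0]);
        int i = 0;
        for(; i <= n - 4; i += 4)
        {
            vmax = vmaxq_f32(vmax, vld1q_f32(x + i));
        }
        // Horizontal reduction: pairwise max twice collapses 4 lanes to 1.
        float32x2_t m = vpmax_f32(vget_low_f32(vmax), vget_high_f32(vmax));
        m = vpmax_f32(m, m);
        float result = vget_lane_f32(m, 0);
        for(; i < n; ++i) // scalar tail
        {
            result = std::max(result, x[i]);
        }
        return result;
    }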
diff --git a/src/core/cpu/kernels/sub/neon/list.h b/src/core/cpu/kernels/sub/neon/list.h
index d5685824fc..8c82402513 100644
--- a/src/core/cpu/kernels/sub/neon/list.h
+++ b/src/core/cpu/kernels/sub/neon/list.h
@@ -47,7 +47,7 @@ DECLARE_SUB_KERNEL(sub_u8_u8_s16_neon);
template <typename T>
void sub_same_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
- /** NEON vector tag type. */
+ /** Neon vector tag type. */
using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
bool is_sat = policy == ConvertPolicy::SATURATE;
@@ -159,4 +159,4 @@ void sub_same_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const
}
} // namespace cpu
} // namespace arm_compute
-#endif // SRC_CORE_NEON_KERNELS_SUB_LIST_H \ No newline at end of file
+#endif // SRC_CORE_NEON_KERNELS_SUB_LIST_H
diff --git a/src/graph/backends/NEON/NEDeviceBackend.cpp b/src/graph/backends/NEON/NEDeviceBackend.cpp
index 7f87710cf3..a6c4fe9aa3 100644
--- a/src/graph/backends/NEON/NEDeviceBackend.cpp
+++ b/src/graph/backends/NEON/NEDeviceBackend.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,7 +52,7 @@ namespace graph
{
namespace backends
{
-/** Register NEON backend */
+/** Register Neon backend */
static detail::BackendRegistrar<NEDeviceBackend> NEDeviceBackend_registrar(Target::NEON);
NEDeviceBackend::NEDeviceBackend()
@@ -138,7 +138,7 @@ std::unique_ptr<ITensorHandle> NEDeviceBackend::create_subtensor(ITensorHandle *
std::unique_ptr<arm_compute::IFunction> NEDeviceBackend::configure_node(INode &node, GraphContext &ctx)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Configuring NEON node with ID : " << node.id() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Configuring Neon node with ID : " << node.id() << std::endl);
ARM_COMPUTE_ERROR_ON(node.assigned_target() != Target::NEON);
// Configure node
@@ -147,7 +147,7 @@ std::unique_ptr<arm_compute::IFunction> NEDeviceBackend::configure_node(INode &n
arm_compute::Status NEDeviceBackend::validate_node(INode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating NEON node with ID : " << node.id() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating Neon node with ID : " << node.id() << std::endl);
ARM_COMPUTE_ERROR_ON(node.assigned_target() != Target::NEON);
return NENodeValidator::validate(&node);
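The NEDeviceBackend_registrar line above relies on static self-registration; a self-contained model of the idiom (simplified names, not the library's exact classes):

    #include <map>
    #include <memory>

    enum class Target { NEON, CL };

    struct IDeviceBackend { virtual ~IDeviceBackend() = default; };

    class BackendRegistry
    {
    public:
        static BackendRegistry &get()
        {
            static BackendRegistry instance; // one process-wide registry
            return instance;
        }
        void add_backend(Target t, std::unique_ptr<IDeviceBackend> b)
        {
            _backends[t] = std::move(b);
        }
    private:
        std::map<Target, std::unique_ptr<IDeviceBackend>> _backends;
    };

    // Constructing a file-scope static of this type registers the backend
    // before main() runs, so no central list of backends needs editing:
    template <typename Backend>
    struct BackendRegistrar
    {
        explicit BackendRegistrar(Target t)
        {
            BackendRegistry::get().add_backend(t, std::make_unique<Backend>());
        }
    };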
diff --git a/src/graph/backends/NEON/NEFunctionFactory.cpp b/src/graph/backends/NEON/NEFunctionFactory.cpp
index 83954b6307..6cf5874633 100644
--- a/src/graph/backends/NEON/NEFunctionFactory.cpp
+++ b/src/graph/backends/NEON/NEFunctionFactory.cpp
@@ -54,7 +54,7 @@ struct NETargetInfo
Target NETargetInfo::TargetType = Target::NEON;
-/** Collection of NEON convolution functions */
+/** Collection of Neon convolution functions */
struct NEConvolutionLayerFunctions
{
using GenericConvolutionLayer = NEConvolutionLayer;
@@ -63,7 +63,7 @@ struct NEConvolutionLayerFunctions
using WinogradConvolutionLayer = NEWinogradConvolutionLayer;
};
-/** Collection of NEON element-wise functions */
+/** Collection of Neon element-wise functions */
struct NEEltwiseFunctions
{
using Addition = NEArithmeticAddition;
@@ -73,13 +73,13 @@ struct NEEltwiseFunctions
using Division = NEElementwiseDivision;
};
-/** Collection of NEON unary element-wise functions */
+/** Collection of Neon unary element-wise functions */
struct NEUnaryEltwiseFunctions
{
using Exp = NEExpLayer;
};
-/** Function and tensor types to be used inside a NEON fused convolution/batch normalization layer */
+/** Function and tensor types to be used inside a Neon fused convolution/batch normalization layer */
struct NEFusedLayerTypes
{
using ConvolutionLayer = NEConvolutionLayer;
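Grouping the functions into collection structs lets one generic factory serve every backend; a hedged, self-contained model of the idea (the layer types below are stand-ins):

    #include <memory>

    struct IFunction { virtual ~IFunction() = default; };
    struct GemmConv : IFunction {};       // stand-in concrete layers
    struct WinogradConv : IFunction {};

    struct ConvCollection
    {
        using GenericConvolutionLayer  = GemmConv;
        using WinogradConvolutionLayer = WinogradConv;
    };

    enum class ConvolutionMethod { GEMM, Winograd };

    // The graph code is written once against the aliases; swapping the
    // collection type retargets it to another backend.
    template <typename Functions>
    std::unique_ptr<IFunction> create_convolution(ConvolutionMethod method)
    {
        if(method == ConvolutionMethod::Winograd)
        {
            return std::make_unique<typename Functions::WinogradConvolutionLayer>();
        }
        return std::make_unique<typename Functions::GenericConvolutionLayer>();
    }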
diff --git a/src/graph/backends/NEON/NENodeValidator.cpp b/src/graph/backends/NEON/NENodeValidator.cpp
index 3fb79dbfc0..8453cfa03a 100644
--- a/src/graph/backends/NEON/NENodeValidator.cpp
+++ b/src/graph/backends/NEON/NENodeValidator.cpp
@@ -50,7 +50,7 @@ namespace graph
{
namespace backends
{
-/** Collection of NEON element-wise functions */
+/** Collection of Neon element-wise functions */
struct NEEltwiseLayerFunctions
{
using ArithmeticAddition = NEArithmeticAddition;
@@ -60,7 +60,7 @@ struct NEEltwiseLayerFunctions
using ArithmeticDivision = NEElementwiseDivision;
};
-/** Collection of NEON unary element-wise functions */
+/** Collection of Neon unary element-wise functions */
struct NEUnaryEltwiseLayerFunctions
{
using ExpLayer = NEExpLayer;
diff --git a/src/runtime/NEON/functions/NEConvolutionLayer.cpp b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
index cc549ca31b..e43d976944 100644
--- a/src/runtime/NEON/functions/NEConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -101,7 +101,7 @@ void NEConvolutionLayer::configure(ITensor *input, const ITensor *weights, const
Status NEConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
{
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1), "Grouping (num_groups != 1) is not supported on NEON");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1), "Grouping (num_groups != 1) is not supported on Neon");
const Conv2dInfo info(conv_info, dilation, act_info, enable_fast_math, num_groups);
switch(NEConvolutionLayer::get_convolution_method(input, weights, output, conv_info, weights_info, dilation, act_info, enable_fast_math))
diff --git a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
index 0fd21edee1..8114adadc2 100644
--- a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
+++ b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
@@ -285,7 +285,7 @@ private:
/** Assembly Gemm kernel */
std::shared_ptr<arm_gemm::GemmCommon<TypeInput, TypeOutput>> _gemm_kernel_asm{ nullptr };
- /** Optimised NEON kernel */
+ /** Optimised Neon kernel */
std::unique_ptr<INEKernel> _optimised_kernel{ nullptr };
/** Input A */
const ITensor *_a
diff --git a/src/runtime/NEON/functions/NEGEMMConv2d.cpp b/src/runtime/NEON/functions/NEGEMMConv2d.cpp
index b8349d98db..c802298f98 100644
--- a/src/runtime/NEON/functions/NEGEMMConv2d.cpp
+++ b/src/runtime/NEON/functions/NEGEMMConv2d.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -122,7 +122,7 @@ Status NEGEMMConv2d::validate(const ITensorInfo *input, const ITensorInfo *weigh
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::BFLOAT16, DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL, DataType::BFLOAT16, DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.num_groups > 1, "Grouping (num_groups != 1) is not supported on NEON");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.num_groups > 1, "Grouping (num_groups != 1) is not supported on Neon");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_layout() != DataLayout::NHWC, "Data layout supported is NHWC");
const DataType data_type = input->data_type();
const TensorShape i_shape = input->tensor_shape();
diff --git a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
index 2d296f39ea..e623d9dc74 100644
--- a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
@@ -431,7 +431,7 @@ Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::BFLOAT16, DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL, DataType::BFLOAT16, DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups > 1, "Grouping (num_groups != 1) is not supported on NEON");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups > 1, "Grouping (num_groups != 1) is not supported on Neon");
const DataLayout data_layout = input->data_layout();
const DataType data_type = input->data_type();
@@ -523,7 +523,7 @@ Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
if(!skip_im2col)
{
// Create tensor info for im2col reshaped inputs
- // For NEON the batch size is on the fourth dimension
+ // For Neon the batch size is on the fourth dimension
// TODO (giaiod01): Auto-initialize the output shape of im2col COMPMID-1482
TensorShape shape_im2col = input->tensor_shape();
shape_im2col.set(0, mat_weights_rows);
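As a concrete, purely illustrative example of the shape arithmetic above: for an input of [W, H, C, N] = [56, 56, 64, 8] with 3x3 kernels, mat_weights_rows = 3 * 3 * 64 = 576 (plus one if a bias row is appended), so dimension 0 of the im2col output carries the linearized patch and, assuming the remaining dimensions follow the usual im2col layout of [patch, output positions, 1, batch], the batch of 8 sits on the fourth dimension, as the comment notes.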
diff --git a/src/runtime/cpu/operators/CpuPooling.h b/src/runtime/cpu/operators/CpuPooling.h
index aa607b4b44..9ebcd5f6aa 100644
--- a/src/runtime/cpu/operators/CpuPooling.h
+++ b/src/runtime/cpu/operators/CpuPooling.h
@@ -40,7 +40,7 @@ namespace cpu
{
// Forward Declarations
class CpuPoolingAssemblyDispatch;
-/** Basic function to simulate a pooling layer with the specified pooling operation. This function calls the following NEON kernels:
+/** Basic function to simulate a pooling layer with the specified pooling operation. This function calls the following Neon kernels:
*
* -# @ref NEFillBorderKernel (executed if padding size is different from zero)
* -# @ref kernels::CpuPoolingKernel