author    Michele Di Giorgio <michele.digiorgio@arm.com>  2021-03-09 14:09:08 +0000
committer Michele Di Giorgio <michele.digiorgio@arm.com>  2021-03-31 17:08:51 +0000
commit    33f41fabd30fb444aaa0cf3e65b61794d498d151 (patch)
tree      a381cff3096a3b05198b0cd311fee28e40fd5a4f /src/core/cpu
parent    5f91b5d7063462854b62d342f9d4e04ae647e9a6 (diff)
download  ComputeLibrary-33f41fabd30fb444aaa0cf3e65b61794d498d151.tar.gz
Fix trademarks throughout the codebase
Resolves: COMPMID-4299
Change-Id: Ie6a52c1371b9a2a7b5bb4f019ecd5e70a2008567
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5338
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core/cpu')
-rw-r--r--  src/core/cpu/kernels/CpuConcatenateBatchKernel.cpp   2
-rw-r--r--  src/core/cpu/kernels/CpuConcatenateDepthKernel.cpp   2
-rw-r--r--  src/core/cpu/kernels/CpuConcatenateHeightKernel.cpp  2
-rw-r--r--  src/core/cpu/kernels/CpuConcatenateWidthKernel.cpp   2
-rw-r--r--  src/core/cpu/kernels/CpuReshapeKernel.cpp            2
-rw-r--r--  src/core/cpu/kernels/CpuTransposeKernel.cpp          8
-rw-r--r--  src/core/cpu/kernels/activation/NEON/fp16.cpp        2
-rw-r--r--  src/core/cpu/kernels/activation/NEON/fp32.cpp        2
-rw-r--r--  src/core/cpu/kernels/add/neon/list.h                 2
-rw-r--r--  src/core/cpu/kernels/pooling/neon/quantized.h        6
-rw-r--r--  src/core/cpu/kernels/softmax/impl/NEON/list.h        4
-rw-r--r--  src/core/cpu/kernels/sub/neon/list.h                 2
12 files changed, 18 insertions, 18 deletions
diff --git a/src/core/cpu/kernels/CpuConcatenateBatchKernel.cpp b/src/core/cpu/kernels/CpuConcatenateBatchKernel.cpp
index bd6d777572..aaa1898ce9 100644
--- a/src/core/cpu/kernels/CpuConcatenateBatchKernel.cpp
+++ b/src/core/cpu/kernels/CpuConcatenateBatchKernel.cpp
@@ -133,7 +133,7 @@ void batch_concat(const ITensor *src, ITensor *dst, unsigned int batch_offset, c
Status validate_arguments(const ITensorInfo *src, unsigned int batch_offset, const ITensorInfo *dst)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use Neon FP16 instructions.
+ //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use CPU FP16 instructions.
ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
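
Aside, not part of the patch: the comment reworded above marks kernels that never emit half-precision SIMD code, which is why the FP16 guard can be omitted. A kernel that does run FP16 vector code would keep it. A minimal sketch, assuming the arm_compute validation macros visible in these hunks; the function name and data-type list are hypothetical:

Status validate_arguments_fp16_kernel(const ITensorInfo *src, const ITensorInfo *dst)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
    // Needed here: this hypothetical kernel executes FP16 vector instructions,
    // so reject F16 tensors when the target CPU cannot run them.
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
    return Status{};
}
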
diff --git a/src/core/cpu/kernels/CpuConcatenateDepthKernel.cpp b/src/core/cpu/kernels/CpuConcatenateDepthKernel.cpp
index d8eed44cd8..35c189caeb 100644
--- a/src/core/cpu/kernels/CpuConcatenateDepthKernel.cpp
+++ b/src/core/cpu/kernels/CpuConcatenateDepthKernel.cpp
@@ -134,7 +134,7 @@ void depth_concat(const ITensor *src, ITensor *dst, unsigned int depth_offset, c
Status validate_arguments(const ITensorInfo *input, unsigned int depth_offset, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use Neon FP16 instructions.
+ //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use CPU FP16 instructions.
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
diff --git a/src/core/cpu/kernels/CpuConcatenateHeightKernel.cpp b/src/core/cpu/kernels/CpuConcatenateHeightKernel.cpp
index 4dc458a4a8..363c271a68 100644
--- a/src/core/cpu/kernels/CpuConcatenateHeightKernel.cpp
+++ b/src/core/cpu/kernels/CpuConcatenateHeightKernel.cpp
@@ -49,7 +49,7 @@ namespace
Status validate_arguments(const ITensorInfo *src, unsigned int height_offset, const ITensorInfo *dst)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use Neon FP16 instructions.
+ // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use CPU FP16 instructions.
ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(Window::DimX) != dst->dimension(Window::DimX));
diff --git a/src/core/cpu/kernels/CpuConcatenateWidthKernel.cpp b/src/core/cpu/kernels/CpuConcatenateWidthKernel.cpp
index efefd5d011..11b1db5bc2 100644
--- a/src/core/cpu/kernels/CpuConcatenateWidthKernel.cpp
+++ b/src/core/cpu/kernels/CpuConcatenateWidthKernel.cpp
@@ -49,7 +49,7 @@ namespace
Status validate_arguments(const ITensorInfo *src, unsigned int width_offset, const ITensorInfo *dst)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use Neon FP16 instructions.
+ // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use CPU FP16 instructions.
ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) + width_offset > dst->dimension(0));
diff --git a/src/core/cpu/kernels/CpuReshapeKernel.cpp b/src/core/cpu/kernels/CpuReshapeKernel.cpp
index 4ab1612518..70c652695a 100644
--- a/src/core/cpu/kernels/CpuReshapeKernel.cpp
+++ b/src/core/cpu/kernels/CpuReshapeKernel.cpp
@@ -50,7 +50,7 @@ namespace
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use Neon FP16 instructions.
+ // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use CPU FP16 instructions.
ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
diff --git a/src/core/cpu/kernels/CpuTransposeKernel.cpp b/src/core/cpu/kernels/CpuTransposeKernel.cpp
index ed08aa1aa0..c7cafe94a8 100644
--- a/src/core/cpu/kernels/CpuTransposeKernel.cpp
+++ b/src/core/cpu/kernels/CpuTransposeKernel.cpp
@@ -95,7 +95,7 @@ void transpose_8bit_elements(const ITensor *in, ITensor *out, const Window &wind
Iterator output(out, window_out);
- // Run the Neon path if and only if the input is not a row-vector
+ // Run the SIMD path if and only if the input is not a row-vector
if(in->info()->dimension(1) != 1)
{
Iterator input(in, window_in);
@@ -234,7 +234,7 @@ void transpose_16bit_elements(const ITensor *in, ITensor *out, const Window &win
Iterator output(out, window_out);
- // Run the Neon path if and only if the input is not a row-vector
+ // Run the SIMD path if and only if the input is not a row-vector
if(in->info()->dimension(1) != 1)
{
Iterator input(in, window_in);
@@ -347,7 +347,7 @@ void transpose_32bit_elements(const ITensor *in, ITensor *out, const Window &win
Iterator output(out, window_out);
- // Run the Neon path if and only if the input is not a row-vector
+ // Run the SIMD path if and only if the input is not a row-vector
if(in->info()->dimension(1) != 1)
{
Iterator input(in, window_in);
@@ -455,7 +455,7 @@ void CpuTransposeKernel::configure(const ITensorInfo *src, ITensorInfo *dst)
Status CpuTransposeKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src);
- //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use Neon FP16 instructions.
+ //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use CPU FP16 instructions.
ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
// Error if input is not 8 bit, 16bit or 32bit
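
Aside, to make the reworded comment concrete: the transpose kernels take the vectorised path only when the input has more than one row, because transposing a row-vector is just a contiguous copy. A standalone plain-C++ sketch of that shape-based dispatch (hypothetical helper, not the library kernel, which replaces the scalar inner loop with tiled vector loads and stores):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Transpose a rows x cols matrix of bytes; 'out' must hold cols x rows bytes.
void transpose_u8(const uint8_t *in, uint8_t *out, std::size_t rows, std::size_t cols)
{
    if(rows == 1)
    {
        // Row-vector: the transpose is a straight copy, no vector path needed.
        std::memcpy(out, in, cols);
        return;
    }
    // General case: the real kernel vectorises this loop over square tiles.
    for(std::size_t r = 0; r < rows; ++r)
    {
        for(std::size_t c = 0; c < cols; ++c)
        {
            out[c * rows + r] = in[r * cols + c];
        }
    }
}
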
diff --git a/src/core/cpu/kernels/activation/NEON/fp16.cpp b/src/core/cpu/kernels/activation/NEON/fp16.cpp
index 0ddd43ea0e..6f2d5d8533 100644
--- a/src/core/cpu/kernels/activation/NEON/fp16.cpp
+++ b/src/core/cpu/kernels/activation/NEON/fp16.cpp
@@ -50,7 +50,7 @@ inline float16x8_t mask_float_vector(const float16x8_t &in, const uint16x8_t &ma
void fp16_neon_activation(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
{
- /** Neon vector tag type. */
+ /** SIMD vector tag type. */
using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<float16_t, wrapper::traits::BitWidth::W128>;
const ActivationLayerInfo::ActivationFunction act = act_info.activation();
diff --git a/src/core/cpu/kernels/activation/NEON/fp32.cpp b/src/core/cpu/kernels/activation/NEON/fp32.cpp
index 244ca5739f..54301d45ad 100644
--- a/src/core/cpu/kernels/activation/NEON/fp32.cpp
+++ b/src/core/cpu/kernels/activation/NEON/fp32.cpp
@@ -48,7 +48,7 @@ inline float32x4_t mask_float_vector(const float32x4_t &in, const uint32x4_t &ma
void fp32_neon_activation(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
{
- /** Neon vector tag type. */
+ /** SIMD vector tag type. */
using ExactTagType = typename arm_compute::wrapper::traits::neon_bitvector_tag_t<float, wrapper::traits::BitWidth::W128>;
constexpr int window_step_x = 4;
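
Aside on the "SIMD vector tag type" wording used in the fp16 and fp32 hunks above: a tag encodes the element type and register width so one templated kernel can pick the matching vector operations and per-iteration step at compile time. A standalone sketch of the idiom with hypothetical types (not the library's wrapper::traits API):

#include <cstddef>

// A tag carries element type and register width; vector_tag<float, 128> stands for
// 128-bit vectors of float, i.e. 4 elements per step.
template <typename T, int Width>
struct vector_tag {};

template <typename T, int Width>
constexpr int step(vector_tag<T, Width>)
{
    return Width / (8 * static_cast<int>(sizeof(T)));
}

// One templated routine, specialised per tag; the inner loop is a stand-in for the
// vector load/multiply/store the real kernels perform.
template <typename T, int Width>
void scale(T *data, std::size_t n, T factor, vector_tag<T, Width> tag)
{
    const int window_step = step(tag);
    std::size_t i = 0;
    for(; i + window_step <= n; i += window_step)
    {
        for(int j = 0; j < window_step; ++j)
        {
            data[i + j] *= factor;
        }
    }
    for(; i < n; ++i)   // leftover (tail) elements, handled scalarly as in the real kernels
    {
        data[i] *= factor;
    }
}
// Usage: scale(buffer, count, 2.0f, vector_tag<float, 128>{});
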
diff --git a/src/core/cpu/kernels/add/neon/list.h b/src/core/cpu/kernels/add/neon/list.h
index 964bdccca3..3ab03dd40e 100644
--- a/src/core/cpu/kernels/add/neon/list.h
+++ b/src/core/cpu/kernels/add/neon/list.h
@@ -47,7 +47,7 @@ DECLARE_ADD_KERNEL(add_u8_u8_s16_neon);
template <typename ScalarType>
void add_same_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
- /** Neon vector tag type. */
+ /** SIMD vector tag type. */
using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<ScalarType, wrapper::traits::BitWidth::W128>;
// Create input windows
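
Aside: add_same_neon is templated on the scalar type and takes a ConvertPolicy that decides between wrapping and saturating arithmetic. A minimal per-element sketch in plain C++ (not the library kernel, which applies the same choice across whole vectors):

#include <algorithm>
#include <cstdint>
#include <limits>

enum class ConvertPolicy { WRAP, SATURATE };

// WRAP lets the result wrap around on overflow; SATURATE clamps it to the
// representable range of the element type.
int16_t add_s16(int16_t a, int16_t b, ConvertPolicy policy)
{
    const int32_t wide = static_cast<int32_t>(a) + static_cast<int32_t>(b);
    if(policy == ConvertPolicy::SATURATE)
    {
        const int32_t lo = std::numeric_limits<int16_t>::min();
        const int32_t hi = std::numeric_limits<int16_t>::max();
        return static_cast<int16_t>(std::min(std::max(wide, lo), hi));
    }
    return static_cast<int16_t>(wide);   // WRAP: the narrowing conversion discards overflow
}
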
diff --git a/src/core/cpu/kernels/pooling/neon/quantized.h b/src/core/cpu/kernels/pooling/neon/quantized.h
index 535fb53d87..a16960a205 100644
--- a/src/core/cpu/kernels/pooling/neon/quantized.h
+++ b/src/core/cpu/kernels/pooling/neon/quantized.h
@@ -473,7 +473,7 @@ void pooling2_quantized_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *ds
Iterator in(src, window_src);
Iterator out(dst0, window);
- /** Neon vector types */
+ /** SIMD vector types */
using q8x8_t = typename wrapper::traits::neon_vector<T, 8>::type;
using q8x16_t = typename wrapper::traits::neon_vector<T, 16>::type;
using q8x8x2_t = typename std::conditional<std::is_same<T, uint8_t>::value, uint8x8x2_t, int8x8x2_t>::type;
@@ -602,7 +602,7 @@ void pooling3_quantized_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *ds
Iterator in(src, window_src);
Iterator out(dst0, window);
- /** Neon vector types */
+ /** SIMD vector types */
using q8x8_t = typename wrapper::traits::neon_vector<T, 8>::type;
using q8x16_t = typename wrapper::traits::neon_vector<T, 16>::type;
using q8x8x2_t = typename std::conditional<std::is_same<T, uint8_t>::value, uint8x8x2_t, int8x8x2_t>::type;
@@ -756,7 +756,7 @@ void poolingMxN_quantized_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *
Iterator in(src, window_src);
Iterator out(dst0, window);
- /** Neon vector types */
+ /** SIMD vector types */
using q8x8_t = typename wrapper::traits::neon_vector<T, 8>::type;
using q16_t = typename wrapper::traits::promote_t<T>;
using q16x8_t = typename wrapper::traits::neon_vector<q16_t, 8>::type;
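
Aside on why the quantized pooling kernels above define promoted vector types (q16x8_t via promote_t<T>): accumulating 8-bit values directly would overflow, so sums are held in a wider type before the average is narrowed back. A standalone scalar sketch of the same idea (hypothetical helper, not the library code):

#include <cstddef>
#include <cstdint>

// Average-pool a 3x3 window of quantized uint8_t values. Nine uint8_t values can sum
// to 2295, which overflows 8 bits but fits comfortably in the 16-bit accumulator.
uint8_t avg_pool_3x3_u8(const uint8_t *window /* 9 contiguous values */)
{
    uint16_t acc = 0;
    for(std::size_t i = 0; i < 9; ++i)
    {
        acc = static_cast<uint16_t>(acc + window[i]);
    }
    return static_cast<uint8_t>((acc + 4) / 9);   // rounded average, narrowed back to 8 bits
}
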
diff --git a/src/core/cpu/kernels/softmax/impl/NEON/list.h b/src/core/cpu/kernels/softmax/impl/NEON/list.h
index 740e6ea9bc..5ebee31272 100644
--- a/src/core/cpu/kernels/softmax/impl/NEON/list.h
+++ b/src/core/cpu/kernels/softmax/impl/NEON/list.h
@@ -36,7 +36,7 @@ namespace cpu
template <typename T>
void neon_logits_1d_max(const ITensor *in, ITensor *out, const Window &window)
{
- /** Neon vector tag type. */
+ /** SIMD vector tag type. */
using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
constexpr int window_step_x = 16 / sizeof(T);
@@ -267,7 +267,7 @@ void neon_softmax_logits_1d_float(const ITensor *in, const ITensor *max, void *c
Iterator max_it(max, window);
Iterator out_it(out, window);
- /** Neon vector tag type. */
+ /** SIMD vector tag type. */
using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
constexpr int vec_size = 16 / sizeof(T);
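
Aside: the two softmax routines touched above split the work into a 1D max reduction (neon_logits_1d_max) followed by exp/sum/normalise on the shifted logits; subtracting the maximum keeps exp() in a safe range. A standalone scalar sketch of those steps (not the vectorised library code), assuming n > 0:

#include <algorithm>
#include <cmath>
#include <cstddef>

void softmax_1d(const float *in, float *out, std::size_t n)
{
    const float max_val = *std::max_element(in, in + n);   // step 1: row maximum
    float sum = 0.f;
    for(std::size_t i = 0; i < n; ++i)
    {
        out[i] = std::exp(in[i] - max_val);                 // step 2: shifted exponentials
        sum += out[i];
    }
    for(std::size_t i = 0; i < n; ++i)
    {
        out[i] /= sum;                                      // step 3: normalise
    }
}
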
diff --git a/src/core/cpu/kernels/sub/neon/list.h b/src/core/cpu/kernels/sub/neon/list.h
index 8c82402513..1ab4e6367b 100644
--- a/src/core/cpu/kernels/sub/neon/list.h
+++ b/src/core/cpu/kernels/sub/neon/list.h
@@ -47,7 +47,7 @@ DECLARE_SUB_KERNEL(sub_u8_u8_s16_neon);
template <typename T>
void sub_same_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
- /** Neon vector tag type. */
+ /** SIMD vector tag type. */
using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
bool is_sat = policy == ConvertPolicy::SATURATE;