Diffstat (limited to 'src')
-rw-r--r--  src/core/NEON/NEKernels.h                              |   1
-rw-r--r--  src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp   | 331
-rw-r--r--  src/core/NEON/kernels/NEArithmeticAdditionKernel.h     | 107
-rw-r--r--  src/core/cpu/kernels/CpuAddKernel.cpp                  | 347
-rw-r--r--  src/core/cpu/kernels/CpuAddKernel.h                    |  85
-rw-r--r--  src/core/cpu/kernels/add/neon/integer.cpp (renamed from src/core/NEON/kernels/arithmetic_addition/impl/NEON/integer.cpp)               |  28
-rw-r--r--  src/core/cpu/kernels/add/neon/list.h (renamed from src/core/NEON/kernels/arithmetic_addition/impl/NEON/list.h)                         |  44
-rw-r--r--  src/core/cpu/kernels/add/neon/qasymm8.cpp (renamed from src/core/NEON/kernels/arithmetic_addition/impl/NEON/qasymm8.cpp)               |  28
-rw-r--r--  src/core/cpu/kernels/add/neon/qasymm8_signed.cpp (renamed from src/core/NEON/kernels/arithmetic_addition/impl/NEON/qasymm8_signed.cpp) |  28
-rw-r--r--  src/core/cpu/kernels/add/neon/qsymm16.cpp (renamed from src/core/NEON/kernels/arithmetic_addition/impl/NEON/qsymm16.cpp)               |  28
-rw-r--r--  src/core/cpu/kernels/add/sve/integer.cpp (renamed from src/core/NEON/kernels/arithmetic_addition/impl/SVE/integer.cpp)                 |  88
-rw-r--r--  src/core/cpu/kernels/add/sve/list.h (renamed from src/core/NEON/kernels/arithmetic_addition/impl/SVE/list.h)                           |  50
-rw-r--r--  src/core/cpu/kernels/add/sve/qasymm8.cpp (renamed from src/core/NEON/kernels/arithmetic_addition/impl/SVE/qasymm8.cpp)                 |  26
-rw-r--r--  src/core/cpu/kernels/add/sve/qasymm8_signed.cpp (renamed from src/core/NEON/kernels/arithmetic_addition/impl/SVE/qasymm8_signed.cpp)   |  26
-rw-r--r--  src/core/cpu/kernels/add/sve/qsymm16.cpp (renamed from src/core/NEON/kernels/arithmetic_addition/impl/SVE/qsymm16.cpp)                 |  26
-rw-r--r--  src/runtime/NEON/functions/NEArithmeticAddition.cpp    |  38
-rw-r--r--  src/runtime/cpu/operators/CpuAdd.cpp                   |  46
-rw-r--r--  src/runtime/cpu/operators/CpuAdd.h                     |  77
18 files changed, 751 insertions(+), 653 deletions(-)
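
For orientation before the hunks: this commit moves the addition kernel from the stateful NEON interface (NEArithmeticAdditionKernel, which cached a function pointer at configure time) to the new cpu::kernels::CpuAddKernel, which is configured from ITensorInfo only and resolves its micro-kernel at run time, receiving tensors through an ITensorPack. The following is a minimal usage sketch under that interface; it is not part of the commit, and the tensor setup (shapes, allocation, running over the kernel's full window rather than scheduler-split sub-windows) is illustrative only.

#include "arm_compute/core/CPP/CPPTypes.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/Tensor.h"
#include "src/core/cpu/kernels/CpuAddKernel.h"

using namespace arm_compute;

void add_kernel_sketch()
{
    // Two F32 inputs and a matching output (shape chosen for illustration)
    Tensor src0, src1, dst;
    const TensorInfo info(TensorShape(16U, 4U), 1, DataType::F32);
    src0.allocator()->init(info);
    src1.allocator()->init(info);
    dst.allocator()->init(info);
    src0.allocator()->allocate();
    src1.allocator()->allocate();
    dst.allocator()->allocate();

    // Validate and configure from tensor *info* only; no tensor is bound yet
    ARM_COMPUTE_ERROR_THROW_ON(cpu::kernels::CpuAddKernel::validate(src0.info(), src1.info(), dst.info(), ConvertPolicy::SATURATE));
    cpu::kernels::CpuAddKernel kernel;
    kernel.configure(src0.info(), src1.info(), dst.info(), ConvertPolicy::SATURATE);

    // Tensors are supplied at execution time via an ITensorPack
    ITensorPack pack;
    pack.add_const_tensor(TensorType::ACL_SRC_0, &src0);
    pack.add_const_tensor(TensorType::ACL_SRC_1, &src1);
    pack.add_tensor(TensorType::ACL_DST, &dst);
    kernel.run_op(pack, kernel.window(), ThreadInfo{});
}
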
diff --git a/src/core/NEON/NEKernels.h b/src/core/NEON/NEKernels.h index 64c1c8f79b..6c31a7324c 100644 --- a/src/core/NEON/NEKernels.h +++ b/src/core/NEON/NEKernels.h @@ -27,7 +27,6 @@ /* Header regrouping all the NEON kernels */ #include "src/core/NEON/kernels/NEAbsoluteDifferenceKernel.h" #include "src/core/NEON/kernels/NEAccumulateKernel.h" -#include "src/core/NEON/kernels/NEArithmeticAdditionKernel.h" #include "src/core/NEON/kernels/NEArithmeticSubtractionKernel.h" #include "src/core/NEON/kernels/NEBatchNormalizationLayerKernel.h" #include "src/core/NEON/kernels/NEBatchToSpaceLayerKernel.h" diff --git a/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp b/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp deleted file mode 100644 index 4b53d26a5a..0000000000 --- a/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp +++ /dev/null @@ -1,331 +0,0 @@ -/* - * Copyright (c) 2016-2021 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#include "src/core/NEON/kernels/NEArithmeticAdditionKernel.h" - -#include "arm_compute/core/Error.h" -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/ITensor.h" -#include "arm_compute/core/Validate.h" -#include "src/core/CPP/Validate.h" -#include "src/core/NEON/kernels/arithmetic_addition/impl/NEON/list.h" -#include "src/core/NEON/kernels/arithmetic_addition/impl/SVE/list.h" -#include "src/core/NEON/wrapper/wrapper.h" -#include "src/core/common/Registrars.h" -#include "src/core/helpers/AutoConfiguration.h" -#include "src/core/helpers/WindowHelpers.h" - -#include <map> -#include <string> - -namespace arm_compute -{ -namespace -{ -struct ArithmeticAdditionSelectorData -{ - DataType dt1; - DataType dt2; - DataType dt3; -}; - -using ArithmeticAdditionSelectorPtr = std::add_pointer<bool(const ArithmeticAdditionSelectorData &data)>::type; - -struct ArithmeticAdditionKernel -{ - const char *name; - const ArithmeticAdditionSelectorPtr is_selected; - NEArithmeticAdditionKernel::ArithmeticAdditionKernelPtr ukernel; -}; - -static const ArithmeticAdditionKernel available_kernels[] = -{ -#if defined(__ARM_FEATURE_SVE) - { - "arithmetic_addition_same_sve", - [](const ArithmeticAdditionSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == DataType::F32)); }, - REGISTER_FP32_SVE(arm_compute::cpu::arithmetic_addition_same_sve<float>) - }, - { - "arithmetic_addition_same_sve", - [](const ArithmeticAdditionSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == DataType::F16)); }, - REGISTER_FP16_SVE(arm_compute::cpu::arithmetic_addition_same_sve<float16_t>) - }, - { - "arithmetic_addition_same_sve", - [](const ArithmeticAdditionSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == data.dt3) && (data.dt1 == DataType::U8)); }, - REGISTER_INTEGER_SVE(arm_compute::cpu::arithmetic_addition_same_sve<uint8_t>) - }, - { - "arithmetic_addition_same_sve", - [](const ArithmeticAdditionSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == data.dt3) && (data.dt1 == DataType::S16)); }, - REGISTER_INTEGER_SVE(arm_compute::cpu::arithmetic_addition_same_sve<int16_t>) - }, - { - "arithmetic_addition_same_sve", - [](const ArithmeticAdditionSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == data.dt3) && (data.dt1 == DataType::S32)); }, - REGISTER_INTEGER_SVE(arm_compute::cpu::arithmetic_addition_same_sve<int32_t>) - }, - { - "arithmetic_addition_U8_S16_S16_sve", - [](const ArithmeticAdditionSelectorData & data) { return ((data.dt1 == DataType::U8) && (data.dt2 == DataType::S16)); }, - REGISTER_INTEGER_SVE(arm_compute::cpu::arithmetic_addition_U8_S16_S16_sve) - }, - { - "arithmetic_addition_S16_U8_S16_sve", - [](const ArithmeticAdditionSelectorData & data) { return ((data.dt1 == DataType::S16) && (data.dt2 == DataType::U8)); }, - REGISTER_INTEGER_SVE(arm_compute::cpu::arithmetic_addition_S16_U8_S16_sve) - }, - { - "arithmetic_addition_U8_U8_S16_sve", - [](const ArithmeticAdditionSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt3 == DataType::S16)); }, - REGISTER_INTEGER_SVE(arm_compute::cpu::arithmetic_addition_U8_U8_S16_sve) - }, -#else /* !defined(__ARM_FEATURE_SVE) */ - { - "arithmetic_addition_same_neon", - [](const ArithmeticAdditionSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == DataType::F32)); }, - REGISTER_FP32_NEON(arm_compute::cpu::arithmetic_addition_same_neon<float>) - }, -#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) - { - "arithmetic_addition_same_neon", - [](const 
ArithmeticAdditionSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == DataType::F16)); }, - REGISTER_FP16_NEON(arm_compute::cpu::arithmetic_addition_same_neon<float16_t>) - }, -#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) */ - { - "arithmetic_addition_same_neon", - [](const ArithmeticAdditionSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == data.dt3) && (data.dt1 == DataType::U8)); }, - REGISTER_INTEGER_NEON(arm_compute::cpu::arithmetic_addition_same_neon<uint8_t>) - }, - { - "arithmetic_addition_same_neon", - [](const ArithmeticAdditionSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == data.dt3) && (data.dt1 == DataType::S16)); }, - REGISTER_INTEGER_NEON(arm_compute::cpu::arithmetic_addition_same_neon<int16_t>) - }, - { - "arithmetic_addition_same_neon", - [](const ArithmeticAdditionSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == data.dt3) && (data.dt1 == DataType::S32)); }, - REGISTER_INTEGER_NEON(arm_compute::cpu::arithmetic_addition_same_neon<int32_t>) - }, - { - "arithmetic_addition_U8_S16_S16_neon", - [](const ArithmeticAdditionSelectorData & data) { return ((data.dt1 == DataType::U8) && (data.dt2 == DataType::S16)); }, - REGISTER_INTEGER_NEON(arm_compute::cpu::arithmetic_addition_U8_S16_S16_neon) - }, - { - "arithmetic_addition_S16_U8_S16_neon", - [](const ArithmeticAdditionSelectorData & data) { return ((data.dt1 == DataType::S16) && (data.dt2 == DataType::U8)); }, - REGISTER_INTEGER_NEON(arm_compute::cpu::arithmetic_addition_S16_U8_S16_neon) - }, - { - "arithmetic_addition_U8_U8_S16_neon", - [](const ArithmeticAdditionSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt3 == DataType::S16)); }, - REGISTER_INTEGER_NEON(arm_compute::cpu::arithmetic_addition_U8_U8_S16_neon) - }, -#endif /* defined(__ARM_FEATURE_SVE) */ - -#if defined(__ARM_FEATURE_SVE2) - { - "arithmetic_addition_qasymm8_sve", - [](const ArithmeticAdditionSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == DataType::QASYMM8)); }, - REGISTER_QASYMM8_SVE(arm_compute::cpu::arithmetic_addition_qasymm8_sve) - }, - { - "arithmetic_addition_qasymm8_signed_sve", - [](const ArithmeticAdditionSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == DataType::QASYMM8_SIGNED)); }, - REGISTER_QASYMM8_SIGNED_SVE(arm_compute::cpu::arithmetic_addition_qasymm8_signed_sve) - }, - { - "arithmetic_addition_qsymm16_sve", - [](const ArithmeticAdditionSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == DataType::QSYMM16)); }, - REGISTER_QSYMM16_SVE(arm_compute::cpu::arithmetic_addition_qsymm16_sve) - }, -#else /* !defined(__ARM_FEATURE_SVE2) */ - { - "arithmetic_addition_qasymm8_neon", - [](const ArithmeticAdditionSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == DataType::QASYMM8)); }, - REGISTER_QASYMM8_NEON(arm_compute::cpu::arithmetic_addition_qasymm8_neon) - }, - { - "arithmetic_addition_qasymm8_signed_neon", - [](const ArithmeticAdditionSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == DataType::QASYMM8_SIGNED)); }, - REGISTER_QASYMM8_SIGNED_NEON(arm_compute::cpu::arithmetic_addition_qasymm8_signed_neon) - }, - { - "arithmetic_addition_qsymm16_neon", - [](const ArithmeticAdditionSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == DataType::QSYMM16)); }, - REGISTER_QSYMM16_NEON(arm_compute::cpu::arithmetic_addition_qsymm16_neon) - }, -#endif /* defined(__ARM_FEATURE_SVE2) */ - -}; - -const ArithmeticAdditionKernel 
*get_implementation(DataType dt1, DataType dt2, DataType dt3) -{ - for(const auto &uk : available_kernels) - { - if(uk.is_selected({ dt1, dt2, dt3 })) - { - return &uk; - } - } - return nullptr; -} - -Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output, ConvertPolicy policy) -{ - ARM_COMPUTE_UNUSED(policy); - - ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(&input1); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input1, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, - DataType::S16, DataType::QSYMM16, DataType::F16, - DataType::S32, DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input2, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, - DataType::S16, DataType::QSYMM16, DataType::F16, - DataType::S32, DataType::F32); - - const TensorShape out_shape = TensorShape::broadcast_shape(input1.tensor_shape(), input2.tensor_shape()); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG((input1.tensor_shape().x() != input2.tensor_shape().x()) && ((input1.data_type() != input2.data_type()) || (input1.data_type() != output.data_type()) - || (input2.data_type() != output.data_type())), - "Broadcasting across width is supported on configurations where all tensors have the same data type"); - - // Validate in case of configured output - if(output.total_size() > 0) - { - ARM_COMPUTE_RETURN_ERROR_ON_MSG( - !(input1.data_type() == DataType::U8 && input2.data_type() == DataType::U8 && output.data_type() == DataType::U8) - && !(input1.data_type() == DataType::U8 && input2.data_type() == DataType::U8 && output.data_type() == DataType::S16) - && !(input1.data_type() == DataType::U8 && input2.data_type() == DataType::S16 && output.data_type() == DataType::S16) - && !(input1.data_type() == DataType::S16 && input2.data_type() == DataType::U8 && output.data_type() == DataType::S16) - && !(input1.data_type() == DataType::S16 && input2.data_type() == DataType::S16 && output.data_type() == DataType::S16) - && !(input1.data_type() == DataType::S32 && input2.data_type() == DataType::S32 && output.data_type() == DataType::S32) - && !(input1.data_type() == DataType::F32 && input2.data_type() == DataType::F32 && output.data_type() == DataType::F32) - && !(input1.data_type() == DataType::F16 && input2.data_type() == DataType::F16 && output.data_type() == DataType::F16) - && !(input1.data_type() == DataType::QASYMM8 && input2.data_type() == DataType::QASYMM8 && output.data_type() == DataType::QASYMM8) - && !(input1.data_type() == DataType::QASYMM8_SIGNED && input2.data_type() == DataType::QASYMM8_SIGNED && output.data_type() == DataType::QASYMM8_SIGNED) - && !(input1.data_type() == DataType::QSYMM16 && input2.data_type() == DataType::QSYMM16 && output.data_type() == DataType::QSYMM16), - "You called addition with the wrong image formats"); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output.tensor_shape(), 0), - "Wrong shape for output"); - } - - return Status{}; -} - -std::pair<Status, Window> validate_and_configure_window(const ITensorInfo &input1, const ITensorInfo &input2, ITensorInfo &output) -{ - const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(input1, input2); - const TensorShape &out_shape = broadcast_pair.first; - const ValidRegion &valid_region = broadcast_pair.second; - - // Auto initialize output if not initialized - { - 
set_shape_if_empty(output, out_shape); - - if(input1.data_type() == DataType::S16 || input2.data_type() == DataType::S16) - { - set_format_if_unknown(output, Format::S16); - } - if(input1.data_type() == DataType::S32 || input2.data_type() == DataType::S32) - { - set_format_if_unknown(output, Format::S32); - } - else if(input1.data_type() == DataType::F16 || input2.data_type() == DataType::F16) - { - set_format_if_unknown(output, Format::F16); - } - else if(input1.data_type() == DataType::F32 || input2.data_type() == DataType::F32) - { - set_format_if_unknown(output, Format::F32); - } - else if(input1.data_type() == DataType::QASYMM8 || input2.data_type() == DataType::QASYMM8) - { - set_data_type_if_unknown(output, DataType::QASYMM8); - } - else if(input1.data_type() == DataType::QASYMM8_SIGNED || input2.data_type() == DataType::QASYMM8_SIGNED) - { - set_data_type_if_unknown(output, DataType::QASYMM8_SIGNED); - } - else if(input1.data_type() == DataType::QSYMM16 || input2.data_type() == DataType::QSYMM16) - { - set_data_type_if_unknown(output, DataType::QSYMM16); - } - } - - Window win = calculate_max_window(valid_region, Steps()); - - // NEArithmeticAdditionKernel doesn't need padding so update_window_and_padding() can be skipped - Coordinates coord; - coord.set_num_dimensions(output.num_dimensions()); - output.set_valid_region(valid_region); - return std::make_pair(Status{}, win); -} -} // namespace - -NEArithmeticAdditionKernel::NEArithmeticAdditionKernel() - : _func(nullptr), _policy() -{ -} - -void NEArithmeticAdditionKernel::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output); - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input1, *input2, *output, policy)); - - _policy = policy; - _func = get_implementation(input1->data_type(), input2->data_type(), output->data_type())->ukernel; - - // Configure kernel window - auto win_config = validate_and_configure_window(*input1, *input2, *output); - ARM_COMPUTE_ERROR_THROW_ON(win_config.first); - INEKernel::configure(win_config.second); -} - -Status NEArithmeticAdditionKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy) -{ - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output); - - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*input1, *input2, *output, policy)); - ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(*input1->clone(), *input2->clone(), *output->clone()).first); - - return Status{}; -} - -void NEArithmeticAdditionKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) -{ - ARM_COMPUTE_UNUSED(info); - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window); - // Dispatch kernel - (*_func)(tensors.get_const_tensor(TensorType::ACL_SRC_0), - tensors.get_const_tensor(TensorType::ACL_SRC_1), - tensors.get_tensor(TensorType::ACL_DST), - _policy, - window); -} -} // namespace arm_compute diff --git a/src/core/NEON/kernels/NEArithmeticAdditionKernel.h b/src/core/NEON/kernels/NEArithmeticAdditionKernel.h deleted file mode 100644 index b88fc8aa74..0000000000 --- a/src/core/NEON/kernels/NEArithmeticAdditionKernel.h +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright (c) 2016-2021 Arm Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef ARM_COMPUTE_NEARITHMETICADDITIONKERNEL_H -#define ARM_COMPUTE_NEARITHMETICADDITIONKERNEL_H - -#include "arm_compute/core/Types.h" -#include "src/core/NEON/INEKernel.h" - -namespace arm_compute -{ -class ITensor; - -/** Interface for the kernel to perform addition between two tensors */ -class NEArithmeticAdditionKernel : public INEKernel -{ -public: - const char *name() const override - { - return "NEArithmeticAdditionKernel"; - } - /** Default constructor */ - NEArithmeticAdditionKernel(); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NEArithmeticAdditionKernel(const NEArithmeticAdditionKernel &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NEArithmeticAdditionKernel &operator=(const NEArithmeticAdditionKernel &) = delete; - /** Allow instances of this class to be moved */ - NEArithmeticAdditionKernel(NEArithmeticAdditionKernel &&) = default; - /** Allow instances of this class to be moved */ - NEArithmeticAdditionKernel &operator=(NEArithmeticAdditionKernel &&) = default; - /** Default destructor */ - ~NEArithmeticAdditionKernel() = default; - - /** Initialise the kernel's input, output and border mode. - * - * Valid configurations (Input1,Input2) -> Output : - * - * - (U8,U8) -> U8 - * - (U8,U8) -> S16 - * - (S16,U8) -> S16 - * - (U8,S16) -> S16 - * - (S16,S16) -> S16 - * - (S32,S32) -> S32 - * - (F16,F16) -> F16 - * - (F32,F32) -> F32 - * - (QASYMM8,QASYMM8) -> QASYMM8 - * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED - * - (QSYMM16,QSYMM16) -> QSYMM16 - * - * @param[in] input1 First input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32 - * @param[in] input2 Second input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32 - * @param[out] output The output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32. - * @param[in] policy Overflow policy. - */ - void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy); - /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticAdditionKernel - * - * @param[in] input1 First input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32 - * @param[in] input2 Second input tensor info. 
Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32 - * @param[in] output The output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32. - * @param[in] policy Overflow policy. - * - * @return a status - */ - static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy); - - // Inherited methods overridden: - void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override; - - /** Common signature for all the specialised add functions - * - * @param[in] input1 First input tensor. Data types supported: U8/QASYMM8/S16/QSYMM16/F16/S32/F32 - * @param[in] input2 Second input tensor. Data types supported: U8/QASYMM8/S16/QSYMM16/F16/S32/F32 - * @param[out] output The output tensor. Data types supported: U8/QASYMM8/S16/QSYMM16/F16/S32/F32. - * @param[in] policy Overflow policy. - * @param[in] window Region on which to execute the kernel. - */ - using ArithmeticAdditionKernelPtr = std::add_pointer<void(const ITensor *, const ITensor *, ITensor *, const ConvertPolicy &, const Window &)>::type; - -private: - /** Add function to use for the particular tensor types passed to configure() */ - ArithmeticAdditionKernelPtr _func; - ConvertPolicy _policy; -}; -} // namespace arm_compute -#endif /*ARM_COMPUTE_NEARITHMETICADDITIONKERNEL_H */ diff --git a/src/core/cpu/kernels/CpuAddKernel.cpp b/src/core/cpu/kernels/CpuAddKernel.cpp new file mode 100644 index 0000000000..31c7b2af60 --- /dev/null +++ b/src/core/cpu/kernels/CpuAddKernel.cpp @@ -0,0 +1,347 @@ +/* + * Copyright (c) 2021 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "src/core/cpu/kernels/CpuAddKernel.h" + +#include "arm_compute/core/ITensor.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Validate.h" +#include "src/core/CPP/Validate.h" +#include "src/core/common/Registrars.h" +#include "src/core/cpu/kernels/add/neon/list.h" +#include "src/core/cpu/kernels/add/sve/list.h" +#include "src/core/helpers/AutoConfiguration.h" +#include "src/core/helpers/WindowHelpers.h" + +#include <array> + +namespace arm_compute +{ +namespace cpu +{ +namespace kernels +{ +namespace +{ +struct AddSelectorData +{ + DataType dt1; + DataType dt2; + DataType dt3; +}; + +using AddSelectorPtr = std::add_pointer<bool(const AddSelectorData &data)>::type; +using AddKernelPtr = std::add_pointer<void(const ITensor *, const ITensor *, ITensor *, const ConvertPolicy &, const Window &)>::type; +struct AddKernel +{ + const char *name; + const AddSelectorPtr is_selected; + AddKernelPtr ukernel; +}; + +static const AddKernel available_kernels[] = +{ +#if defined(__ARM_FEATURE_SVE) + { + "add_same_sve", + [](const AddSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == DataType::F32)); }, + REGISTER_FP32_SVE(arm_compute::cpu::add_same_sve<float>) + }, + { + "add_same_sve", + [](const AddSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == DataType::F16)); }, + REGISTER_FP16_SVE(arm_compute::cpu::add_same_sve<float16_t>) + }, + { + "add_same_sve", + [](const AddSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == data.dt3) && (data.dt1 == DataType::U8)); }, + REGISTER_INTEGER_SVE(arm_compute::cpu::add_same_sve<uint8_t>) + }, + { + "add_same_sve", + [](const AddSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == data.dt3) && (data.dt1 == DataType::S16)); }, + REGISTER_INTEGER_SVE(arm_compute::cpu::add_same_sve<int16_t>) + }, + { + "add_same_sve", + [](const AddSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == data.dt3) && (data.dt1 == DataType::S32)); }, + REGISTER_INTEGER_SVE(arm_compute::cpu::add_same_sve<int32_t>) + }, + { + "add_u8_s16_s16_sve", + [](const AddSelectorData & data) { return ((data.dt1 == DataType::U8) && (data.dt2 == DataType::S16)); }, + REGISTER_INTEGER_SVE(arm_compute::cpu::add_u8_s16_s16_sve) + }, + { + "add_s16_u8_s16_sve", + [](const AddSelectorData & data) { return ((data.dt1 == DataType::S16) && (data.dt2 == DataType::U8)); }, + REGISTER_INTEGER_SVE(arm_compute::cpu::add_s16_u8_s16_sve) + }, + { + "add_u8_u8_s16_sve", + [](const AddSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt3 == DataType::S16)); }, + REGISTER_INTEGER_SVE(arm_compute::cpu::add_u8_u8_s16_sve) + }, +#else /* !defined(__ARM_FEATURE_SVE) */ + { + "add_same_neon", + [](const AddSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == DataType::F32)); }, + REGISTER_FP32_NEON(arm_compute::cpu::add_same_neon<float>) + }, +#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) + { + "add_same_neon", + [](const AddSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == DataType::F16)); }, + REGISTER_FP16_NEON(arm_compute::cpu::add_same_neon<float16_t>) + }, +#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) */ + { + "add_same_neon", + [](const AddSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == data.dt3) && (data.dt1 == DataType::U8)); }, + REGISTER_INTEGER_NEON(arm_compute::cpu::add_same_neon<uint8_t>) + }, + { + "add_same_neon", + [](const AddSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == 
data.dt3) && (data.dt1 == DataType::S16)); }, + REGISTER_INTEGER_NEON(arm_compute::cpu::add_same_neon<int16_t>) + }, + { + "add_same_neon", + [](const AddSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == data.dt3) && (data.dt1 == DataType::S32)); }, + REGISTER_INTEGER_NEON(arm_compute::cpu::add_same_neon<int32_t>) + }, + { + "add_u8_s16_s16_neon", + [](const AddSelectorData & data) { return ((data.dt1 == DataType::U8) && (data.dt2 == DataType::S16)); }, + REGISTER_INTEGER_NEON(arm_compute::cpu::add_u8_s16_s16_neon) + }, + { + "add_s16_u8_s16_neon", + [](const AddSelectorData & data) { return ((data.dt1 == DataType::S16) && (data.dt2 == DataType::U8)); }, + REGISTER_INTEGER_NEON(arm_compute::cpu::add_s16_u8_s16_neon) + }, + { + "add_u8_u8_s16_neon", + [](const AddSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt3 == DataType::S16)); }, + REGISTER_INTEGER_NEON(arm_compute::cpu::add_u8_u8_s16_neon) + }, +#endif /* defined(__ARM_FEATURE_SVE) */ + +#if defined(__ARM_FEATURE_SVE2) + { + "add_qasymm8_sve", + [](const AddSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == DataType::QASYMM8)); }, + REGISTER_QASYMM8_SVE(arm_compute::cpu::add_qasymm8_sve) + }, + { + "add_qasymm8_signed_sve", + [](const AddSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == DataType::QASYMM8_SIGNED)); }, + REGISTER_QASYMM8_SIGNED_SVE(arm_compute::cpu::add_qasymm8_signed_sve) + }, + { + "add_qsymm16_sve", + [](const AddSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == DataType::QSYMM16)); }, + REGISTER_QSYMM16_SVE(arm_compute::cpu::add_qsymm16_sve) + }, +#else /* !defined(__ARM_FEATURE_SVE2) */ + { + "add_qasymm8_neon", + [](const AddSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == DataType::QASYMM8)); }, + REGISTER_QASYMM8_NEON(arm_compute::cpu::add_qasymm8_neon) + }, + { + "add_qasymm8_signed_neon", + [](const AddSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == DataType::QASYMM8_SIGNED)); }, + REGISTER_QASYMM8_SIGNED_NEON(arm_compute::cpu::add_qasymm8_signed_neon) + }, + { + "add_qsymm16_neon", + [](const AddSelectorData & data) { return ((data.dt1 == data.dt2) && (data.dt1 == DataType::QSYMM16)); }, + REGISTER_QSYMM16_NEON(arm_compute::cpu::add_qsymm16_neon) + }, +#endif /* defined(__ARM_FEATURE_SVE2) */ + +}; + +/** Micro-kernel selector + * + * @param[in] data Selection data passed to help pick the appropriate micro-kernel + * + * @return A matching micro-kernel else nullptr + */ +const AddKernel *get_implementation(DataType dt1, DataType dt2, DataType dt3) +{ + for(const auto &uk : available_kernels) + { + if(uk.is_selected({ dt1, dt2, dt3 })) + { + return &uk; + } + } + return nullptr; +} + +Status validate_arguments(const ITensorInfo &src0, const ITensorInfo &src1, const ITensorInfo &dst, ConvertPolicy policy) +{ + ARM_COMPUTE_UNUSED(policy); + + ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(&src0); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src0, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, + DataType::S16, DataType::QSYMM16, DataType::F16, + DataType::S32, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src1, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, + DataType::S16, DataType::QSYMM16, DataType::F16, + DataType::S32, DataType::F32); + + const TensorShape out_shape = TensorShape::broadcast_shape(src0.tensor_shape(), src1.tensor_shape()); + + ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, 
"Inputs are not broadcast compatible"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG((src0.tensor_shape().x() != src1.tensor_shape().x()) && ((src0.data_type() != src1.data_type()) || (src0.data_type() != dst.data_type()) + || (src1.data_type() != dst.data_type())), + "Broadcasting across width is supported on configurations where all tensors have the same data type"); + + // Validate in case of configured dst + if(dst.total_size() > 0) + { + ARM_COMPUTE_RETURN_ERROR_ON_MSG( + !(src0.data_type() == DataType::U8 && src1.data_type() == DataType::U8 && dst.data_type() == DataType::U8) + && !(src0.data_type() == DataType::U8 && src1.data_type() == DataType::U8 && dst.data_type() == DataType::S16) + && !(src0.data_type() == DataType::U8 && src1.data_type() == DataType::S16 && dst.data_type() == DataType::S16) + && !(src0.data_type() == DataType::S16 && src1.data_type() == DataType::U8 && dst.data_type() == DataType::S16) + && !(src0.data_type() == DataType::S16 && src1.data_type() == DataType::S16 && dst.data_type() == DataType::S16) + && !(src0.data_type() == DataType::S32 && src1.data_type() == DataType::S32 && dst.data_type() == DataType::S32) + && !(src0.data_type() == DataType::F32 && src1.data_type() == DataType::F32 && dst.data_type() == DataType::F32) + && !(src0.data_type() == DataType::F16 && src1.data_type() == DataType::F16 && dst.data_type() == DataType::F16) + && !(src0.data_type() == DataType::QASYMM8 && src1.data_type() == DataType::QASYMM8 && dst.data_type() == DataType::QASYMM8) + && !(src0.data_type() == DataType::QASYMM8_SIGNED && src1.data_type() == DataType::QASYMM8_SIGNED && dst.data_type() == DataType::QASYMM8_SIGNED) + && !(src0.data_type() == DataType::QSYMM16 && src1.data_type() == DataType::QSYMM16 && dst.data_type() == DataType::QSYMM16), + "You called addition with the wrong image formats"); + + ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, dst.tensor_shape(), 0), + "Wrong shape for dst"); + } + + const auto *uk = get_implementation(src0.data_type(), src1.data_type(), dst.data_type()); + ARM_COMPUTE_RETURN_ERROR_ON(uk == nullptr || uk->ukernel == nullptr); + + return Status{}; +} + +std::pair<Status, Window> validate_and_configure_window(const ITensorInfo &src0, const ITensorInfo &src1, ITensorInfo &dst) +{ + const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(src0, src1); + const TensorShape &out_shape = broadcast_pair.first; + const ValidRegion &valid_region = broadcast_pair.second; + + // Auto initialize dst if not initialized + { + set_shape_if_empty(dst, out_shape); + + if(src0.data_type() == DataType::S16 || src1.data_type() == DataType::S16) + { + set_format_if_unknown(dst, Format::S16); + } + if(src0.data_type() == DataType::S32 || src1.data_type() == DataType::S32) + { + set_format_if_unknown(dst, Format::S32); + } + else if(src0.data_type() == DataType::F16 || src1.data_type() == DataType::F16) + { + set_format_if_unknown(dst, Format::F16); + } + else if(src0.data_type() == DataType::F32 || src1.data_type() == DataType::F32) + { + set_format_if_unknown(dst, Format::F32); + } + else if(src0.data_type() == DataType::QASYMM8 || src1.data_type() == DataType::QASYMM8) + { + set_data_type_if_unknown(dst, DataType::QASYMM8); + } + else if(src0.data_type() == DataType::QASYMM8_SIGNED || src1.data_type() == DataType::QASYMM8_SIGNED) + { + set_data_type_if_unknown(dst, DataType::QASYMM8_SIGNED); + } + else if(src0.data_type() == DataType::QSYMM16 || src1.data_type() == DataType::QSYMM16) + { + 
set_data_type_if_unknown(dst, DataType::QSYMM16); + } + } + + Window win = calculate_max_window(valid_region, Steps()); + + // CpuAddKernel doesn't need padding so update_window_and_padding() can be skipped + Coordinates coord; + coord.set_num_dimensions(dst.num_dimensions()); + dst.set_valid_region(valid_region); + return std::make_pair(Status{}, win); +} +} // namespace + +void CpuAddKernel::configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst, ConvertPolicy policy) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(src0, src1, dst); + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*src0, *src1, *dst, policy)); + + _policy = policy; + + // Configure kernel window + auto win_config = validate_and_configure_window(*src0, *src1, *dst); + ARM_COMPUTE_ERROR_THROW_ON(win_config.first); + ICpuKernel::configure(win_config.second); +} + +Status CpuAddKernel::validate(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst, ConvertPolicy policy) +{ + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src0, src1, dst); + + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*src0, *src1, *dst, policy)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(*src0->clone(), *src1->clone(), *dst->clone()).first); + + return Status{}; +} + +void CpuAddKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) +{ + ARM_COMPUTE_UNUSED(info); + ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); + ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICpuKernel::window(), window); + + ARM_COMPUTE_ERROR_ON(tensors.empty()); + + const ITensor *src0 = tensors.get_const_tensor(TensorType::ACL_SRC_0); + const ITensor *src1 = tensors.get_const_tensor(TensorType::ACL_SRC_1); + ITensor *dst = tensors.get_tensor(TensorType::ACL_DST); + + const auto *uk = get_implementation(src0->info()->data_type(), src1->info()->data_type(), dst->info()->data_type()); + ARM_COMPUTE_ERROR_ON(uk == nullptr || uk->ukernel == nullptr); + + uk->ukernel(src0, src1, dst, _policy, window); +} + +const char *CpuAddKernel::name() const +{ + return "CpuAddKernel"; +} +} // namespace kernels +} // namespace cpu +} // namespace arm_compute diff --git a/src/core/cpu/kernels/CpuAddKernel.h b/src/core/cpu/kernels/CpuAddKernel.h new file mode 100644 index 0000000000..a36ec7ad65 --- /dev/null +++ b/src/core/cpu/kernels/CpuAddKernel.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2016-2021 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef ARM_COMPUTE_CPUADDKERNEL_H +#define ARM_COMPUTE_CPUADDKERNEL_H + +#include "src/core/common/Macros.h" +#include "src/core/cpu/ICpuKernel.h" + +namespace arm_compute +{ +namespace cpu +{ +namespace kernels +{ +/** Interface for the kernel to perform addition between two tensors */ +class CpuAddKernel : public ICpuKernel +{ +public: + CpuAddKernel() = default; + ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuAddKernel); + /** Initialise the kernel's input, dst and border mode. + * + * Valid configurations (src0,src1) -> dst : + * + * - (U8,U8) -> U8 + * - (U8,U8) -> S16 + * - (S16,U8) -> S16 + * - (U8,S16) -> S16 + * - (S16,S16) -> S16 + * - (S32,S32) -> S32 + * - (F16,F16) -> F16 + * - (F32,F32) -> F32 + * - (QASYMM8,QASYMM8) -> QASYMM8 + * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED + * - (QSYMM16,QSYMM16) -> QSYMM16 + * + * @param[in] src0 First input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32 + * @param[in] src1 Second input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32 + * @param[out] dst The dst tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32. + * @param[in] policy Overflow policy. + */ + void configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst, ConvertPolicy policy); + /** Static function to check if given info will lead to a valid configuration of @ref CpuAddKernel + * + * @param[in] src0 First input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32 + * @param[in] src1 Second input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32 + * @param[in] dst The dst tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32. + * @param[in] policy Overflow policy. 
+ * + * @return a status + */ + static Status validate(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst, ConvertPolicy policy); + + // Inherited methods overridden: + void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override; + const char *name() const override; + +private: + ConvertPolicy _policy{}; +}; +} // namespace kernels +} // namespace cpu +} // namespace arm_compute +#endif /*ARM_COMPUTE_CPUADDKERNEL_H */ diff --git a/src/core/NEON/kernels/arithmetic_addition/impl/NEON/integer.cpp b/src/core/cpu/kernels/add/neon/integer.cpp index 0aededfcfd..24a0ac3b7c 100644 --- a/src/core/NEON/kernels/arithmetic_addition/impl/NEON/integer.cpp +++ b/src/core/cpu/kernels/add/neon/integer.cpp @@ -32,21 +32,21 @@ namespace arm_compute { namespace cpu { -void arithmetic_addition_U8_U8_S16_neon(const ITensor *in1, const ITensor *in2, ITensor *out, const ConvertPolicy &policy, const Window &window) +void add_u8_u8_s16_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window) { // Create input windows Window win = window; - Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()); - Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()); + Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape()); + Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape()); // Clear X Dimension on execution window as we handle manually win.set(Window::DimX, Window::Dimension(0, 1, 1)); input1_win.set(Window::DimX, Window::Dimension(0, 1, 1)); input2_win.set(Window::DimX, Window::Dimension(0, 1, 1)); - Iterator input1(in1, input1_win); - Iterator input2(in2, input2_win); - Iterator output(out, win); + Iterator input1(src0, input1_win); + Iterator input2(src1, input2_win); + Iterator output(dst, win); const int window_step_x = 8; const auto window_start_x = static_cast<int>(window.x().start()); @@ -97,21 +97,21 @@ void arithmetic_addition_U8_U8_S16_neon(const ITensor *in1, const ITensor *in2, input1, input2, output); } -void arithmetic_addition_S16_U8_S16_neon(const ITensor *in1, const ITensor *in2, ITensor *out, const ConvertPolicy &policy, const Window &window) +void add_s16_u8_s16_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window) { // Create input windows Window win = window; - Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()); - Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()); + Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape()); + Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape()); // Clear X Dimension on execution window as we handle manually win.set(Window::DimX, Window::Dimension(0, 1, 1)); input1_win.set(Window::DimX, Window::Dimension(0, 1, 1)); input2_win.set(Window::DimX, Window::Dimension(0, 1, 1)); - Iterator input1(in1, input1_win); - Iterator input2(in2, input2_win); - Iterator output(out, win); + Iterator input1(src0, input1_win); + Iterator input2(src1, input2_win); + Iterator output(dst, win); const int window_step_x = 8; const auto window_start_x = static_cast<int>(window.x().start()); @@ -161,10 +161,10 @@ void arithmetic_addition_S16_U8_S16_neon(const ITensor *in1, const ITensor *in2, input1, input2, output); } -void arithmetic_addition_U8_S16_S16_neon(const ITensor 
*input1, const ITensor *input2, ITensor *output, const ConvertPolicy &policy, const Window &window) +void add_u8_s16_s16_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window) { // Simply swap the two input buffers: - arithmetic_addition_S16_U8_S16_neon(input2, input1, output, policy, window); + add_s16_u8_s16_neon(src1, src0, dst, policy, window); } } // namespace cpu } // namespace arm_compute
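
The renamed integer kernels keep their mixed-type semantics: add_u8_u8_s16_neon widens two U8 inputs and writes S16, while add_u8_s16_s16_neon simply forwards to add_s16_u8_s16_neon with the operands swapped, as shown above. The hunks elide the vectorised bodies, so as a rough illustration only, the core per-vector widening step for the U8+U8 -> S16 case could look like the following (hand-written sketch, not the kernel's actual code):

#include <arm_neon.h>

// Widen eight u8 lanes from each operand and add; the result always fits
// in s16 (255 + 255 = 510), so no saturation is required for this case.
static inline int16x8_t add_u8_u8_to_s16(uint8x8_t a, uint8x8_t b)
{
    return vreinterpretq_s16_u16(vaddl_u8(a, b));
}
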
\ No newline at end of file diff --git a/src/core/NEON/kernels/arithmetic_addition/impl/NEON/list.h b/src/core/cpu/kernels/add/neon/list.h index a8ab0910fd..53ea81e284 100644 --- a/src/core/NEON/kernels/arithmetic_addition/impl/NEON/list.h +++ b/src/core/cpu/kernels/add/neon/list.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef SRC_CORE_NEON_KERNELS_ARITHMETIC_ADDITION_LIST_H -#define SRC_CORE_NEON_KERNELS_ARITHMETIC_ADDITION_LIST_H +#ifndef SRC_CORE_NEON_KERNELS_ADD_LIST_H +#define SRC_CORE_NEON_KERNELS_ADD_LIST_H #include "arm_compute/core/Types.h" #include "arm_compute/core/utils/misc/Traits.h" @@ -32,27 +32,27 @@ namespace arm_compute { namespace cpu { -#define DECLARE_ARITHMETIC_ADDITION_KERNEL(func_name) \ - void func_name(const ITensor *in1, const ITensor *in2, ITensor *out, const ConvertPolicy &policy, const Window &window) +#define DECLARE_ADD_KERNEL(func_name) \ + void func_name(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window) -DECLARE_ARITHMETIC_ADDITION_KERNEL(arithmetic_addition_qasymm8_neon); -DECLARE_ARITHMETIC_ADDITION_KERNEL(arithmetic_addition_qasymm8_signed_neon); -DECLARE_ARITHMETIC_ADDITION_KERNEL(arithmetic_addition_qsymm16_neon); -DECLARE_ARITHMETIC_ADDITION_KERNEL(arithmetic_addition_S16_U8_S16_neon); -DECLARE_ARITHMETIC_ADDITION_KERNEL(arithmetic_addition_U8_S16_S16_neon); -DECLARE_ARITHMETIC_ADDITION_KERNEL(arithmetic_addition_U8_U8_S16_neon); +DECLARE_ADD_KERNEL(add_qasymm8_neon); +DECLARE_ADD_KERNEL(add_qasymm8_signed_neon); +DECLARE_ADD_KERNEL(add_qsymm16_neon); +DECLARE_ADD_KERNEL(add_s16_u8_s16_neon); +DECLARE_ADD_KERNEL(add_u8_s16_s16_neon); +DECLARE_ADD_KERNEL(add_u8_u8_s16_neon); -#undef DECLARE_ARITHMETIC_ADDITION_KERNEL +#undef DECLARE_ADD_KERNEL template <typename ScalarType> -void arithmetic_addition_same_neon(const ITensor *in1, const ITensor *in2, ITensor *out, const ConvertPolicy &policy, const Window &window) +void add_same_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window) { /** NEON vector tag type. */ using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<ScalarType, wrapper::traits::BitWidth::W128>; // Create input windows - Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()); - Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()); + Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape()); + Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape()); // Clear X Dimension on execution window as we handle manually Window win = window; @@ -61,22 +61,22 @@ void arithmetic_addition_same_neon(const ITensor *in1, const ITensor *in2, ITens constexpr int window_step_x = 16 / sizeof(ScalarType); const auto window_start_x = static_cast<int>(window.x().start()); const auto window_end_x = static_cast<int>(window.x().end()); - const bool is_broadcast_across_x = in1->info()->tensor_shape().x() != in2->info()->tensor_shape().x(); + const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x(); if(is_broadcast_across_x) { const bool is_broadcast_input_2 = input2_win.x().step() == 0; Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win; Window non_broadcast_win = !is_broadcast_input_2 ? input2_win : input1_win; - const ITensor *broadcast_tensor = is_broadcast_input_2 ? 
in2 : in1; - const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? in2 : in1; + const ITensor *broadcast_tensor = is_broadcast_input_2 ? src1 : src0; + const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0; // Clear X Dimension on execution window as we handle manually non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1)); Iterator broadcast_input(broadcast_tensor, broadcast_win); Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win); - Iterator output(out, win); + Iterator output(dst, win); execute_window_loop(win, [&](const Coordinates &) { @@ -110,9 +110,9 @@ void arithmetic_addition_same_neon(const ITensor *in1, const ITensor *in2, ITens input1_win.set(Window::DimX, Window::Dimension(0, 1, 1)); input2_win.set(Window::DimX, Window::Dimension(0, 1, 1)); - Iterator input1(in1, input1_win); - Iterator input2(in2, input2_win); - Iterator output(out, win); + Iterator input1(src0, input1_win); + Iterator input2(src1, input2_win); + Iterator output(dst, win); execute_window_loop(win, [&](const Coordinates &) { @@ -143,4 +143,4 @@ void arithmetic_addition_same_neon(const ITensor *in1, const ITensor *in2, ITens } } // namespace cpu } // namespace arm_compute -#endif // SRC_CORE_NEON_KERNELS_ARITHMETIC_ADDITION_LIST_H
\ No newline at end of file +#endif // SRC_CORE_NEON_KERNELS_ADD_LIST_H
\ No newline at end of file diff --git a/src/core/NEON/kernels/arithmetic_addition/impl/NEON/qasymm8.cpp b/src/core/cpu/kernels/add/neon/qasymm8.cpp index 0b3a851fc5..cc97f0067c 100644 --- a/src/core/NEON/kernels/arithmetic_addition/impl/NEON/qasymm8.cpp +++ b/src/core/cpu/kernels/add/neon/qasymm8.cpp @@ -32,13 +32,13 @@ namespace arm_compute { namespace cpu { -void arithmetic_addition_qasymm8_neon(const ITensor *in1, const ITensor *in2, ITensor *out, const ConvertPolicy &policy, const Window &window) +void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window) { ARM_COMPUTE_UNUSED(policy); // Create input windows - Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()); - Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()); + Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape()); + Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape()); // Clear X Dimension on execution window as we handle manually Window win = window; @@ -47,11 +47,11 @@ void arithmetic_addition_qasymm8_neon(const ITensor *in1, const ITensor *in2, IT const int window_step_x = 16; const auto window_start_x = static_cast<int>(window.x().start()); const auto window_end_x = static_cast<int>(window.x().end()); - const bool is_broadcast_across_x = in1->info()->tensor_shape().x() != in2->info()->tensor_shape().x(); + const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x(); - const UniformQuantizationInfo iq1_info = in1->info()->quantization_info().uniform(); - const UniformQuantizationInfo iq2_info = in2->info()->quantization_info().uniform(); - const UniformQuantizationInfo oq_info = out->info()->quantization_info().uniform(); + const UniformQuantizationInfo iq1_info = src0->info()->quantization_info().uniform(); + const UniformQuantizationInfo iq2_info = src1->info()->quantization_info().uniform(); + const UniformQuantizationInfo oq_info = dst->info()->quantization_info().uniform(); const float32x4_t invvscaleo = vdupq_n_f32(1.f / oq_info.scale); const float32x4_t voffseto = vdupq_n_f32(oq_info.offset); @@ -61,8 +61,8 @@ void arithmetic_addition_qasymm8_neon(const ITensor *in1, const ITensor *in2, IT const bool is_broadcast_input_2 = input2_win.x().step() == 0; Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win; Window non_broadcast_win = !is_broadcast_input_2 ? input2_win : input1_win; - const ITensor *broadcast_tensor = is_broadcast_input_2 ? in2 : in1; - const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? in2 : in1; + const ITensor *broadcast_tensor = is_broadcast_input_2 ? src1 : src0; + const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? 
src1 : src0; const UniformQuantizationInfo broadcast_qinfo = broadcast_tensor->info()->quantization_info().uniform(); const UniformQuantizationInfo non_broadcast_qinfo = non_broadcast_tensor->info()->quantization_info().uniform(); @@ -76,7 +76,7 @@ void arithmetic_addition_qasymm8_neon(const ITensor *in1, const ITensor *in2, IT Iterator broadcast_input(broadcast_tensor, broadcast_win); Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win); - Iterator output(out, win); + Iterator output(dst, win); execute_window_loop(win, [&](const Coordinates &) { @@ -140,9 +140,9 @@ void arithmetic_addition_qasymm8_neon(const ITensor *in1, const ITensor *in2, IT input1_win.set(Window::DimX, Window::Dimension(0, 1, 1)); input2_win.set(Window::DimX, Window::Dimension(0, 1, 1)); - Iterator input1(in1, input1_win); - Iterator input2(in2, input2_win); - Iterator output(out, win); + Iterator input1(src0, input1_win); + Iterator input2(src1, input2_win); + Iterator output(dst, win); const float32x4_t vscale1 = vdupq_n_f32(iq1_info.scale); const float32x4_t vscale2 = vdupq_n_f32(iq2_info.scale); @@ -199,7 +199,7 @@ void arithmetic_addition_qasymm8_neon(const ITensor *in1, const ITensor *in2, IT { const float afs = static_cast<int32_t>((*(input1_ptr + x)) - iq1_info.offset) * iq1_info.scale; const float bfs = static_cast<int32_t>((*(input2_ptr + x)) - iq2_info.offset) * iq2_info.scale; - *(output_ptr + x) = quantize_qasymm8((afs + bfs), out->info()->quantization_info()); + *(output_ptr + x) = quantize_qasymm8((afs + bfs), dst->info()->quantization_info()); } }, input1, input2, output); diff --git a/src/core/NEON/kernels/arithmetic_addition/impl/NEON/qasymm8_signed.cpp b/src/core/cpu/kernels/add/neon/qasymm8_signed.cpp index 18f5aabb21..d62d0739f5 100644 --- a/src/core/NEON/kernels/arithmetic_addition/impl/NEON/qasymm8_signed.cpp +++ b/src/core/cpu/kernels/add/neon/qasymm8_signed.cpp @@ -32,13 +32,13 @@ namespace arm_compute { namespace cpu { -void arithmetic_addition_qasymm8_signed_neon(const ITensor *in1, const ITensor *in2, ITensor *out, const ConvertPolicy &policy, const Window &window) +void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window) { ARM_COMPUTE_UNUSED(policy); // Create input windows - Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()); - Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()); + Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape()); + Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape()); // Clear X Dimension on execution window as we handle manually Window win = window; @@ -47,11 +47,11 @@ void arithmetic_addition_qasymm8_signed_neon(const ITensor *in1, const ITensor * const int window_step_x = 16; const auto window_start_x = static_cast<int>(window.x().start()); const auto window_end_x = static_cast<int>(window.x().end()); - const bool is_broadcast_across_x = in1->info()->tensor_shape().x() != in2->info()->tensor_shape().x(); + const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x(); - const UniformQuantizationInfo iq1_info = in1->info()->quantization_info().uniform(); - const UniformQuantizationInfo iq2_info = in2->info()->quantization_info().uniform(); - const UniformQuantizationInfo oq_info = out->info()->quantization_info().uniform(); + const UniformQuantizationInfo iq1_info = 
src0->info()->quantization_info().uniform(); + const UniformQuantizationInfo iq2_info = src1->info()->quantization_info().uniform(); + const UniformQuantizationInfo oq_info = dst->info()->quantization_info().uniform(); const float32x4_t invvscaleo = vdupq_n_f32(1.f / oq_info.scale); const float32x4_t voffseto = vdupq_n_f32(oq_info.offset); @@ -61,8 +61,8 @@ void arithmetic_addition_qasymm8_signed_neon(const ITensor *in1, const ITensor * const bool is_broadcast_input_2 = input2_win.x().step() == 0; Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win; Window non_broadcast_win = !is_broadcast_input_2 ? input2_win : input1_win; - const ITensor *broadcast_tensor = is_broadcast_input_2 ? in2 : in1; - const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? in2 : in1; + const ITensor *broadcast_tensor = is_broadcast_input_2 ? src1 : src0; + const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0; const UniformQuantizationInfo broadcast_qinfo = broadcast_tensor->info()->quantization_info().uniform(); const UniformQuantizationInfo non_broadcast_qinfo = non_broadcast_tensor->info()->quantization_info().uniform(); @@ -76,7 +76,7 @@ void arithmetic_addition_qasymm8_signed_neon(const ITensor *in1, const ITensor * Iterator broadcast_input(broadcast_tensor, broadcast_win); Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win); - Iterator output(out, win); + Iterator output(dst, win); execute_window_loop(win, [&](const Coordinates &) { @@ -140,9 +140,9 @@ void arithmetic_addition_qasymm8_signed_neon(const ITensor *in1, const ITensor * input1_win.set(Window::DimX, Window::Dimension(0, 1, 1)); input2_win.set(Window::DimX, Window::Dimension(0, 1, 1)); - Iterator input1(in1, input1_win); - Iterator input2(in2, input2_win); - Iterator output(out, win); + Iterator input1(src0, input1_win); + Iterator input2(src1, input2_win); + Iterator output(dst, win); const float32x4_t vscale1 = vdupq_n_f32(iq1_info.scale); const float32x4_t vscale2 = vdupq_n_f32(iq2_info.scale); @@ -198,7 +198,7 @@ void arithmetic_addition_qasymm8_signed_neon(const ITensor *in1, const ITensor * { const float afs = static_cast<int32_t>((*(input1_ptr + x)) - iq1_info.offset) * iq1_info.scale; const float bfs = static_cast<int32_t>((*(input2_ptr + x)) - iq2_info.offset) * iq2_info.scale; - *(output_ptr + x) = quantize_qasymm8_signed((afs + bfs), out->info()->quantization_info()); + *(output_ptr + x) = quantize_qasymm8_signed((afs + bfs), dst->info()->quantization_info()); } }, input1, input2, output); diff --git a/src/core/NEON/kernels/arithmetic_addition/impl/NEON/qsymm16.cpp b/src/core/cpu/kernels/add/neon/qsymm16.cpp index 650f25ed5a..e76e408d6e 100644 --- a/src/core/NEON/kernels/arithmetic_addition/impl/NEON/qsymm16.cpp +++ b/src/core/cpu/kernels/add/neon/qsymm16.cpp @@ -32,13 +32,13 @@ namespace arm_compute { namespace cpu { -void arithmetic_addition_qsymm16_neon(const ITensor *in1, const ITensor *in2, ITensor *out, const ConvertPolicy &policy, const Window &window) +void add_qsymm16_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window) { ARM_COMPUTE_UNUSED(policy); // Create input windows - Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()); - Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()); + Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape()); + Window input2_win = 
 
     // Clear X Dimension on execution window as we handle manually
     Window win = window;
@@ -47,11 +47,11 @@ void arithmetic_addition_qsymm16_neon(const ITensor *in1, const ITensor *in2, IT
     const int  window_step_x         = 8;
     const auto window_start_x        = static_cast<int>(window.x().start());
     const auto window_end_x          = static_cast<int>(window.x().end());
-    const bool is_broadcast_across_x = in1->info()->tensor_shape().x() != in2->info()->tensor_shape().x();
+    const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();
 
-    const UniformQuantizationInfo iq1_info = in1->info()->quantization_info().uniform();
-    const UniformQuantizationInfo iq2_info = in2->info()->quantization_info().uniform();
-    const UniformQuantizationInfo oq_info  = out->info()->quantization_info().uniform();
+    const UniformQuantizationInfo iq1_info = src0->info()->quantization_info().uniform();
+    const UniformQuantizationInfo iq2_info = src1->info()->quantization_info().uniform();
+    const UniformQuantizationInfo oq_info  = dst->info()->quantization_info().uniform();
 
     const float32x4_t vscale1 = vdupq_n_f32(iq1_info.scale);
     const float32x4_t vscale2 = vdupq_n_f32(iq2_info.scale);
@@ -62,8 +62,8 @@ void arithmetic_addition_qsymm16_neon(const ITensor *in1, const ITensor *in2, IT
         const bool     is_broadcast_input_2 = input2_win.x().step() == 0;
         Window         broadcast_win        = is_broadcast_input_2 ? input2_win : input1_win;
         Window         non_broadcast_win    = !is_broadcast_input_2 ? input2_win : input1_win;
-        const ITensor *broadcast_tensor     = is_broadcast_input_2 ? in2 : in1;
-        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? in2 : in1;
+        const ITensor *broadcast_tensor     = is_broadcast_input_2 ? src1 : src0;
+        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;
 
         const UniformQuantizationInfo broadcast_qinfo     = broadcast_tensor->info()->quantization_info().uniform();
         const UniformQuantizationInfo non_broadcast_qinfo = non_broadcast_tensor->info()->quantization_info().uniform();
@@ -72,7 +72,7 @@ void arithmetic_addition_qsymm16_neon(const ITensor *in1, const ITensor *in2, IT
         Iterator broadcast_input(broadcast_tensor, broadcast_win);
         Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
-        Iterator output(out, win);
+        Iterator output(dst, win);
 
         execute_window_loop(win, [&](const Coordinates &)
         {
@@ -123,9 +123,9 @@ void arithmetic_addition_qsymm16_neon(const ITensor *in1, const ITensor *in2, IT
         input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
         input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));
 
-        Iterator input1(in1, input1_win);
-        Iterator input2(in2, input2_win);
-        Iterator output(out, win);
+        Iterator input1(src0, input1_win);
+        Iterator input2(src1, input2_win);
+        Iterator output(dst, win);
 
         execute_window_loop(win, [&](const Coordinates &)
         {
@@ -164,7 +164,7 @@ void arithmetic_addition_qsymm16_neon(const ITensor *in1, const ITensor *in2, IT
             {
                 const float afs   = static_cast<int32_t>((*(input1_ptr + x))) * iq1_info.scale;
                 const float bfs   = static_cast<int32_t>((*(input2_ptr + x))) * iq2_info.scale;
-                *(output_ptr + x) = quantize_qsymm16((afs + bfs), out->info()->quantization_info());
+                *(output_ptr + x) = quantize_qsymm16((afs + bfs), dst->info()->quantization_info());
             }
         },
         input1, input2, output);
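The three NEON quantized kernels above share one numeric recipe: dequantize each operand with its own (scale, offset), add in float, then requantize against the destination's quantization info. A minimal scalar sketch of that recipe, with illustrative names only (UniformQInfo stands in for the library's UniformQuantizationInfo):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    struct UniformQInfo { float scale; int32_t offset; }; // stand-in for UniformQuantizationInfo

    uint8_t add_qasymm8_scalar(uint8_t a, uint8_t b, UniformQInfo qa, UniformQInfo qb, UniformQInfo qo)
    {
        const float   afs = static_cast<float>(static_cast<int32_t>(a) - qa.offset) * qa.scale; // dequantize a
        const float   bfs = static_cast<float>(static_cast<int32_t>(b) - qb.offset) * qb.scale; // dequantize b
        const int32_t q   = static_cast<int32_t>(std::lround((afs + bfs) / qo.scale)) + qo.offset; // requantize the float sum
        return static_cast<uint8_t>(std::min(255, std::max(0, q))); // saturate to the QASYMM8 range
    }

The vector paths above do the same arithmetic with vdupq_n_f32-splatted scales and offsets; only the scalar tail quantizes element by element.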
diff --git a/src/core/NEON/kernels/arithmetic_addition/impl/SVE/integer.cpp b/src/core/cpu/kernels/add/sve/integer.cpp
index c502a0235e..5bd2e12665 100644
--- a/src/core/NEON/kernels/arithmetic_addition/impl/SVE/integer.cpp
+++ b/src/core/cpu/kernels/add/sve/integer.cpp
@@ -34,21 +34,21 @@ namespace arm_compute
 {
 namespace cpu
 {
-void arithmetic_addition_U8_U8_S16_sve(const ITensor *in1, const ITensor *in2, ITensor *out, const ConvertPolicy &policy, const Window &window)
+void add_u8_u8_s16_sve(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
 {
     // Create input windows
     Window win        = window;
-    Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape());
-    Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape());
+    Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape());
+    Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape());
 
     // Clear X Dimension on execution window as we handle manually
     win.set(Window::DimX, Window::Dimension(0, 1, 1));
     input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
     input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));
 
-    Iterator input1(in1, input1_win);
-    Iterator input2(in2, input2_win);
-    Iterator output(out, win);
+    Iterator input1(src0, input1_win);
+    Iterator input2(src1, input2_win);
+    Iterator output(dst, win);
 
     const auto window_start_x = static_cast<int>(window.x().start());
     const auto window_end_x   = static_cast<int>(window.x().end());
@@ -68,15 +68,15 @@ void arithmetic_addition_U8_U8_S16_sve(const ITensor *in1, const ITensor *in2, I
         svbool_t pg_1 = svwhilelt_b16(x, static_cast<int>(window_end_x + svcnth()));
         do
         {
-            const auto vin1 = svld1(pg_u, input1_ptr + x);
-            const auto vin2 = svld1(pg_u, input2_ptr + x);
+            const auto vsrc0 = svld1(pg_u, input1_ptr + x);
+            const auto vsrc1 = svld1(pg_u, input2_ptr + x);
 
-            const auto vin1_lo = svreinterpret_s16_u16(svunpklo(vin1));
-            const auto vin1_hi = svreinterpret_s16_u16(svunpkhi(vin1));
-            const auto vin2_lo = svreinterpret_s16_u16(svunpklo(vin2));
-            const auto vin2_hi = svreinterpret_s16_u16(svunpkhi(vin2));
-            svst1(pg_0, output_ptr + x, svqadd(vin1_lo, vin2_lo));
-            svst1(pg_1, output_ptr + x + svcnth(), svqadd(vin1_hi, vin2_hi));
+            const auto vsrc0_lo = svreinterpret_s16_u16(svunpklo(vsrc0));
+            const auto vsrc0_hi = svreinterpret_s16_u16(svunpkhi(vsrc0));
+            const auto vsrc1_lo = svreinterpret_s16_u16(svunpklo(vsrc1));
+            const auto vsrc1_hi = svreinterpret_s16_u16(svunpkhi(vsrc1));
+            svst1(pg_0, output_ptr + x, svqadd(vsrc0_lo, vsrc1_lo));
+            svst1(pg_1, output_ptr + x + svcnth(), svqadd(vsrc0_hi, vsrc1_hi));
 
             x += svcntb();
             pg_u = svwhilelt_b8(x, window_end_x);
@@ -93,15 +93,15 @@ void arithmetic_addition_U8_U8_S16_sve(const ITensor *in1, const ITensor *in2, I
         svbool_t pg_1 = svwhilelt_b16(x, static_cast<int>(window_end_x + svcnth()));
         do
         {
-            const auto vin1 = svld1(pg_u, input1_ptr + x);
-            const auto vin2 = svld1(pg_u, input2_ptr + x);
+            const auto vsrc0 = svld1(pg_u, input1_ptr + x);
+            const auto vsrc1 = svld1(pg_u, input2_ptr + x);
 
-            const auto vin1_lo = svreinterpret_s16_u16(svunpklo(vin1));
-            const auto vin1_hi = svreinterpret_s16_u16(svunpkhi(vin1));
-            const auto vin2_lo = svreinterpret_s16_u16(svunpklo(vin2));
-            const auto vin2_hi = svreinterpret_s16_u16(svunpkhi(vin2));
-            svst1(pg_0, output_ptr + x, svqadd(vin1_lo, vin2_lo));
-            svst1(pg_1, output_ptr + x + svcnth(), svqadd(vin1_hi, vin2_hi));
+            const auto vsrc0_lo = svreinterpret_s16_u16(svunpklo(vsrc0));
+            const auto vsrc0_hi = svreinterpret_s16_u16(svunpkhi(vsrc0));
+            const auto vsrc1_lo = svreinterpret_s16_u16(svunpklo(vsrc1));
+            const auto vsrc1_hi = svreinterpret_s16_u16(svunpkhi(vsrc1));
+            svst1(pg_0, output_ptr + x, svqadd(vsrc0_lo, vsrc1_lo));
+            svst1(pg_1, output_ptr + x + svcnth(), svqadd(vsrc0_hi, vsrc1_hi));
 
             x += svcntb();
             pg_u = svwhilelt_b8(x, window_end_x);
@@ -114,21 +114,21 @@ void arithmetic_addition_U8_U8_S16_sve(const ITensor *in1, const ITensor *in2, I
     input1, input2, output);
 }
 
-void arithmetic_addition_S16_U8_S16_sve(const ITensor *in1, const ITensor *in2, ITensor *out, const ConvertPolicy &policy, const Window &window)
+void add_s16_u8_s16_sve(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
 {
     // Create input windows
     Window win        = window;
-    Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape());
-    Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape());
+    Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape());
+    Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape());
 
     // Clear X Dimension on execution window as we handle manually
     win.set(Window::DimX, Window::Dimension(0, 1, 1));
     input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
     input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));
 
-    Iterator input1(in1, input1_win);
-    Iterator input2(in2, input2_win);
-    Iterator output(out, win);
+    Iterator input1(src0, input1_win);
+    Iterator input2(src1, input2_win);
+    Iterator output(dst, win);
 
     const auto window_start_x = static_cast<int>(window.x().start());
     const auto window_end_x   = static_cast<int>(window.x().end());
@@ -148,13 +148,13 @@ void arithmetic_addition_S16_U8_S16_sve(const ITensor *in1, const ITensor *in2,
         svbool_t pg_1 = svwhilelt_b16(x + static_cast<int>(svcnth()), window_end_x);
         do
         {
-            const auto vin1_0  = svld1_s16(pg_0, input1_ptr + x);
-            const auto vin1_1  = svld1_s16(pg_1, input1_ptr + x + svcnth());
-            const auto vin2_u8 = svld1_u8(pg_u, input2_ptr + x);
-            const auto vin2_0  = svreinterpret_s16_u16(svunpklo(vin2_u8));
-            const auto vin2_1  = svreinterpret_s16_u16(svunpkhi(vin2_u8));
-            svst1_s16(pg_0, output_ptr + x, svadd_s16_z(pg_0, vin1_0, vin2_0));
-            svst1_s16(pg_1, output_ptr + x, svadd_s16_z(pg_1, vin1_1, vin2_1));
+            const auto vsrc0_0  = svld1_s16(pg_0, input1_ptr + x);
+            const auto vsrc0_1  = svld1_s16(pg_1, input1_ptr + x + svcnth());
+            const auto vsrc1_u8 = svld1_u8(pg_u, input2_ptr + x);
+            const auto vsrc1_0  = svreinterpret_s16_u16(svunpklo(vsrc1_u8));
+            const auto vsrc1_1  = svreinterpret_s16_u16(svunpkhi(vsrc1_u8));
+            svst1_s16(pg_0, output_ptr + x, svadd_s16_z(pg_0, vsrc0_0, vsrc1_0));
+            svst1_s16(pg_1, output_ptr + x, svadd_s16_z(pg_1, vsrc0_1, vsrc1_1));
 
             x += svcnth();
             pg_u = svwhilelt_b8(x, window_end_x);
@@ -171,14 +171,14 @@ void arithmetic_addition_S16_U8_S16_sve(const ITensor *in1, const ITensor *in2,
         svbool_t pg_1 = svwhilelt_b16(x + static_cast<int>(svcnth()), window_end_x);
         do
         {
-            const auto vin1_0  = svld1_s16(pg_0, input1_ptr + x);
-            const auto vin1_1  = svld1_s16(pg_1, input1_ptr + x);
-            const auto vin2_u8 = svld1_u8(pg_u, input2_ptr + x);
-            const auto vin2_0  = svreinterpret_s16_u16(svunpklo(vin2_u8));
-            const auto vin2_1  = svreinterpret_s16_u16(svunpkhi(vin2_u8));
+            const auto vsrc0_0  = svld1_s16(pg_0, input1_ptr + x);
+            const auto vsrc0_1  = svld1_s16(pg_1, input1_ptr + x);
+            const auto vsrc1_u8 = svld1_u8(pg_u, input2_ptr + x);
+            const auto vsrc1_0  = svreinterpret_s16_u16(svunpklo(vsrc1_u8));
+            const auto vsrc1_1  = svreinterpret_s16_u16(svunpkhi(vsrc1_u8));
 
-            svst1_s16(pg_0, output_ptr + x, svqadd(vin1_0, vin2_0));
-            svst1_s16(pg_1, output_ptr + x, svqadd(vin1_1, vin2_1));
+            svst1_s16(pg_0, output_ptr + x, svqadd(vsrc0_0, vsrc1_0));
+            svst1_s16(pg_1, output_ptr + x, svqadd(vsrc0_1, vsrc1_1));
 
             x += svcnth();
             pg_u = svwhilelt_b8(x, window_end_x);
@@ -191,10 +191,10 @@ void arithmetic_addition_S16_U8_S16_sve(const ITensor *in1, const ITensor *in2,
     input1, input2, output);
 }
 
-void arithmetic_addition_U8_S16_S16_sve(const ITensor *input1, const ITensor *input2, ITensor *output, const ConvertPolicy &policy, const Window &window)
+void add_u8_s16_s16_sve(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
 {
     // Simply swap the two input buffers:
-    arithmetic_addition_S16_U8_S16_sve(input2, input1, output, policy, window);
+    add_s16_u8_s16_sve(src1, src0, dst, policy, window);
 }
 } // namespace cpu
 } // namespace arm_compute
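Two details in integer.cpp are worth flagging. First, add_u8_s16_s16_sve reuses add_s16_u8_s16_sve purely by swapping its operands, which is sound because addition is commutative. A stripped-down, plain-C++ sketch of that dispatch trick (illustrative names, not the library's):

    #include <cstdint>

    void add_s16_u8(const int16_t *a, const uint8_t *b, int16_t *out, int n)
    {
        for(int i = 0; i < n; ++i)
        {
            // Widen u8 to s16, then add. The real kernels pick wrapping or
            // saturating arithmetic per ConvertPolicy; this sketch wraps.
            out[i] = static_cast<int16_t>(a[i] + static_cast<int16_t>(b[i]));
        }
    }

    void add_u8_s16(const uint8_t *a, const int16_t *b, int16_t *out, int n)
    {
        add_s16_u8(b, a, out, n); // addition is commutative: just swap the buffers
    }

Second, in add_s16_u8_s16_sve both halves of the result are stored to output_ptr + x, even though pg_1 predicates the lanes starting at x + svcnth(); this looks inconsistent with the + svcnth() offset used for the second store in add_u8_u8_s16_sve and may be a latent bug that the mechanical rename carries over unchanged.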
diff --git a/src/core/NEON/kernels/arithmetic_addition/impl/SVE/list.h b/src/core/cpu/kernels/add/sve/list.h
index 3e238c40d0..71dd875ad8 100644
--- a/src/core/NEON/kernels/arithmetic_addition/impl/SVE/list.h
+++ b/src/core/cpu/kernels/add/sve/list.h
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef SRC_CORE_SVE_KERNELS_ARITHMETIC_ADDITION_LIST_H
-#define SRC_CORE_SVE_KERNELS_ARITHMETIC_ADDITION_LIST_H
+#ifndef SRC_CORE_SVE_KERNELS_ADD_LIST_H
+#define SRC_CORE_SVE_KERNELS_ADD_LIST_H
 
 #if defined(__ARM_FEATURE_SVE)
 #include "arm_compute/core/Types.h"
@@ -35,25 +35,25 @@ namespace arm_compute
 {
 namespace cpu
 {
-#define DECLARE_ARITHMETIC_ADDITION_KERNEL(func_name) \
-    void func_name(const ITensor *in1, const ITensor *in2, ITensor *out, const ConvertPolicy &policy, const Window &window)
+#define DECLARE_ADD_KERNEL(func_name) \
+    void func_name(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
 
-DECLARE_ARITHMETIC_ADDITION_KERNEL(arithmetic_addition_qasymm8_sve);
-DECLARE_ARITHMETIC_ADDITION_KERNEL(arithmetic_addition_qasymm8_signed_sve);
-DECLARE_ARITHMETIC_ADDITION_KERNEL(arithmetic_addition_qsymm16_sve);
-DECLARE_ARITHMETIC_ADDITION_KERNEL(arithmetic_addition_S16_U8_S16_sve);
-DECLARE_ARITHMETIC_ADDITION_KERNEL(arithmetic_addition_U8_S16_S16_sve);
-DECLARE_ARITHMETIC_ADDITION_KERNEL(arithmetic_addition_U8_U8_S16_sve);
+DECLARE_ADD_KERNEL(add_qasymm8_sve);
+DECLARE_ADD_KERNEL(add_qasymm8_signed_sve);
+DECLARE_ADD_KERNEL(add_qsymm16_sve);
+DECLARE_ADD_KERNEL(add_s16_u8_s16_sve);
+DECLARE_ADD_KERNEL(add_u8_s16_s16_sve);
+DECLARE_ADD_KERNEL(add_u8_u8_s16_sve);
 
-#undef DECLARE_ARITHMETIC_ADDITION_KERNEL
+#undef DECLARE_ADD_KERNEL
 
 template <typename ScalarType>
-void arithmetic_addition_same_sve(const ITensor *in1, const ITensor *in2, ITensor *out, const ConvertPolicy &policy, const Window &window)
+void add_same_sve(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
 {
     const auto all_true_pg           = wrapper::svptrue<ScalarType>();
     const auto window_start_x        = static_cast<int>(window.x().start());
     const auto window_end_x          = static_cast<int>(window.x().end());
-    const bool is_broadcast_across_x = in1->info()->tensor_shape().x() != in2->info()->tensor_shape().x();
+    const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();
     const bool is_sat                = (policy == ConvertPolicy::SATURATE);
 
     // Clear X Dimension on execution window as we handle manually
@@ -61,27 +61,27 @@ void arithmetic_addition_same_sve(const ITensor *in1, const ITensor *in2, ITenso
     win.set(Window::DimX, Window::Dimension(0, 1, 1));
 
     // Create input windows
-    Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape());
-    Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape());
+    Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape());
+    Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape());
 
-    Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
-    Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
-    Iterator output(out, window);
+    Iterator input1(src0, window.broadcast_if_dimension_le_one(src0->info()->tensor_shape()));
+    Iterator input2(src1, window.broadcast_if_dimension_le_one(src1->info()->tensor_shape()));
+    Iterator output(dst, window);
 
     if(is_broadcast_across_x)
     {
         const bool     is_broadcast_input_2 = input2_win.x().step() == 0;
         Window         broadcast_win        = is_broadcast_input_2 ? input2_win : input1_win;
         Window         non_broadcast_win    = !is_broadcast_input_2 ? input2_win : input1_win;
-        const ITensor *broadcast_tensor     = is_broadcast_input_2 ? in2 : in1;
-        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? in2 : in1;
+        const ITensor *broadcast_tensor     = is_broadcast_input_2 ? src1 : src0;
+        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;
 
         // Clear X Dimension on execution window as we handle manually
         non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));
 
         Iterator broadcast_input(broadcast_tensor, broadcast_win);
         Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
-        Iterator output(out, win);
+        Iterator output(dst, win);
 
         execute_window_loop(win, [&](const Coordinates &)
         {
@@ -112,9 +112,9 @@ void arithmetic_addition_same_sve(const ITensor *in1, const ITensor *in2, ITenso
         input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
         input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));
 
-        Iterator input1(in1, input1_win);
-        Iterator input2(in2, input2_win);
-        Iterator output(out, win);
+        Iterator input1(src0, input1_win);
+        Iterator input2(src1, input2_win);
+        Iterator output(dst, win);
 
         execute_window_loop(win, [&](const Coordinates &)
         {
@@ -142,4 +142,4 @@ void arithmetic_addition_same_sve(const ITensor *in1, const ITensor *in2, ITenso
 } // namespace cpu
 } // namespace arm_compute
 #endif // defined(__ARM_FEATURE_SVE)
-#endif // SRC_CORE_SVE_KERNELS_ARITHMETIC_ADDITION_LIST_H
\ No newline at end of file
+#endif // SRC_CORE_SVE_KERNELS_ADD_LIST_H
\ No newline at end of file
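For reference, each DECLARE_ADD_KERNEL(...) line above stamps out one prototype with the shared kernel signature; for example DECLARE_ADD_KERNEL(add_qasymm8_sve); expands to:

    void add_qasymm8_sve(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);

The macro is #undef'd immediately after the declarations, so it cannot leak into other translation units that include this list header.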
diff --git a/src/core/NEON/kernels/arithmetic_addition/impl/SVE/qasymm8.cpp b/src/core/cpu/kernels/add/sve/qasymm8.cpp
index 871ee23ded..c47b5abf8a 100644
--- a/src/core/NEON/kernels/arithmetic_addition/impl/SVE/qasymm8.cpp
+++ b/src/core/cpu/kernels/add/sve/qasymm8.cpp
@@ -34,13 +34,13 @@ namespace arm_compute
 {
 namespace cpu
 {
-void arithmetic_addition_qasymm8_sve(const ITensor *in1, const ITensor *in2, ITensor *out, const ConvertPolicy &policy, const Window &window)
+void add_qasymm8_sve(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
 {
     ARM_COMPUTE_UNUSED(policy);
 
     // Create input windows
-    Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape());
-    Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape());
+    Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape());
+    Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape());
 
     // Clear X Dimension on execution window as we handle manually
     Window win = window;
@@ -48,12 +48,12 @@ void arithmetic_addition_qasymm8_sve(const ITensor *in1, const ITensor *in2, ITe
     const auto window_start_x        = static_cast<int>(window.x().start());
     const auto window_end_x          = static_cast<int>(window.x().end());
-    const bool is_broadcast_across_x = in1->info()->tensor_shape().x() != in2->info()->tensor_shape().x();
+    const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();
     const auto all_true_pg           = svptrue_b8();
 
-    const UniformQuantizationInfo iq1_info = in1->info()->quantization_info().uniform();
-    const UniformQuantizationInfo iq2_info = in2->info()->quantization_info().uniform();
-    const UniformQuantizationInfo oq_info  = out->info()->quantization_info().uniform();
+    const UniformQuantizationInfo iq1_info = src0->info()->quantization_info().uniform();
+    const UniformQuantizationInfo iq2_info = src1->info()->quantization_info().uniform();
+    const UniformQuantizationInfo oq_info  = dst->info()->quantization_info().uniform();
 
     const auto invvscaleo = svdup_n_f32(1.f / oq_info.scale);
     const auto voffseto   = svdup_n_f32(oq_info.offset);
@@ -63,8 +63,8 @@ void arithmetic_addition_qasymm8_sve(const ITensor *in1, const ITensor *in2, ITe
         const bool     is_broadcast_input_2 = input2_win.x().step() == 0;
         Window         broadcast_win        = is_broadcast_input_2 ? input2_win : input1_win;
         Window         non_broadcast_win    = !is_broadcast_input_2 ? input2_win : input1_win;
-        const ITensor *broadcast_tensor     = is_broadcast_input_2 ? in2 : in1;
-        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? in2 : in1;
+        const ITensor *broadcast_tensor     = is_broadcast_input_2 ? src1 : src0;
+        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;
 
         const svfloat32_t vscale1 = is_broadcast_input_2 ? svdup_n_f32(iq1_info.scale) : svdup_n_f32(iq2_info.scale);
         const svfloat32_t vscale2 = is_broadcast_input_2 ? svdup_n_f32(iq2_info.scale) : svdup_n_f32(iq1_info.scale);
@@ -76,7 +76,7 @@ void arithmetic_addition_qasymm8_sve(const ITensor *in1, const ITensor *in2, ITe
         Iterator broadcast_input(broadcast_tensor, broadcast_win);
         Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
-        Iterator output(out, win);
+        Iterator output(dst, win);
 
         execute_window_loop(win, [&](const Coordinates &)
         {
@@ -127,9 +127,9 @@ void arithmetic_addition_qasymm8_sve(const ITensor *in1, const ITensor *in2, ITe
         input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
         input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));
 
-        Iterator input1(in1, input1_win);
-        Iterator input2(in2, input2_win);
-        Iterator output(out, win);
+        Iterator input1(src0, input1_win);
+        Iterator input2(src1, input2_win);
+        Iterator output(dst, win);
 
         const auto vscale1 = svdup_n_f32(iq1_info.scale);
         const auto vscale2 = svdup_n_f32(iq2_info.scale);
diff --git a/src/core/NEON/kernels/arithmetic_addition/impl/SVE/qasymm8_signed.cpp b/src/core/cpu/kernels/add/sve/qasymm8_signed.cpp
index 2ba5d39400..75d0f75a65 100644
--- a/src/core/NEON/kernels/arithmetic_addition/impl/SVE/qasymm8_signed.cpp
+++ b/src/core/cpu/kernels/add/sve/qasymm8_signed.cpp
@@ -34,13 +34,13 @@ namespace arm_compute
 {
 namespace cpu
 {
-void arithmetic_addition_qasymm8_signed_sve(const ITensor *in1, const ITensor *in2, ITensor *out, const ConvertPolicy &policy, const Window &window)
+void add_qasymm8_signed_sve(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
 {
     ARM_COMPUTE_UNUSED(policy);
 
     // Create input windows
-    Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape());
-    Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape());
+    Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape());
+    Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape());
 
     // Clear X Dimension on execution window as we handle manually
     Window win = window;
@@ -48,11 +48,11 @@ void arithmetic_addition_qasymm8_signed_sve(const ITensor *in1, const ITensor *i
     const auto window_start_x        = static_cast<int>(window.x().start());
     const auto window_end_x          = static_cast<int>(window.x().end());
-    const bool is_broadcast_across_x = in1->info()->tensor_shape().x() != in2->info()->tensor_shape().x();
+    const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();
 
-    const UniformQuantizationInfo iq1_info = in1->info()->quantization_info().uniform();
-    const UniformQuantizationInfo iq2_info = in2->info()->quantization_info().uniform();
-    const UniformQuantizationInfo oq_info  = out->info()->quantization_info().uniform();
+    const UniformQuantizationInfo iq1_info = src0->info()->quantization_info().uniform();
+    const UniformQuantizationInfo iq2_info = src1->info()->quantization_info().uniform();
+    const UniformQuantizationInfo oq_info  = dst->info()->quantization_info().uniform();
 
     const auto invvscaleo = svdup_n_f32(1.f / oq_info.scale);
     const auto voffseto   = svdup_n_f32(oq_info.offset);
@@ -62,8 +62,8 @@ void arithmetic_addition_qasymm8_signed_sve(const ITensor *in1, const ITensor *i
         const bool     is_broadcast_input_2 = input2_win.x().step() == 0;
         Window         broadcast_win        = is_broadcast_input_2 ? input2_win : input1_win;
         Window         non_broadcast_win    = !is_broadcast_input_2 ? input2_win : input1_win;
-        const ITensor *broadcast_tensor     = is_broadcast_input_2 ? in2 : in1;
-        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? in2 : in1;
+        const ITensor *broadcast_tensor     = is_broadcast_input_2 ? src1 : src0;
+        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;
 
         const auto all_true_pg = svptrue_b8();
 
         const auto vscale1 = is_broadcast_input_2 ? svdup_n_f32(iq1_info.scale) : svdup_n_f32(iq2_info.scale);
@@ -76,7 +76,7 @@ void arithmetic_addition_qasymm8_signed_sve(const ITensor *in1, const ITensor *i
         Iterator broadcast_input(broadcast_tensor, broadcast_win);
         Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
-        Iterator output(out, win);
+        Iterator output(dst, win);
 
         execute_window_loop(win, [&](const Coordinates &)
         {
@@ -125,9 +125,9 @@ void arithmetic_addition_qasymm8_signed_sve(const ITensor *in1, const ITensor *i
         input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
         input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));
 
-        Iterator input1(in1, input1_win);
-        Iterator input2(in2, input2_win);
-        Iterator output(out, win);
+        Iterator input1(src0, input1_win);
+        Iterator input2(src1, input2_win);
+        Iterator output(dst, win);
 
         const auto vscale1 = svdup_n_f32(iq1_info.scale);
         const auto vscale2 = svdup_n_f32(iq2_info.scale);
diff --git a/src/core/NEON/kernels/arithmetic_addition/impl/SVE/qsymm16.cpp b/src/core/cpu/kernels/add/sve/qsymm16.cpp
index c072cdb249..c3b72a5e65 100644
--- a/src/core/NEON/kernels/arithmetic_addition/impl/SVE/qsymm16.cpp
+++ b/src/core/cpu/kernels/add/sve/qsymm16.cpp
@@ -34,13 +34,13 @@ namespace arm_compute
 {
 namespace cpu
 {
-void arithmetic_addition_qsymm16_sve(const ITensor *in1, const ITensor *in2, ITensor *out, const ConvertPolicy &policy, const Window &window)
+void add_qsymm16_sve(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
 {
     ARM_COMPUTE_UNUSED(policy);
 
     // Create input windows
-    Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape());
-    Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape());
+    Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape());
+    Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape());
 
     // Clear X Dimension on execution window as we handle manually
     Window win = window;
@@ -48,11 +48,11 @@ void arithmetic_addition_qsymm16_sve(const ITensor *in1, const ITensor *in2, ITe
     const auto window_start_x        = static_cast<int>(window.x().start());
     const auto window_end_x          = static_cast<int>(window.x().end());
-    const bool is_broadcast_across_x = in1->info()->tensor_shape().x() != in2->info()->tensor_shape().x();
+    const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();
 
-    const UniformQuantizationInfo iq1_info = in1->info()->quantization_info().uniform();
-    const UniformQuantizationInfo iq2_info = in2->info()->quantization_info().uniform();
-    const UniformQuantizationInfo oq_info  = out->info()->quantization_info().uniform();
+    const UniformQuantizationInfo iq1_info = src0->info()->quantization_info().uniform();
+    const UniformQuantizationInfo iq2_info = src1->info()->quantization_info().uniform();
+    const UniformQuantizationInfo oq_info  = dst->info()->quantization_info().uniform();
 
     const auto vscale1 = svdup_n_f32(iq1_info.scale);
     const auto vscale2 = svdup_n_f32(iq2_info.scale);
@@ -64,15 +64,15 @@ void arithmetic_addition_qsymm16_sve(const ITensor *in1, const ITensor *in2, ITe
         const bool     is_broadcast_input_2 = input2_win.x().step() == 0;
         Window         broadcast_win        = is_broadcast_input_2 ? input2_win : input1_win;
         Window         non_broadcast_win    = !is_broadcast_input_2 ? input2_win : input1_win;
-        const ITensor *broadcast_tensor     = is_broadcast_input_2 ? in2 : in1;
-        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? in2 : in1;
+        const ITensor *broadcast_tensor     = is_broadcast_input_2 ? src1 : src0;
+        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;
 
         // Clear X Dimension on execution window as we handle manually
         non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));
 
         Iterator broadcast_input(broadcast_tensor, broadcast_win);
         Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
-        Iterator output(out, win);
+        Iterator output(dst, win);
 
         execute_window_loop(win, [&](const Coordinates &)
         {
@@ -114,9 +114,9 @@ void arithmetic_addition_qsymm16_sve(const ITensor *in1, const ITensor *in2, ITe
         input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
         input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));
 
-        Iterator input1(in1, input1_win);
-        Iterator input2(in2, input2_win);
-        Iterator output(out, win);
+        Iterator input1(src0, input1_win);
+        Iterator input2(src1, input2_win);
+        Iterator output(dst, win);
 
         execute_window_loop(win, [&](const Coordinates &)
         {
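All of the SVE kernels in this patch drive their inner loop with whilelt predicates rather than a separate scalar tail: the predicate is recomputed after each step and naturally masks the final partial vector. A self-contained sketch of that loop shape (assumes an SVE-enabled toolchain; illustrative, not the library's code):

    #include <arm_sve.h>
    #include <cstdint>

    void add_s16_sve_sketch(const int16_t *a, const int16_t *b, int16_t *out, int64_t n)
    {
        int64_t  x  = 0;
        svbool_t pg = svwhilelt_b16(x, n); // lane i is active while x + i < n
        do
        {
            const svint16_t va = svld1_s16(pg, a + x); // predicated loads
            const svint16_t vb = svld1_s16(pg, b + x);
            svst1_s16(pg, out + x, svqadd_s16(va, vb)); // saturating add, predicated store
            x += svcnth();                // advance by the hardware vector length in halfwords
            pg = svwhilelt_b16(x, n);     // recompute predicate; masks the final partial vector
        } while(svptest_any(svptrue_b16(), pg));
    }

Because svcnth() is the runtime vector length, the same binary adapts to any SVE implementation width without per-width code paths.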
diff --git a/src/runtime/NEON/functions/NEArithmeticAddition.cpp b/src/runtime/NEON/functions/NEArithmeticAddition.cpp
index 1eaccf3396..2e4755b949 100644
--- a/src/runtime/NEON/functions/NEArithmeticAddition.cpp
+++ b/src/runtime/NEON/functions/NEArithmeticAddition.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,37 +23,19 @@
  */
 #include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
 
-#include "arm_compute/core/ITensor.h"
-#include "src/core/NEON/kernels/NEArithmeticAdditionKernel.h"
+#include "arm_compute/core/Validate.h"
+#include "src/runtime/cpu/operators/CpuAdd.h"
 
 #include <utility>
 
 namespace arm_compute
 {
-namespace experimental
-{
-NEArithmeticAddition::~NEArithmeticAddition() = default;
-
-void NEArithmeticAddition::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info)
-{
-    ARM_COMPUTE_UNUSED(act_info);
-    auto k = std::make_unique<NEArithmeticAdditionKernel>();
-    k->configure(input1, input2, output, policy);
-    _kernel = std::move(k);
-}
-Status NEArithmeticAddition::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info)
-{
-    ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
-    return NEArithmeticAdditionKernel::validate(input1, input2, output, policy);
-}
-} // namespace experimental
-
 struct NEArithmeticAddition::Impl
 {
-    const ITensor                                       *src_0{ nullptr };
-    const ITensor                                       *src_1{ nullptr };
-    ITensor                                             *dst{ nullptr };
-    std::unique_ptr<experimental::NEArithmeticAddition>  op{ nullptr };
+    const ITensor                *src_0{ nullptr };
+    const ITensor                *src_1{ nullptr };
+    ITensor                      *dst{ nullptr };
+    std::unique_ptr<cpu::CpuAdd>  op{ nullptr };
 };
 
 NEArithmeticAddition::NEArithmeticAddition()
@@ -66,7 +48,7 @@ NEArithmeticAddition::~NEArithmeticAddition() =
 
 Status NEArithmeticAddition::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info)
 {
-    return experimental::NEArithmeticAddition::validate(input1, input2, output, policy, act_info);
+    return cpu::CpuAdd::validate(input1, input2, output, policy, act_info);
 }
 
 void NEArithmeticAddition::configure(const ITensor *input1, const ITensor *input2, ITensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info)
@@ -74,8 +56,8 @@ void NEArithmeticAddition::configure(const ITensor *input1, const ITensor *input
     _impl->src_0 = input1;
     _impl->src_1 = input2;
     _impl->dst   = output;
-    _impl->op    = std::make_unique<experimental::NEArithmeticAddition>();
-    _impl->op->configure(input1->info(), input2->info(), output->info(), policy, act_info);
+    _impl->op    = std::make_unique<cpu::CpuAdd>();
+    _impl->op->configure(_impl->src_0->info(), _impl->src_1->info(), _impl->dst->info(), policy, act_info);
 }
 
 void NEArithmeticAddition::run()
diff --git a/src/runtime/cpu/operators/CpuAdd.cpp b/src/runtime/cpu/operators/CpuAdd.cpp
new file mode 100644
index 0000000000..280350f589
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuAdd.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/cpu/operators/CpuAdd.h"
+
+#include "src/core/cpu/kernels/CpuAddKernel.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+void CpuAdd::configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst, ConvertPolicy policy, const ActivationLayerInfo &act_info)
+{
+    ARM_COMPUTE_UNUSED(act_info);
+    auto k = std::make_unique<kernels::CpuAddKernel>();
+    k->configure(src0, src1, dst, policy);
+    _kernel = std::move(k);
+}
+
+Status CpuAdd::validate(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst, ConvertPolicy policy, const ActivationLayerInfo &act_info)
+{
+    ARM_COMPUTE_UNUSED(act_info);
+    return kernels::CpuAddKernel::validate(src0, src1, dst, policy);
+}
+} // namespace cpu
+} // namespace arm_compute
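Note that CpuAdd holds no tensor memory: configure() builds the kernel from ITensorInfo descriptors only. At execution time the caller supplies the actual tensors through a pack, and NEArithmeticAddition::run() (its body is not shown in this hunk) presumably forwards its stored pointers along these lines, with the ACL_SRC_0/ACL_SRC_1/ACL_DST slots assumed per the library's operator convention:

    void NEArithmeticAddition::run()
    {
        ITensorPack pack;
        pack.add_tensor(TensorType::ACL_SRC_0, _impl->src_0); // first addend
        pack.add_tensor(TensorType::ACL_SRC_1, _impl->src_1); // second addend
        pack.add_tensor(TensorType::ACL_DST, _impl->dst);     // destination
        _impl->op->run(pack);
    }

This split lets one configured operator be reused across different tensor allocations, which is the point of moving from the kernel-owning experimental function to the stateless cpu::CpuAdd operator.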
diff --git a/src/runtime/cpu/operators/CpuAdd.h b/src/runtime/cpu/operators/CpuAdd.h
new file mode 100644
index 0000000000..7ddc69b49a
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuAdd.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_ADD_H
+#define ARM_COMPUTE_CPU_ADD_H
+
+#include "src/runtime/cpu/ICpuOperator.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+/** Basic function to run @ref CpuAddKernel */
+class CpuAdd : public ICpuOperator
+{
+public:
+    /** Constructor */
+    CpuAdd() = default;
+    /** Initialise the kernel's input, dst and border mode.
+     *
+     * Valid configurations (src0,src1) -> dst :
+     *
+     *   - (U8,U8)                         -> U8
+     *   - (U8,U8)                         -> S16
+     *   - (S16,U8)                        -> S16
+     *   - (U8,S16)                        -> S16
+     *   - (S16,S16)                       -> S16
+     *   - (S32,S32)                       -> S32
+     *   - (F16,F16)                       -> F16
+     *   - (F32,F32)                       -> F32
+     *   - (QASYMM8,QASYMM8)               -> QASYMM8
+     *   - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
+     *   - (QSYMM16,QSYMM16)               -> QSYMM16
+     *
+     * @param[in]  src0     First input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32
+     * @param[in]  src1     Second input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32
+     * @param[out] dst      The dst tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32.
+     * @param[in]  policy   Overflow policy.
+     * @param[in]  act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     *
+     */
+    void configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref CpuAddKernel
+     *
+     * @param[in] src0     First input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32
+     * @param[in] src1     Second input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32
+     * @param[in] dst      The dst tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32.
+     * @param[in] policy   Overflow policy.
+     * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+};
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_ADD_H */
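A minimal in-tree usage sketch of the new operator, under the assumption that validate() is called before configure() as the docs above suggest (CpuAdd.h is an internal src/ header, so this only compiles inside the library tree; names other than CpuAdd's own API are illustrative):

    #include "arm_compute/core/TensorInfo.h"
    #include "src/runtime/cpu/operators/CpuAdd.h" // internal header: in-tree use only

    using namespace arm_compute;

    void cpu_add_example()
    {
        TensorInfo src0(TensorShape(27U, 13U), 1, DataType::F32);
        TensorInfo src1(TensorShape(27U, 13U), 1, DataType::F32);
        TensorInfo dst(TensorShape(27U, 13U), 1, DataType::F32);

        // Validate first: an unsupported (src0,src1) -> dst combination comes
        // back as an error Status instead of asserting inside configure().
        if(bool(cpu::CpuAdd::validate(&src0, &src1, &dst, ConvertPolicy::SATURATE)))
        {
            cpu::CpuAdd add;
            add.configure(&src0, &src1, &dst, ConvertPolicy::SATURATE);
            // Execution then goes through run(ITensorPack &) with backing
            // ITensor objects, as sketched for NEArithmeticAddition::run() above.
        }
    }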