From ae72a46e495742863dba44fcf5fdc673c9d2afbc Mon Sep 17 00:00:00 2001 From: Gunes Bayir Date: Sun, 29 Jan 2023 13:24:24 +0000 Subject: =?UTF-8?q?Add=20new=20operator=20AddMulAdd=20for=20Neon=E2=84=A2?= =?UTF-8?q?=20backend=20for=20Float/Quantized=20types?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a fused operator that merges Add + Mul + Add [+ Relu-based-Activation] layers and has an intermediate output after the first Add. It's supported for FP16/32/QASYMM8/QASYMM8_SIGNED data types. The subsequent Add and Mul are intended for scaling and the coefficients only have one dimension (per channel). The inputs are - input1 : nD tensor [X, Y, Z, W, ..] - input2 : nD tensor [X, Y, Z, W, ..] - add_coef : 1D tensor [X] - mul_coef : 1D tensor [X] The outputs are - out1 : nD tensor (intermediate output) [X, Y, Z, W, ..] - out2 : nD tensor (final output) [X, Y, Z, W, ..] The operation can be summarized as follows: out1 <- input1 + input2 out2 <- Act(out1 * mul_coef + add_coef) The activation function can be Identity, Relu, Bounded Relu or Lower/Upper Bounded Relu. The intermediate output can be skipped by providing a nullptr. The reason for providing this operator is to enable fusion of Residual-network patterns and to save computation by reducing memory traffic back and forth.
Resolves: COMPMID-5463 Signed-off-by: Gunes Bayir Change-Id: I8ef577aa623b036e9a9f655cc088493fd19a6109 Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9055 Comments-Addressed: Arm Jenkins Reviewed-by: Jakub Sujak Reviewed-by: Viet-Hoa Do Tested-by: Arm Jenkins Benchmark: Arm Jenkins --- src/runtime/NEON/functions/NEAddMulAdd.cpp | 82 ++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 src/runtime/NEON/functions/NEAddMulAdd.cpp (limited to 'src/runtime/NEON') diff --git a/src/runtime/NEON/functions/NEAddMulAdd.cpp b/src/runtime/NEON/functions/NEAddMulAdd.cpp new file mode 100644 index 0000000000..55008de5d6 --- /dev/null +++ b/src/runtime/NEON/functions/NEAddMulAdd.cpp @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "arm_compute/runtime/NEON/functions/NEAddMulAdd.h" + +#include "src/common/utils/Log.h" +#include "src/core/helpers/MemoryHelpers.h" +#include "src/cpu/operators/CpuAddMulAdd.h" + +namespace arm_compute +{ +struct NEAddMulAdd::Impl +{ + std::unique_ptr op{ nullptr }; + WorkspaceData workspace_tensors{}; + ITensorPack run_pack{}; + MemoryGroup memory_group{}; +}; + +NEAddMulAdd::NEAddMulAdd(std::shared_ptr memory_manager) + : _impl(std::make_unique()) +{ + _impl->memory_group = MemoryGroup(std::move(memory_manager)); +} + +NEAddMulAdd::~NEAddMulAdd() = default; + +void NEAddMulAdd::configure(ITensor *input1, ITensor *input2, ITensor *bn_mul, ITensor *bn_add, ITensor *add_output, + ITensor *final_output, const ConvertPolicy policy, const ActivationLayerInfo &act_info) +{ + ARM_COMPUTE_LOG_PARAMS(input1, input2, bn_mul, bn_add, add_output, final_output, policy, act_info); + + _impl->op = std::make_unique(); + _impl->op->configure(input1->info(), input2->info(), bn_mul->info(), + bn_add->info(), add_output != nullptr ? add_output->info() : nullptr, final_output->info(), policy, act_info); + + _impl->run_pack = + { + { TensorType::ACL_SRC_0, input1 }, + { TensorType::ACL_SRC_1, input2 }, + { TensorType::ACL_SRC_2, bn_mul }, + { TensorType::ACL_SRC_3, bn_add }, + { TensorType::ACL_DST_0, add_output }, + { TensorType::ACL_DST_1, final_output }, + }; + + _impl->workspace_tensors = manage_workspace(_impl->op->workspace(), _impl->memory_group, _impl->run_pack); +} + +Status NEAddMulAdd::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *bn_mul, + const ITensorInfo *bn_add, const ITensorInfo *add_output, const ITensorInfo *final_output, + ConvertPolicy policy, const ActivationLayerInfo &act_info) +{ + return cpu::CpuAddMulAdd::validate(input1, input2, bn_mul, bn_add, add_output, final_output, policy, act_info); +} + +void NEAddMulAdd::run() +{ + _impl->op->run(_impl->run_pack); +} +} // namespace arm_compute -- cgit v1.2.1