diff options
author | SiCongLi <sicong.li@arm.com> | 2021-10-06 15:25:57 +0100 |
---|---|---|
committer | SiCong Li <sicong.li@arm.com> | 2021-10-28 11:00:52 +0000 |
commit | 1af5416917268692fcd4b34b1d7ffebd3a2aea8a (patch) | |
tree | 81833ecad401eeb0101fb0d464728df8b699caf8 /src/core/experimental | |
parent | 49956ccf029ff4c1873e3a6702b5bede95d81f7a (diff) | |
download | ComputeLibrary-1af5416917268692fcd4b34b1d7ffebd3a2aea8a.tar.gz |
Add experimental PostOp interface to ClGemmMatrixMultiplyReshapedKernel Part 1
This interface supports the fusion of multiple elementwise operations
Partially resolves: COMPMID-4435
Change-Id: If68dd7dd98dcf239fde7cb1f0a4a6d4d1e899a6f
Signed-off-by: SiCongLi <sicong.li@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6483
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core/experimental')
-rw-r--r-- | src/core/experimental/PostOp.h | 171 |
1 file changed, 171 insertions, 0 deletions
diff --git a/src/core/experimental/PostOp.h b/src/core/experimental/PostOp.h new file mode 100644 index 0000000000..64414d2050 --- /dev/null +++ b/src/core/experimental/PostOp.h @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2021 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef ARM_COMPUTE_EXPERIMENTAL_POSTOP +#define ARM_COMPUTE_EXPERIMENTAL_POSTOP + +#include "arm_compute/core/experimental/IPostOp.h" + +#include "arm_compute/core/Types.h" +#include "arm_compute/core/experimental/Types.h" +#include "support/Cast.h" + +#include <vector> + +/** (EXPERIMENTAL_POST_OPS) */ +namespace arm_compute +{ +namespace experimental +{ +template <typename TensorRelatedT> +struct PostOpAct : public IPostOp<TensorRelatedT> +{ +public: + PostOpAct(const ActivationLayerInfo &act_info) + : _act_info{ act_info } + { + } + // NOTE: PostOps do not own any resources pointed to by TensorRelatedT if it's a pointer type, thus allow shallow copy + ~PostOpAct() override = default; + PostOpAct(const PostOpAct &) = default; + PostOpAct &operator=(const PostOpAct &) = default; + PostOpAct(PostOpAct &&) = default; + PostOpAct &operator=(PostOpAct &&) = default; + + int prev_dst_pos() const override + { + return 0; + } + PostOpType type() const override + { + return PostOpType::Activation; + } + std::vector<TensorRelatedT *> arguments() override + { + return {}; + } + std::vector<const TensorRelatedT *> arguments() const override + { + return {}; + } + std::unique_ptr<IPostOp<TensorRelatedT>> clone() const override + { + return std::make_unique<PostOpAct<TensorRelatedT>>(*this); + } + ActivationLayerInfo _act_info; +}; + +template <typename TensorRelatedT> +struct PostOpEltwiseAdd : public IPostOp<TensorRelatedT> +{ +public: + PostOpEltwiseAdd(TensorRelatedT addend, int prev_op_arg_pos, ConvertPolicy policy) + : _addend{ addend }, + _prev_op_arg_pos{ prev_op_arg_pos }, + _policy{ policy } + { + } + // NOTE: PostOps do not own any resources pointed to by TensorRelatedT if it's a pointer type, thus allow shallow copy + ~PostOpEltwiseAdd() override = default; + PostOpEltwiseAdd(const PostOpEltwiseAdd &) = default; + PostOpEltwiseAdd &operator=(const PostOpEltwiseAdd &) = default; + PostOpEltwiseAdd(PostOpEltwiseAdd &&) = default; + PostOpEltwiseAdd 
&operator=(PostOpEltwiseAdd &&) = default; + int prev_dst_pos() const override + { + return _prev_op_arg_pos; + } + PostOpType type() const override + { + return PostOpType::Eltwise_Add; + } + std::vector<TensorRelatedT *> arguments() override + { + return { &_addend }; + } + std::vector<const TensorRelatedT *> arguments() const override + { + return { &_addend }; + } + std::unique_ptr<IPostOp<TensorRelatedT>> clone() const override + { + return std::make_unique<PostOpEltwiseAdd<TensorRelatedT>>(*this); + } + TensorRelatedT _addend; + int _prev_op_arg_pos; + ConvertPolicy _policy; +}; + +/** Transform a PostOpList of type FromTensorT to one of type ToTensorT */ +template <typename FromTensorT, typename ToTensorT> +PostOpList<ToTensorT> transform_post_op_list_arguments(const PostOpList<FromTensorT> &post_ops, std::function<ToTensorT(FromTensorT)> transform_arg) +{ + PostOpList<ToTensorT> transformed_post_ops; + int op_idx = 0; + for(const auto &post_op : post_ops.get_list()) + { + switch(post_op->type()) + { + case PostOpType::Activation: + { + const auto _post_op = utils::cast::polymorphic_downcast<const PostOpAct<FromTensorT> *>(post_op.get()); + transformed_post_ops.template push_back_op<PostOpAct<ToTensorT>>(_post_op->_act_info); + break; + } + case PostOpType::Eltwise_Add: + { + const auto _post_op = utils::cast::polymorphic_downcast<const PostOpEltwiseAdd<FromTensorT> *>(post_op.get()); + transformed_post_ops.template push_back_op<PostOpEltwiseAdd<ToTensorT>>(transform_arg(_post_op->_addend), _post_op->_prev_op_arg_pos, _post_op->_policy); + break; + } + default: + { + ARM_COMPUTE_ERROR("Unsupported PostOpType"); + } + } + ++op_idx; + } + return transformed_post_ops; +} + +/** Get post op argument TensorType from post op argument index in a flattened, ordered post op argument list */ +inline TensorType get_post_op_arg_type(size_t index) +{ + ARM_COMPUTE_ERROR_ON_MSG(static_cast<int>(index) > EXPERIMENTAL_ACL_POST_OP_ARG_LAST - 
EXPERIMENTAL_ACL_POST_OP_ARG_FIRST, "Post Op argument index is out of range"); + return static_cast<TensorType>(EXPERIMENTAL_ACL_POST_OP_ARG_FIRST + static_cast<int>(index)); +} + +template <typename T> +PostOpTypeSequence get_post_op_sequence(const PostOpList<T> &post_ops) +{ + PostOpTypeSequence post_op_sequence; + for(const auto &op : post_ops.get_list()) + { + post_op_sequence.push_back(op->type()); + } + return post_op_sequence; +} + +} // namespace experimental +} // namespace arm_compute +#endif //ARM_COMPUTE_EXPERIMENTAL_POSTOP
\ No newline at end of file |