author     Jakub Sujak <jakub.sujak@arm.com>  2023-08-24 14:01:20 +0100
committer  Jakub Sujak <jakub.sujak@arm.com>  2023-09-04 14:41:16 +0000
commit     0d27b2ee8d811d66693555ac1e7be44d93e662e2 (patch)
tree       8b62a464a8bb9cd46702c8b5a60f3a97e3821b41 /arm_compute/core
parent     7ff03b67ba7ce669223f4d807e18fa3efa2f729b (diff)
Remove legacy PostOps code

PostOps was the experimental interface for Dynamic Fusion. It is now
replaced by the new Dynamic Fusion interface with code generation using
the Compute Kernel Writer.

Resolves: COMPMID-6190
Change-Id: I813b48facef2fd6f3aee332588886b4f9b3d33d8
Signed-off-by: Jakub Sujak <jakub.sujak@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10219
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'arm_compute/core')
-rw-r--r--  arm_compute/core/KernelDescriptors.h     |  74
-rw-r--r--  arm_compute/core/Types.h                 |  51
-rw-r--r--  arm_compute/core/experimental/IPostOp.h  | 180
-rw-r--r--  arm_compute/core/experimental/PostOps.h  | 163
-rw-r--r--  arm_compute/core/experimental/Types.h    |  13
5 files changed, 64 insertions(+), 417 deletions(-)
diff --git a/arm_compute/core/KernelDescriptors.h b/arm_compute/core/KernelDescriptors.h
index 305766e825..2bf5dee18c 100644
--- a/arm_compute/core/KernelDescriptors.h
+++ b/arm_compute/core/KernelDescriptors.h
@@ -21,12 +21,11 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ACL_ARM_COMPUTE_CORE_KERNELDESCRIPTORS
-#define ACL_ARM_COMPUTE_CORE_KERNELDESCRIPTORS
+#ifndef ACL_ARM_COMPUTE_CORE_KERNELDESCRIPTORS_H
+#define ACL_ARM_COMPUTE_CORE_KERNELDESCRIPTORS_H
#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Types.h"
-#include "arm_compute/core/experimental/IPostOp.h"
#include "arm_compute/function_info/ActivationLayerInfo.h"
namespace arm_compute
@@ -60,46 +59,43 @@ struct GEMMKernelInfo
{
GEMMKernelInfo() = default;
GEMMKernelInfo(
- unsigned int im,
- unsigned int in,
- unsigned int ik,
- unsigned int idepth_output_gemm3d,
- bool ireinterpret_input_as_3d,
- bool ibroadcast_bias,
- bool ifp_mixed_precision,
- bool ihas_pad_y,
- ActivationLayerInfo iactivation_info,
- int inmult_transpose1xW_width,
- int imult_interleave4x4_height,
- GEMMLHSMatrixInfo ilhs_info,
- GEMMRHSMatrixInfo irhs_info,
- int32_t ina_offset,
- int32_t inb_offset,
- const experimental::PostOpList<ITensorInfo *> &ipost_ops = experimental::PostOpList<ITensorInfo *> {})
+ unsigned int im,
+ unsigned int in,
+ unsigned int ik,
+ unsigned int idepth_output_gemm3d,
+ bool ireinterpret_input_as_3d,
+ bool ibroadcast_bias,
+ bool ifp_mixed_precision,
+ bool ihas_pad_y,
+ ActivationLayerInfo iactivation_info,
+ int inmult_transpose1xW_width,
+ int imult_interleave4x4_height,
+ GEMMLHSMatrixInfo ilhs_info,
+ GEMMRHSMatrixInfo irhs_info,
+ int32_t ina_offset,
+ int32_t inb_offset)
: m(im), n(in), k(ik), depth_output_gemm3d(idepth_output_gemm3d), reinterpret_input_as_3d(ireinterpret_input_as_3d), broadcast_bias(ibroadcast_bias), fp_mixed_precision(ifp_mixed_precision),
has_pad_y(ihas_pad_y), activation_info(iactivation_info), mult_transpose1xW_width(inmult_transpose1xW_width), mult_interleave4x4_height(imult_interleave4x4_height), lhs_info(ilhs_info),
- rhs_info(irhs_info), a_offset(ina_offset), b_offset(inb_offset), post_ops(ipost_ops)
+ rhs_info(irhs_info), a_offset(ina_offset), b_offset(inb_offset)
{
}
- unsigned int m{ 0 }; /**< Number of LHS rows*/
- unsigned int n{ 0 }; /**< Number of RHS columns*/
- unsigned int k{ 0 }; /**< Number of LHS columns or RHS rows */
- unsigned int depth_output_gemm3d{ 0 }; /**< Depth of the output tensor in case it is reinterpreted as 3D */
- bool reinterpret_input_as_3d{ false }; /**< Flag used to reinterpret the input as 3D */
- bool broadcast_bias{ false }; /**< Flag used to broadcast the bias addition */
- bool fp_mixed_precision{ false }; /**< Flag used to indicate wider accumulators (32 bit instead of 16 for FP16). */
- bool has_pad_y{ false }; /**< Flag used to indicate if the input/output tensors have internal pad on the y direction */
- ActivationLayerInfo activation_info{}; /**< Activation function to perform after the matrix multiplication */
- int mult_transpose1xW_width{ 1 }; /**< Multiplication factor for the width of the 1xW transposed block */
- int mult_interleave4x4_height{ 1 }; /**< Multiplication factor for the height of the 4x4 interleaved block */
- GEMMLHSMatrixInfo lhs_info{}; /**< LHS matrix information used to retrieve the number of rows processed by each thread */
- GEMMRHSMatrixInfo rhs_info{}; /**< RHS matrix information used for reshaping the RHS matrix */
- int32_t a_offset{ 0 }; /**< Offset to be added to each element of the matrix A */
- int32_t b_offset{ 0 }; /**< Offset to be added to each element of the matrix B */
- GEMMLowpOutputStageInfo output_stage{}; /**< GEMMLowp output stage information */
- experimental::PostOpList<ITensorInfo *> post_ops{}; /**< (EXPERIMENTAL_POST_OPS) Specifies a list of post ops to be fused after the main op. Note unsupported post ops would not be executed.
- * If specified, automatically disable the @ref activation_info */
+ unsigned int m{ 0 }; /**< Number of LHS rows*/
+ unsigned int n{ 0 }; /**< Number of RHS columns*/
+ unsigned int k{ 0 }; /**< Number of LHS columns or RHS rows */
+ unsigned int depth_output_gemm3d{ 0 }; /**< Depth of the output tensor in case it is reinterpreted as 3D */
+ bool reinterpret_input_as_3d{ false }; /**< Flag used to reinterpret the input as 3D */
+ bool broadcast_bias{ false }; /**< Flag used to broadcast the bias addition */
+ bool fp_mixed_precision{ false }; /**< Flag used to indicate wider accumulators (32 bit instead of 16 for FP16). */
+ bool has_pad_y{ false }; /**< Flag used to indicate if the input/output tensors have internal pad on the y direction */
+ ActivationLayerInfo activation_info{}; /**< Activation function to perform after the matrix multiplication */
+ int mult_transpose1xW_width{ 1 }; /**< Multiplication factor for the width of the 1xW transposed block */
+ int mult_interleave4x4_height{ 1 }; /**< Multiplication factor for the height of the 4x4 interleaved block */
+ GEMMLHSMatrixInfo lhs_info{}; /**< LHS matrix information used to retrieve the number of rows processed by each thread */
+ GEMMRHSMatrixInfo rhs_info{}; /**< RHS matrix information used for reshaping the RHS matrix */
+ int32_t a_offset{ 0 }; /**< Offset to be added to each element of the matrix A */
+ int32_t b_offset{ 0 }; /**< Offset to be added to each element of the matrix B */
+ GEMMLowpOutputStageInfo output_stage{}; /**< GEMMLowp output stage information */
};
/** Compute descriptor used by the depthwise convolution native kernel */
@@ -240,4 +236,4 @@ struct MatMulKernelInfo
bool export_rhs_to_cl_image{ false }; /**< Flag to know whether the RHS tensor should be exported to cl_image*/
};
} // namespace arm_compute
-#endif /* ACL_ARM_COMPUTE_CORE_KERNELDESCRIPTORS */
+#endif // ACL_ARM_COMPUTE_CORE_KERNELDESCRIPTORS_H
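
After this change, GEMMKernelInfo no longer accepts a trailing PostOpList; fused activations are expressed only through activation_info. A minimal post-patch construction sketch (all argument values are arbitrary placeholders):

    #include "arm_compute/core/KernelDescriptors.h"

    using namespace arm_compute;

    // Configure a 64x64x32 GEMM with default LHS/RHS reshape info and no
    // fused activation. Note the PostOpList parameter is gone.
    GEMMKernelInfo info(
        /* im */ 64U, /* in */ 64U, /* ik */ 32U,
        /* idepth_output_gemm3d */ 0U,
        /* ireinterpret_input_as_3d */ false,
        /* ibroadcast_bias */ false,
        /* ifp_mixed_precision */ false,
        /* ihas_pad_y */ false,
        ActivationLayerInfo{},
        /* inmult_transpose1xW_width */ 1,
        /* imult_interleave4x4_height */ 1,
        GEMMLHSMatrixInfo{}, GEMMRHSMatrixInfo{},
        /* ina_offset */ 0, /* inb_offset */ 0);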
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 12d860205e..9264cefe3e 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ACL_ARM_COMPUTE_CORE_TYPES
-#define ACL_ARM_COMPUTE_CORE_TYPES
+#ifndef ACL_ARM_COMPUTE_CORE_TYPES_H
+#define ACL_ARM_COMPUTE_CORE_TYPES_H
/** The following symbols have been moved to:
* half
@@ -65,7 +65,6 @@
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Size3D.h"
#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/experimental/IPostOp.h"
#include "arm_compute/core/utils/misc/Macros.h"
#include "support/Bfloat16.h"
@@ -751,14 +750,14 @@ public:
}
private:
- std::vector<float> _min_sizes;
- std::vector<float> _variances;
- float _offset;
- bool _flip;
- bool _clip;
- std::vector<float> _max_sizes;
- std::vector<float> _aspect_ratios;
- Coordinates2D _img_size;
+ std::vector<float> _min_sizes;
+ std::vector<float> _variances;
+ float _offset;
+ bool _flip;
+ bool _clip;
+ std::vector<float> _max_sizes;
+ std::vector<float> _aspect_ratios;
+ Coordinates2D _img_size;
std::array<float, 2> _steps;
};
@@ -1003,15 +1002,15 @@ public:
}
private:
- unsigned int _max_detections;
- unsigned int _max_classes_per_detection;
- float _nms_score_threshold;
- float _iou_threshold;
- unsigned int _num_classes;
+ unsigned int _max_detections;
+ unsigned int _max_classes_per_detection;
+ float _nms_score_threshold;
+ float _iou_threshold;
+ unsigned int _num_classes;
std::array<float, 4> _scales_values;
- bool _use_regular_nms;
- unsigned int _detection_per_class;
- bool _dequantize_scores;
+ bool _use_regular_nms;
+ unsigned int _detection_per_class;
+ bool _dequantize_scores;
};
/** Pooling Layer Information struct*/
@@ -1462,13 +1461,13 @@ public:
}
private:
- float _img_width;
- float _img_height;
- float _scale;
- bool _apply_scale;
- bool _correct_transform_coords;
+ float _img_width;
+ float _img_height;
+ float _scale;
+ bool _apply_scale;
+ bool _correct_transform_coords;
std::array<float, 4> _weights;
- float _bbox_xform_clip;
+ float _bbox_xform_clip;
};
/** Normalization Layer Information class */
@@ -1915,4 +1914,4 @@ struct IOFormatInfo
/** Class for holding information related to cropping */
using CropInfo = Padding2D;
} // namespace arm_compute
-#endif /* ACL_ARM_COMPUTE_CORE_TYPES */
+#endif // ACL_ARM_COMPUTE_CORE_TYPES_H
diff --git a/arm_compute/core/experimental/IPostOp.h b/arm_compute/core/experimental/IPostOp.h
deleted file mode 100644
index 567a4023c0..0000000000
--- a/arm_compute/core/experimental/IPostOp.h
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_EXPERIMENTAL_IPOSTOP
-#define ARM_COMPUTE_EXPERIMENTAL_IPOSTOP
-
-#include <memory>
-#include <numeric>
-#include <vector>
-
-namespace arm_compute
-{
-namespace experimental
-{
-/** Type of Post Op */
-enum class PostOpType
-{
- Activation,
- Eltwise_Add,
- Eltwise_PRelu
-};
-/** An ordered sequence of type of Post Ops */
-using PostOpTypeSequence = std::vector<PostOpType>;
-/** An elementwise n-ary operation that can be appended to and fused with (at kernel-level) other operators
- * It contains:
- * 1. The attributes of the original operator.
- * 2. Any additional tensor argument.
- * 3. The position of the previous op's dst tensor in its argument list ( @ref prev_dst_pos )
- *
- * For example, a series of chained ops:
- *
- * div(src1, relu(conv(src0, weights, bias, conv_info), act_info), div_info)
- *
- * translates to
- *
- * dst = conv(src0, weights, bias, conv_info) // main op
- * dst = relu(dst, act_info) // previous dst is placed in the first (and only) argument
- * dst = div(src1, dst, div_info) // previous dst is placed in the second argument
- *
- * which in turn translates to:
- *
- * main op: conv(src0, weights, bias, conv_info)
- * post op1: relu(act_info, prev_dst_pos = 0)
- * post op2: div(div_info, src1, prev_dst_pos = 1)
- *
- * @note: On Broadcasting
- * For n-ary post ops, the tensor arguments must not "widen" the dst tensor of the main op
- * For example, for a dst of shape [14, 1, 34]:
- * * post_op_arg1 = [1, 1, 34] is allowed: broadcast in dim 0
- * * post_op_arg1 = [14, 1, 34] is allowed: no broadcast
- * post_op_arg1 = [1, 1, 1] is allowed: broadcast in dims 0, 1 and 2
- * * post_op_arg1 = [14, 15, 34] is NOT allowed: broadcast widens the dst tensor
- *
- * @note: On Data layout
- * All post ops are data layout agnostic. This means post ops do not have an inherent idea of "width", "height" and so on.
- * Should we want to perform a post op with 2 tensors of different data layouts (where data layouts are significant to both),
- * then we need to perform necessary permutation op beforehand to unify their data layout before they can be fused with a post op
- *
- * Note although post ops themselves should be able to support any data layout, the main op they fuse to may impose
- * additional restrictions in the presence of post ops. For example, the implementation of a gemm op may only allow
- * NHWC data layout if post ops are provided. Such restrictions are main op implementation specific.
- *
- * @note: PostOps do not own any resources pointed to by TensorRelatedT if it's a pointer type
- * @note: If TensorRelatedT points to a resource, IPostOp assumes that resource is valid throughout its lifetime
- * and the lifetime of its copies. This is almost guaranteed as IPostOp is only meant to be used at configure time
- * after the ITensor or ITensorInfo objects are already constructed
- */
-template <typename TensorRelatedT>
-struct IPostOp
-{
- /** Get the arity of the post op
- * @note: This is one fewer than the arity of the original op, because we implicitly pass the previous op's dst
- * tensor as one of the arguments
- */
- size_t arity() const
- {
- return arguments().size();
- }
- /** The position of previous op's dst in current op's argument list */
- virtual int prev_dst_pos() const = 0;
- /** The IPostOp type */
- virtual PostOpType type() const = 0;
- /** The argument tensors
- * The order of the argument tensor is strictly preserved
- */
- virtual std::vector<TensorRelatedT *> arguments() = 0;
- virtual std::vector<const TensorRelatedT *> arguments() const = 0;
- /** Clone method used in cases where PostOps are owned by unique_ptr
- * @note: This performs a shallow copy of the TensorRelatedT if TensorRelatedT points to a resource
- */
- virtual std::unique_ptr<IPostOp<TensorRelatedT>> clone() const = 0;
- virtual ~IPostOp()
- {
- }
-};
-
-/** A sequence of PostOps that can be appended to the end of other operators */
-template <typename TensorRelatedT>
-class PostOpList
-{
-public:
- /** Constructor */
- PostOpList() = default;
- /** Destructor */
- ~PostOpList() = default;
- PostOpList(const PostOpList &other)
- {
- for(const auto &op : other._post_ops)
- {
- this->_post_ops.push_back(op->clone());
- }
- }
- PostOpList &operator=(const PostOpList &other)
- {
- PostOpList tmp{ other };
- std::swap(tmp, *this);
- return *this;
- }
- PostOpList(PostOpList &&other) = default;
- PostOpList &operator=(PostOpList &&other) = default;
-
- /** Add a new post op at the end of the list */
- template <typename OpT, typename... Args>
- void push_back_op(Args &&... args)
- {
- _post_ops.push_back(std::make_unique<OpT>(std::forward<Args>(args)...));
- }
-
- /** Number of post ops */
- size_t size() const
- {
- return _post_ops.size();
- }
-
- /** Total number of arguments across all post ops */
- size_t total_num_arguments() const
- {
- return std::accumulate(_post_ops.begin(), _post_ops.end(), 0, [](size_t op1_arity, const auto & op2)
- {
- return op1_arity + op2->arity();
- });
- }
-
- /** Get the underlying post op list */
- std::vector<std::unique_ptr<IPostOp<TensorRelatedT>>> &get_list()
- {
- return _post_ops;
- }
- const std::vector<std::unique_ptr<IPostOp<TensorRelatedT>>> &get_list() const
- {
- return _post_ops;
- }
-
-private:
- std::vector<std::unique_ptr<IPostOp<TensorRelatedT>>> _post_ops{};
-};
-
-} // namespace experimental
-} // namespace arm_compute
-#endif //ARM_COMPUTE_EXPERIMENTAL_IPOSTOP
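
For reference, here is how the removed interface was implemented and consumed. A minimal sketch against the deleted header, using a hypothetical TensorTag stand-in for the tensor type (the library instantiated IPostOp with ITensorInfo *):

    #include "arm_compute/core/experimental/IPostOp.h" // removed by this patch
    #include <cassert>

    namespace exp = arm_compute::experimental;

    struct TensorTag{}; // hypothetical stand-in for ITensorInfo

    // A unary post op: no extra tensor arguments, so arity() == 0 and the
    // previous op's dst is its only (implicit) input, at position 0.
    struct MyUnaryPostOp : public exp::IPostOp<TensorTag>
    {
        int prev_dst_pos() const override { return 0; }
        exp::PostOpType type() const override { return exp::PostOpType::Activation; }
        std::vector<TensorTag *> arguments() override { return {}; }
        std::vector<const TensorTag *> arguments() const override { return {}; }
        std::unique_ptr<exp::IPostOp<TensorTag>> clone() const override
        {
            return std::make_unique<MyUnaryPostOp>(*this);
        }
    };

    void example()
    {
        exp::PostOpList<TensorTag> list{};
        list.push_back_op<MyUnaryPostOp>();
        assert(list.size() == 1);
        assert(list.total_num_arguments() == 0); // a unary op adds no extra arguments
    }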
diff --git a/arm_compute/core/experimental/PostOps.h b/arm_compute/core/experimental/PostOps.h
deleted file mode 100644
index a5585bab5d..0000000000
--- a/arm_compute/core/experimental/PostOps.h
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Copyright (c) 2021, 2023 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_EXPERIMENTAL_POSTOPS
-#define ARM_COMPUTE_EXPERIMENTAL_POSTOPS
-
-#include "arm_compute/core/experimental/IPostOp.h"
-
-#include "arm_compute/core/Types.h"
-#include "arm_compute/function_info/ActivationLayerInfo.h"
-
-#include <vector>
-
-namespace arm_compute
-{
-namespace experimental
-{
-/** (EXPERIMENTAL_POST_OPS)
- * Implementation of specific IPostOps
-*/
-
-template <typename TensorRelatedT>
-struct PostOpAct : public IPostOp<TensorRelatedT>
-{
-public:
- PostOpAct(const ActivationLayerInfo &act_info)
- : _act_info{ act_info }
- {
- }
- // NOTE: PostOps do not own any resources pointed to by TensorRelatedT if it's a pointer type, thus allow shallow copy
- ~PostOpAct() override = default;
- PostOpAct(const PostOpAct &) = default;
- PostOpAct &operator=(const PostOpAct &) = default;
- PostOpAct(PostOpAct &&) = default;
- PostOpAct &operator=(PostOpAct &&) = default;
-
- int prev_dst_pos() const override
- {
- return 0;
- }
- PostOpType type() const override
- {
- return PostOpType::Activation;
- }
- std::vector<TensorRelatedT *> arguments() override
- {
- return {};
- }
- std::vector<const TensorRelatedT *> arguments() const override
- {
- return {};
- }
- std::unique_ptr<IPostOp<TensorRelatedT>> clone() const override
- {
- return std::make_unique<PostOpAct<TensorRelatedT>>(*this);
- }
- ActivationLayerInfo _act_info;
-};
-
-template <typename TensorRelatedT>
-struct PostOpEltwiseAdd : public IPostOp<TensorRelatedT>
-{
-public:
- PostOpEltwiseAdd(TensorRelatedT addend, int prev_dst_pos, ConvertPolicy policy)
- : _addend{ addend },
- _prev_dst_pos{ prev_dst_pos },
- _policy{ policy }
- {
- }
- // NOTE: PostOps do not own any resources pointed to by TensorRelatedT if it's a pointer type, thus allow shallow copy
- ~PostOpEltwiseAdd() override = default;
- PostOpEltwiseAdd(const PostOpEltwiseAdd &) = default;
- PostOpEltwiseAdd &operator=(const PostOpEltwiseAdd &) = default;
- PostOpEltwiseAdd(PostOpEltwiseAdd &&) = default;
- PostOpEltwiseAdd &operator=(PostOpEltwiseAdd &&) = default;
- int prev_dst_pos() const override
- {
- return _prev_dst_pos;
- }
- PostOpType type() const override
- {
- return PostOpType::Eltwise_Add;
- }
- std::vector<TensorRelatedT *> arguments() override
- {
- return { &_addend };
- }
- std::vector<const TensorRelatedT *> arguments() const override
- {
- return { &_addend };
- }
- std::unique_ptr<IPostOp<TensorRelatedT>> clone() const override
- {
- return std::make_unique<PostOpEltwiseAdd<TensorRelatedT>>(*this);
- }
- TensorRelatedT _addend;
- int _prev_dst_pos;
- ConvertPolicy _policy;
-};
-
-template <typename TensorRelatedT>
-struct PostOpEltwisePRelu : public IPostOp<TensorRelatedT>
-{
-public:
- PostOpEltwisePRelu(TensorRelatedT alpha_param, int prev_dst_pos, ConvertPolicy policy)
- : _alpha_param{ alpha_param },
- _prev_dst_pos{ prev_dst_pos },
- _policy{ policy }
- {
- }
- // NOTE: PostOps do not own any resources pointed to by TensorRelatedT if it's a pointer type, thus allow shallow copy
- ~PostOpEltwisePRelu() override = default;
- PostOpEltwisePRelu(const PostOpEltwisePRelu &) = default;
- PostOpEltwisePRelu &operator=(const PostOpEltwisePRelu &) = default;
- PostOpEltwisePRelu(PostOpEltwisePRelu &&) = default;
- PostOpEltwisePRelu &operator=(PostOpEltwisePRelu &&) = default;
- int prev_dst_pos() const override
- {
- return _prev_dst_pos;
- }
- PostOpType type() const override
- {
- return PostOpType::Eltwise_PRelu;
- }
- std::vector<TensorRelatedT *> arguments() override
- {
- return { &_alpha_param };
- }
- std::vector<const TensorRelatedT *> arguments() const override
- {
- return { &_alpha_param };
- }
- std::unique_ptr<IPostOp<TensorRelatedT>> clone() const override
- {
- return std::make_unique<PostOpEltwisePRelu<TensorRelatedT>>(*this);
- }
- TensorRelatedT _alpha_param;
- int _prev_dst_pos;
- ConvertPolicy _policy;
-};
-} // namespace experimental
-} // namespace arm_compute
-#endif //ARM_COMPUTE_EXPERIMENTAL_POSTOPS
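
A sketch of how these concrete ops were chained before this patch, mirroring the relu-then-eltwise example from the removed IPostOp.h documentation (the tensor shape and prev_dst_pos values here are illustrative assumptions):

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/experimental/PostOps.h" // removed by this patch

    using namespace arm_compute;

    void example()
    {
        // dst = conv(src, weights, bias)  // main op
        // dst = relu(dst)                 // post op 1: no extra tensor argument
        // dst = add(dst, addend)          // post op 2: one extra tensor argument
        TensorInfo addend_info(TensorShape(14U, 1U, 34U), 1, DataType::F32);

        experimental::PostOpList<ITensorInfo *> post_ops{};
        post_ops.push_back_op<experimental::PostOpAct<ITensorInfo *>>(
            ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo *>>(
            &addend_info, /* prev_dst_pos */ 0, ConvertPolicy::SATURATE);
    }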
diff --git a/arm_compute/core/experimental/Types.h b/arm_compute/core/experimental/Types.h
index 1995ab045e..8dd6812b58 100644
--- a/arm_compute/core/experimental/Types.h
+++ b/arm_compute/core/experimental/Types.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2022 Arm Limited.
+ * Copyright (c) 2020-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_EXPERIMENTAL_TYPES_H
-#define ARM_COMPUTE_EXPERIMENTAL_TYPES_H
+#ifndef ACL_ARM_COMPUTE_CORE_EXPERIMENTAL_TYPES_H
+#define ACL_ARM_COMPUTE_CORE_EXPERIMENTAL_TYPES_H
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/TensorShape.h"
@@ -78,11 +78,6 @@ enum TensorType : int32_t
ACL_VEC_COL_SUM = ACL_SRC_4,
ACL_SHIFTS = ACL_SRC_5,
ACL_MULTIPLIERS = ACL_SRC_6,
-
- // (EXPERIMENTAL_POST_OPS) Post ops arguments begin after everything else
- EXPERIMENTAL_ACL_POST_OP_ARG = 2048,
- EXPERIMENTAL_ACL_POST_OP_ARG_FIRST = EXPERIMENTAL_ACL_POST_OP_ARG,
- EXPERIMENTAL_ACL_POST_OP_ARG_LAST = EXPERIMENTAL_ACL_POST_OP_ARG_FIRST + 1024, // Max number of post op arguments
};
namespace experimental
@@ -134,4 +129,4 @@ struct MemoryInfo
using MemoryRequirements = std::vector<MemoryInfo>;
} // namespace experimental
} // namespace arm_compute
-#endif /* ARM_COMPUTE_EXPERIMENTAL_TYPES_H */
+#endif // ACL_ARM_COMPUTE_CORE_EXPERIMENTAL_TYPES_H
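
The removed EXPERIMENTAL_ACL_POST_OP_ARG values reserved ids 2048 onwards in the TensorType id space so that fused post-op tensors could be bound into an ITensorPack after the regular sources. A sketch of the old binding scheme (slot arithmetic reproduced from the removed enum; the tensors are placeholders):

    #include "arm_compute/core/experimental/Types.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void example()
    {
        Tensor src{}, post_op_arg0{};

        ITensorPack pack{};
        pack.add_tensor(ACL_SRC_0, &src);
        // Was: pack.add_tensor(EXPERIMENTAL_ACL_POST_OP_ARG_FIRST + 0, &post_op_arg0);
        pack.add_tensor(2048 + 0, &post_op_arg0); // first fused post-op argument
    }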