aboutsummaryrefslogtreecommitdiff
path: root/arm_compute/graph
diff options
context:
space:
mode:
authorJakub Sujak <jakub.sujak@arm.com>2023-08-24 14:01:20 +0100
committerJakub Sujak <jakub.sujak@arm.com>2023-09-04 14:41:16 +0000
commit0d27b2ee8d811d66693555ac1e7be44d93e662e2 (patch)
tree8b62a464a8bb9cd46702c8b5a60f3a97e3821b41 /arm_compute/graph
parent7ff03b67ba7ce669223f4d807e18fa3efa2f729b (diff)
downloadComputeLibrary-0d27b2ee8d811d66693555ac1e7be44d93e662e2.tar.gz
Remove legacy PostOps code
PostOps was the experimental interface for Dynamic Fusion. It is now replaced by the new Dynamic Fusion interface with code generation using the Compute Kernel Writer.

Resolves: COMPMID-6190
Change-Id: I813b48facef2fd6f3aee332588886b4f9b3d33d8
Signed-off-by: Jakub Sujak <jakub.sujak@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10219
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'arm_compute/graph')
-rw-r--r--arm_compute/graph/DataLayerVisitor.h9
-rw-r--r--arm_compute/graph/INode.h33
-rw-r--r--arm_compute/graph/INodeVisitor.h20
-rw-r--r--arm_compute/graph/TypePrinter.h14
-rw-r--r--arm_compute/graph/Types.h74
-rw-r--r--arm_compute/graph/backends/FunctionHelpers.h188
-rw-r--r--arm_compute/graph/backends/FusedConvolutionBatchNormalizationWithPostOpsFunction.h136
-rw-r--r--arm_compute/graph/backends/ValidateHelpers.h44
-rw-r--r--arm_compute/graph/nodes/FusedConvolutionBatchNormalizationWithPostOpsNode.h127
-rw-r--r--arm_compute/graph/nodes/FusedConvolutionWithPostOpNode.h132
-rw-r--r--arm_compute/graph/nodes/Nodes.h10
-rw-r--r--arm_compute/graph/nodes/NodesFwd.h10
-rw-r--r--arm_compute/graph/printers/DotGraphPrinter.h10
13 files changed, 54 insertions, 753 deletions
diff --git a/arm_compute/graph/DataLayerVisitor.h b/arm_compute/graph/DataLayerVisitor.h
index ac7f1c84ee..11d9f1ddc9 100644
--- a/arm_compute/graph/DataLayerVisitor.h
+++ b/arm_compute/graph/DataLayerVisitor.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_GRAPH_DATALAYERPRINTER_H
-#define ARM_COMPUTE_GRAPH_DATALAYERPRINTER_H
+#ifndef ACL_ARM_COMPUTE_GRAPH_DATALAYERVISITOR_H
+#define ACL_ARM_COMPUTE_GRAPH_DATALAYERVISITOR_H
#include "arm_compute/graph/IGraphPrinter.h"
#include "arm_compute/graph/INodeVisitor.h"
@@ -48,7 +48,6 @@ public:
void visit(ConvolutionLayerNode &n) override;
void visit(DepthwiseConvolutionLayerNode &n) override;
void visit(FusedConvolutionBatchNormalizationNode &n) override;
- void visit(FusedConvolutionBatchNormalizationWithPostOpsNode &n) override;
void visit(FusedDepthwiseConvolutionBatchNormalizationNode &n) override;
void visit(OutputNode &n) override;
@@ -59,4 +58,4 @@ private:
};
} // namespace graph
} // namespace arm_compute
-#endif /* ARM_COMPUTE_GRAPH_DATALAYERPRINTER_H */
+#endif // ACL_ARM_COMPUTE_GRAPH_DATALAYERVISITOR_H
diff --git a/arm_compute/graph/INode.h b/arm_compute/graph/INode.h
index becd672d90..5646ea81d2 100644
--- a/arm_compute/graph/INode.h
+++ b/arm_compute/graph/INode.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019,2021 Arm Limited.
+ * Copyright (c) 2018-2019,2021,2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_GRAPH_INODE_H
-#define ARM_COMPUTE_GRAPH_INODE_H
+#ifndef ACL_ARM_COMPUTE_GRAPH_INODE_H
+#define ACL_ARM_COMPUTE_GRAPH_INODE_H
#include "arm_compute/core/Error.h"
#include "arm_compute/graph/LayerDescriptors.h"
@@ -241,30 +241,19 @@ public:
* @return Assigned target of this node
*/
Target assigned_target() const;
- /** Post operator info list
- *
- * @return Post operator info list
- */
- const std::list<std::unique_ptr<ConvPostOpInfo>> &post_op_info_list() const;
- /** Post operator info list
- *
- * @return Post operator info list
- */
- std::list<std::unique_ptr<ConvPostOpInfo>> &post_op_info_list();
protected:
friend class Graph;
protected:
- Graph *_graph; /**< Backward reference to graph owning the node */
- NodeID _id; /**< Node ID */
- NodeParams _common_params; /**< Node common params */
- std::vector<TensorID> _outputs; /**< Output of the node */
- std::vector<EdgeID> _input_edges; /**< Inputs edge set */
- std::set<EdgeID> _output_edges; /**< Output edge set */
- Target _assigned_target; /**< Assigned target by the Graph executor */
- std::list<std::unique_ptr<ConvPostOpInfo>> _post_op_info_list; /**< Post operator info list */
+ Graph *_graph; /**< Backward reference to graph owning the node */
+ NodeID _id; /**< Node ID */
+ NodeParams _common_params; /**< Node common params */
+ std::vector<TensorID> _outputs; /**< Output of the node */
+ std::vector<EdgeID> _input_edges; /**< Inputs edge set */
+ std::set<EdgeID> _output_edges; /**< Output edge set */
+ Target _assigned_target; /**< Assigned target by the Graph executor */
};
} // namespace graph
} // namespace arm_compute
-#endif /* ARM_COMPUTE_GRAPH_INODE_H */
+#endif // ACL_ARM_COMPUTE_GRAPH_INODE_H
diff --git a/arm_compute/graph/INodeVisitor.h b/arm_compute/graph/INodeVisitor.h
index 97e95336ef..efe191adfc 100644
--- a/arm_compute/graph/INodeVisitor.h
+++ b/arm_compute/graph/INodeVisitor.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_GRAPH_INODEVISITOR_H
-#define ARM_COMPUTE_GRAPH_INODEVISITOR_H
+#ifndef ACL_ARM_COMPUTE_GRAPH_INODEVISITOR_H
+#define ACL_ARM_COMPUTE_GRAPH_INODEVISITOR_H
#include "arm_compute/graph/nodes/NodesFwd.h"
@@ -106,16 +106,6 @@ public:
* @param[in] n Node to visit.
*/
virtual void visit(FusedConvolutionBatchNormalizationNode &n) = 0;
- /** Visit FusedConvolutionBatchNormalizationWithPostOpsNode.
- *
- * @param[in] n Node to visit.
- */
- virtual void visit(FusedConvolutionBatchNormalizationWithPostOpsNode &n) = 0;
- /** Visit FusedConvolutionWithPostOpNode.
- *
- * @param[in] n Node to visit.
- */
- virtual void visit(FusedConvolutionWithPostOpNode &n) = 0;
/** Visit FusedDepthwiseConvolutionBatchNormalizationNode.
*
* @param[in] n Node to visit.
@@ -215,8 +205,6 @@ public:
virtual void visit(FlattenLayerNode &n) override;
virtual void visit(FullyConnectedLayerNode &n) override;
virtual void visit(FusedConvolutionBatchNormalizationNode &n) override;
- virtual void visit(FusedConvolutionBatchNormalizationWithPostOpsNode &n) override;
- virtual void visit(FusedConvolutionWithPostOpNode &n) override;
virtual void visit(FusedDepthwiseConvolutionBatchNormalizationNode &n) override;
virtual void visit(InputNode &n) override;
virtual void visit(NormalizationLayerNode &n) override;
@@ -240,4 +228,4 @@ public:
};
} // namespace graph
} // namespace arm_compute
-#endif /* ARM_COMPUTE_GRAPH_INODEVISITOR_H */
+#endif // ACL_ARM_COMPUTE_GRAPH_INODEVISITOR_H
diff --git a/arm_compute/graph/TypePrinter.h b/arm_compute/graph/TypePrinter.h
index 8f97bbf845..9df4eba5ec 100644
--- a/arm_compute/graph/TypePrinter.h
+++ b/arm_compute/graph/TypePrinter.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_GRAPH_TYPE_PRINTER_H
-#define ARM_COMPUTE_GRAPH_TYPE_PRINTER_H
+#ifndef ACL_ARM_COMPUTE_GRAPH_TYPEPRINTER_H
+#define ACL_ARM_COMPUTE_GRAPH_TYPEPRINTER_H
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Types.h"
@@ -116,12 +116,6 @@ inline ::std::ostream &operator<<(::std::ostream &os, const NodeType &node_type)
case NodeType::FusedConvolutionBatchNormalizationLayer:
os << "FusedConvolutionBatchNormalizationLayer";
break;
- case NodeType::FusedConvolutionBatchNormalizationLayerWithPostOpsLayer:
- os << "FusedConvolutionBatchNormalizationLayerWithPostOpsLayer";
- break;
- case NodeType::FusedConvolutionWithPostOp:
- os << "FusedConvolutionWithPostOp";
- break;
case NodeType::FusedDepthwiseConvolutionBatchNormalizationLayer:
os << "FusedDepthwiseConvolutionBatchNormalizationLayer";
break;
@@ -295,4 +289,4 @@ inline ::std::ostream &operator<<(::std::ostream &os, const DepthwiseConvolution
}
} // namespace graph
} // namespace arm_compute
-#endif /* ARM_COMPUTE_GRAPH_TYPE_PRINTER_H */
+#endif // ACL_ARM_COMPUTE_GRAPH_TYPEPRINTER_H
diff --git a/arm_compute/graph/Types.h b/arm_compute/graph/Types.h
index 167f7388d4..8d493403b3 100644
--- a/arm_compute/graph/Types.h
+++ b/arm_compute/graph/Types.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_GRAPH_TYPES_H
-#define ARM_COMPUTE_GRAPH_TYPES_H
+#ifndef ACL_ARM_COMPUTE_GRAPH_TYPES_H
+#define ACL_ARM_COMPUTE_GRAPH_TYPES_H
#include "arm_compute/core/Error.h"
#include "arm_compute/core/PixelValue.h"
@@ -41,32 +41,31 @@ namespace arm_compute
{
namespace graph
{
-using arm_compute::CLTunerMode;
using arm_compute::CLBackendType;
+using arm_compute::CLTunerMode;
using arm_compute::Status;
using arm_compute::Coordinates;
-using arm_compute::DataType;
using arm_compute::DataLayout;
using arm_compute::DataLayoutDimension;
-using arm_compute::TensorShape;
-using arm_compute::Size2D;
+using arm_compute::DataType;
using arm_compute::PermutationVector;
using arm_compute::PixelValue;
+using arm_compute::Size2D;
+using arm_compute::TensorShape;
using arm_compute::ActivationLayerInfo;
using arm_compute::DetectionOutputLayerInfo;
using arm_compute::DetectionPostProcessLayerInfo;
-using arm_compute::NormType;
-using arm_compute::NormalizationLayerInfo;
+using arm_compute::DimensionRoundingType;
using arm_compute::FullyConnectedLayerInfo;
+using arm_compute::InterpolationPolicy;
+using arm_compute::NormalizationLayerInfo;
+using arm_compute::NormType;
using arm_compute::PadStrideInfo;
using arm_compute::PoolingLayerInfo;
using arm_compute::PoolingType;
using arm_compute::PriorBoxLayerInfo;
-using arm_compute::DimensionRoundingType;
-using arm_compute::InterpolationPolicy;
-using arm_compute::experimental::PostOpType;
using GraphID = unsigned int;
using TensorID = unsigned int;
@@ -150,55 +149,6 @@ enum class FastMathHint
Disabled, /**< Fast math disabled for Convolution layer */
};
-/** Convolution post operator info */
-class ConvPostOpInfo
-{
-public:
- /** Returns post op type
- *
- * @return Post op type
- */
- virtual PostOpType type() const = 0;
- virtual ~ConvPostOpInfo()
- {
- }
-};
-
-class ConvPostOpInfoActivation : public ConvPostOpInfo
-{
-public:
- ConvPostOpInfoActivation(const ActivationLayerInfo &act)
- : _act(act)
- {
- }
- ~ConvPostOpInfoActivation() override
- {
- }
- PostOpType type() const override
- {
- return PostOpType::Activation;
- }
- ActivationLayerInfo _act;
-};
-
-class ConvPostOpInfoEltwiseAdd : public ConvPostOpInfo
-{
-public:
- ConvPostOpInfoEltwiseAdd(int arg_pos, const ConvertPolicy &policy)
- : _prev_op_dst_pos(arg_pos), _policy(policy)
- {
- }
- PostOpType type() const override
- {
- return PostOpType::Eltwise_Add;
- }
- ~ConvPostOpInfoEltwiseAdd() override
- {
- }
- int _prev_op_dst_pos;
- ConvertPolicy _policy;
-};
-
/** Supported nodes */
enum class NodeType
{
@@ -219,8 +169,6 @@ enum class NodeType
FlattenLayer,
FullyConnectedLayer,
FusedConvolutionBatchNormalizationLayer,
- FusedConvolutionWithPostOp,
- FusedConvolutionBatchNormalizationLayerWithPostOpsLayer,
FusedDepthwiseConvolutionBatchNormalizationLayer,
GenerateProposalsLayer,
L2NormalizeLayer,
@@ -278,4 +226,4 @@ struct NodeParams
};
} // namespace graph
} // namespace arm_compute
-#endif /* ARM_COMPUTE_GRAPH_TYPES_H */
+#endif // ACL_ARM_COMPUTE_GRAPH_TYPES_H
diff --git a/arm_compute/graph/backends/FunctionHelpers.h b/arm_compute/graph/backends/FunctionHelpers.h
index 803283e20d..a567427bf1 100644
--- a/arm_compute/graph/backends/FunctionHelpers.h
+++ b/arm_compute/graph/backends/FunctionHelpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,18 +21,15 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H
-#define ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H
+#ifndef ACL_ARM_COMPUTE_GRAPH_BACKENDS_FUNCTIONHELPERS_H
+#define ACL_ARM_COMPUTE_GRAPH_BACKENDS_FUNCTIONHELPERS_H
-#include "arm_compute/core/experimental/IPostOp.h"
-#include "arm_compute/core/experimental/PostOps.h"
#include "arm_compute/graph/Logger.h"
#include "arm_compute/graph/Tensor.h"
#include "arm_compute/graph/TypePrinter.h"
#include "arm_compute/graph/Types.h"
#include "arm_compute/graph/Utils.h"
#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h"
-#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationWithPostOpsFunction.h"
#include "arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h"
#include "arm_compute/graph/backends/Utils.h"
#include "arm_compute/graph/nodes/Nodes.h"
@@ -541,183 +538,6 @@ std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node,
return std::move(func);
}
-/** Create a backend convolution layer function with post operator
- *
- * @tparam ConvolutionLayerFunctions Backend convolution functions
- * @tparam TargetInfo Target-specific information
- *
- * @param[in] node Node to create the backend function for
- * @param[in] ctx Graph context
- *
- * @return Backend convolution layer function
- */
-template <typename ConvolutionLayerFunctions, typename TargetInfo>
-std::unique_ptr<IFunction> create_fused_convolution_with_post_op(FusedConvolutionWithPostOpNode &node, GraphContext &ctx)
-{
- validate_node<TargetInfo>(node, 4 /* expected inputs */, 1 /* expected outputs */);
-
- // Extract IO and info
- typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
- typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
- typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
- typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
-
- const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
-
- if(is_quantized)
- {
- biases->info()->set_data_type(DataType::S32);
- }
-
- const PadStrideInfo conv_info = node.convolution_info();
- const unsigned int num_groups = node.num_groups();
- const ActivationLayerInfo fused_act = node.fused_activation();
-
- experimental::PostOpList<typename TargetInfo::TensorType *> post_ops;
-
- auto &post_op_info_list = node.post_op_info_list();
- for(const auto &post_op_info : post_op_info_list)
- {
- switch(post_op_info->type())
- {
- case PostOpType::Activation:
- {
- const auto act_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoActivation *>(post_op_info.get());
- post_ops.template push_back_op<experimental::PostOpAct<typename TargetInfo::TensorType *>>(act_info->_act);
- break;
- }
- case PostOpType::Eltwise_Add:
- {
- typename TargetInfo::TensorType *add_input = get_backing_tensor<TargetInfo>(node.input(3));
- const auto eltwise_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoEltwiseAdd *>(post_op_info.get());
- post_ops.template push_back_op<experimental::PostOpEltwiseAdd<typename TargetInfo::TensorType *>>(add_input, eltwise_info->_prev_op_dst_pos, eltwise_info->_policy);
- break;
- }
- default:
- {
- ARM_COMPUTE_ERROR("Unsupported PostOpType");
- }
- }
- }
-
- // Create and configure function (we assume that functions have been validated before creation)
- std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
- std::unique_ptr<IFunction> func;
- std::string func_name;
-
- // Fuse convolution with post ops is only supported for conv1x1, which is only implemented as gemmconv2d
- std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
- std::string("GEMMConvolutionLayer"), mm,
- input, weights, biases, output, conv_info,
- WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups, post_ops);
-
- // Log info
- std::ostringstream qss;
- if(is_quantized)
- {
- qss << " Input QuantInfo: " << input->info()->quantization_info()
- << " Weights QuantInfo: " << weights->info()->quantization_info()
- << " Output QuantInfo: " << output->info()->quantization_info();
- }
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << func_name
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Groups: " << num_groups
- << " Input shape: " << input->info()->tensor_shape()
- << " Weights shape: " << weights->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << qss.str()
- << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
- << " Post ops" << post_ops
- << std::endl);
- return std::move(func);
-}
-
-/** Create a backend convolution batch normalization layer function with post operator
- *
- * @tparam FusedLayerTypes Backend convolution functions
- * @tparam TargetInfo Target-specific information
- *
- * @param[in] node Node to create the backend function for
- * @param[in] ctx Graph context
- *
- * @return Backend fused convolution with batch normalization layer function
- */
-template <typename FusedLayerTypes, typename TargetInfo>
-std::unique_ptr<IFunction> create_fused_convolution_batch_normalization_with_post_op(FusedConvolutionBatchNormalizationWithPostOpsNode &node, GraphContext &ctx)
-{
- validate_node<TargetInfo>(node, 8 /* expected inputs */, 1 /* expected outputs */);
-
- // Extract IO and info
- typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
- typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
- typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
- typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(3));
- typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(4));
- typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(5));
- typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(6));
-
- typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
-
- const PadStrideInfo conv_info = node.convolution_info();
- const unsigned int num_groups = node.num_groups();
- const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
- const float epsilon = node.epsilon();
-
- experimental::PostOpList<typename TargetInfo::TensorType *> post_ops;
-
- auto &post_op_info_list = node.post_op_info_list();
- for(const auto &post_op_info : post_op_info_list)
- {
- switch(post_op_info->type())
- {
- case PostOpType::Activation:
- {
- const auto act_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoActivation *>(post_op_info.get());
- post_ops.template push_back_op<experimental::PostOpAct<typename TargetInfo::TensorType *>>(act_info->_act);
- break;
- }
- case PostOpType::Eltwise_Add:
- {
- typename TargetInfo::TensorType *add_input = get_backing_tensor<TargetInfo>(node.input(3));
- const auto eltwise_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoEltwiseAdd *>(post_op_info.get());
- post_ops.template push_back_op<experimental::PostOpEltwiseAdd<typename TargetInfo::TensorType *>>(add_input, eltwise_info->_prev_op_dst_pos, eltwise_info->_policy);
- break;
- }
- default:
- {
- ARM_COMPUTE_ERROR("Unsupported PostOpType");
- }
- }
- }
-
- // Create and configure function (we assume that functions have been validated before creation)
- std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
- std::unique_ptr<IFunction> func;
- std::string func_name;
-
- using FType = FusedConvolutionBatchNormalizationWithPostOpsFunction<TargetInfo, FusedLayerTypes>;
-
- // Create and configure function
- std::tie(func, func_name) = create_named_memory_managed_function<FType>(
- std::string("FusedConvolutionBatchNormalizationLayerWithPostOpsLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, post_ops);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Weights shape: " << weights->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << " Post Ops:" << post_ops
- << std::endl);
- return std::move(func);
-}
-
/** Create a backend deconvolution layer function
*
* @tparam DeconvolutionLayerFunction Backend deconvolution function
@@ -2025,4 +1845,4 @@ std::unique_ptr<IFunction> create_strided_slice_layer(StridedSliceLayerNode &nod
} // namespace graph
} // namespace arm_compute
-#endif /* ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H */
+#endif // ACL_ARM_COMPUTE_GRAPH_BACKENDS_FUNCTIONHELPERS_H
diff --git a/arm_compute/graph/backends/FusedConvolutionBatchNormalizationWithPostOpsFunction.h b/arm_compute/graph/backends/FusedConvolutionBatchNormalizationWithPostOpsFunction.h
deleted file mode 100644
index 10f2e5c25e..0000000000
--- a/arm_compute/graph/backends/FusedConvolutionBatchNormalizationWithPostOpsFunction.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef ARM_COMPUTE_GRAPH_BACKENDS_FUSED_CONVOLUTION_BATCH_NORMAZLIZATION_WITH_POST_OPS_FUNCTION_H
-#define ARM_COMPUTE_GRAPH_BACKENDS_FUSED_CONVOLUTION_BATCH_NORMAZLIZATION_WITH_POST_OPS_FUNCTION_H
-
-#include "arm_compute/core/Types.h"
-#include "arm_compute/core/experimental/IPostOp.h"
-#include "arm_compute/runtime/IFunction.h"
-
-namespace arm_compute
-{
-namespace graph
-{
-namespace backends
-{
-/** Wrapper function to first apply {NE, CL}BatchNormalizationLayer on the weights and then run {NE, CL}ConvolutionLayer with the modified weights */
-template <typename TargetInfo, typename FusedLayerTypes>
-class FusedConvolutionBatchNormalizationWithPostOpsFunction : public IFunction
-{
-public:
- using TensorType = typename TargetInfo::TensorType;
- using TensorConcreteType = typename TargetInfo::TensorConcreteType;
-
- FusedConvolutionBatchNormalizationWithPostOpsFunction(std::shared_ptr<IMemoryManager> memory_manager = nullptr)
- : _conv_layer(memory_manager), _fused_batch_norm_layer(), _fused_bias(), _is_prepared(false)
- {
- }
-
- /** Set the input and output tensors.
- *
- * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
- * while every optional dimension from 4 and above represent a batch of inputs.
- * Data types supported: QASYMM8/F16/F32.
- * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
- * @param[in] bias Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
- * Data type supported: Should match @p input data type.
- * @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
- * Data types supported: Same as @p input.
- * @param[in] mean Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
- * @param[in] var Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
- * @param[in] beta Beta values tensor info. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for beta is 0. Data types supported: Same as @p input
- * @param[in] gamma Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for gamma is 1. Data types supported: Same as @p input
- * @param[in] epsilon Small value to avoid division with zero. Default value is 0.001f.
- * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
- * @param[in] num_groups Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
- * @param[in] fast_math Enable fast math computation. In case this flag were set, the function could dispatch the fastest implementation
- * available which may introduce a drop of accuracy as well. Default is false
- * @param[in] post_ops A sequence of post operations that are performed after the main operation.
- *
- */
- void configure(TensorType *input,
- TensorType *weights,
- TensorType *bias,
- TensorType *output,
- const TensorType *mean,
- const TensorType *var,
- const TensorType *beta,
- const TensorType *gamma,
- float epsilon, const PadStrideInfo &conv_info, unsigned int num_groups, bool fast_math,
- const arm_compute::experimental::PostOpList<TensorType *> &post_ops = experimental::PostOpList<TensorType *> {})
- {
- // We don't run any validate, as we assume that the layers have been already validated
- const bool has_bias = (bias != nullptr);
- const TensorType *bias_to_use;
-
- // We check if the layer has a bias. If yes, use it in-place. If not, we need to create one
- // as batch normalization might end up with a bias != 0
- if(has_bias)
- {
- _fused_batch_norm_layer.configure(weights, mean, var, nullptr, nullptr, bias, beta, gamma, epsilon);
- bias_to_use = bias;
- }
- else
- {
- _fused_batch_norm_layer.configure(weights, mean, var, nullptr, &_fused_bias, nullptr, beta, gamma, epsilon);
- bias_to_use = &_fused_bias;
- }
-
- ActivationLayerInfo fused_act = ActivationLayerInfo(); // Passing an empty ActivationLayerInfo.
- _conv_layer.configure(input, weights, bias_to_use, output, conv_info, WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups, post_ops);
-
- if(!has_bias)
- {
- _fused_bias.allocator()->allocate();
- }
- }
-
- // Inherited methods overridden:
- void run()
- {
- prepare();
- _conv_layer.run();
- }
-
- void prepare()
- {
- if(!_is_prepared)
- {
- _fused_batch_norm_layer.run();
- _is_prepared = true;
- }
- }
-
-private:
- typename FusedLayerTypes::ConvolutionLayer _conv_layer;
- typename FusedLayerTypes::FuseBatchNormalization _fused_batch_norm_layer;
- TensorConcreteType _fused_bias;
- bool _is_prepared;
-};
-} // namespace backends
-} // namespace graph
-} // namespace arm_compute
-
-#endif /* ARM_COMPUTE_GRAPH_BACKENDS_FUSED_CONVOLUTION_BATCH_NORMAZLIZATION_WITH_POST_OPS_FUNCTION_H */
diff --git a/arm_compute/graph/backends/ValidateHelpers.h b/arm_compute/graph/backends/ValidateHelpers.h
index 89dccd88b7..71a6201554 100644
--- a/arm_compute/graph/backends/ValidateHelpers.h
+++ b/arm_compute/graph/backends/ValidateHelpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_VALIDATE_HELPERS_H
-#define ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_VALIDATE_HELPERS_H
+#ifndef ACL_ARM_COMPUTE_GRAPH_BACKENDS_VALIDATEHELPERS_H
+#define ACL_ARM_COMPUTE_GRAPH_BACKENDS_VALIDATEHELPERS_H
#include "arm_compute/graph/Logger.h"
#include "arm_compute/graph/Tensor.h"
@@ -183,42 +183,6 @@ Status validate_convolution_layer(ConvolutionLayerNode &node)
return status;
}
-/** Validates a Convolution layer node
- *
- * @tparam GEMMConvolutionLayer GEMM Convolution layer function type
- *
- * @param[in] node Node to validate
- *
- * @return Status
- */
-template <typename GEMMConvolutionLayer>
-Status validate_fused_convolution_with_post_op(FusedConvolutionWithPostOpNode &node)
-{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating fused ConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
- ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 4);
- ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
-
- // Extract IO and info
- arm_compute::ITensorInfo *input = get_backing_tensor_info(node.input(0));
- arm_compute::ITensorInfo *weights = get_backing_tensor_info(node.input(1));
- arm_compute::ITensorInfo *biases = get_backing_tensor_info(node.input(2));
- arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
-
- if(is_data_type_quantized_asymmetric(input->data_type()))
- {
- biases->set_data_type(DataType::S32);
- }
-
- const PadStrideInfo conv_info = node.convolution_info();
- //const ConvolutionMethod conv_algorithm = node.convolution_method();
- //const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
- const unsigned int num_groups = node.num_groups();
-
- // Validate function
- return GEMMConvolutionLayer::validate(input, weights, biases, output, conv_info,
- WeightsInfo(), Size2D(1, 1), ActivationLayerInfo(), num_groups);
-}
-
/** Validates a Depthwise Convolution layer node
*
* @tparam DepthwiseConvolutionLayer Default Depthwise Convolution layer type
@@ -775,4 +739,4 @@ Status validate_unary_eltwise_layer(UnaryEltwiseLayerNode &node)
} // namespace graph
} // namespace arm_compute
-#endif /* ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_VALIDATE_HELPERS_H */
+#endif // ACL_ARM_COMPUTE_GRAPH_BACKENDS_VALIDATEHELPERS_H
diff --git a/arm_compute/graph/nodes/FusedConvolutionBatchNormalizationWithPostOpsNode.h b/arm_compute/graph/nodes/FusedConvolutionBatchNormalizationWithPostOpsNode.h
deleted file mode 100644
index a42e06d889..0000000000
--- a/arm_compute/graph/nodes/FusedConvolutionBatchNormalizationWithPostOpsNode.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_GRAPH_FUSED_CONVOLUTION_BATCH_NORMALIZATION_WITH_POST_OPS_NODE_H
-#define ARM_COMPUTE_GRAPH_FUSED_CONVOLUTION_BATCH_NORMALIZATION_WITH_POST_OPS_NODE_H
-
-#include "arm_compute/graph/INode.h"
-
-namespace arm_compute
-{
-namespace graph
-{
-/** Batch Normalization node */
-class FusedConvolutionBatchNormalizationWithPostOpsNode final : public INode
-{
-public:
- /** Constructor
- *
- * @param[in] epsilon Epsilon parameter.
- * @param[in] info Convolution layer attributes.
- * @param[in] num_groups (Optional) Number of groups (Defaults to 1)
- * @param[in] method (Optional) Convolution method to use
- * @param[in] fast_math_hint (Optional) Fast math hint
- */
- FusedConvolutionBatchNormalizationWithPostOpsNode(float epsilon, PadStrideInfo info,
- unsigned int num_groups = 1,
- ConvolutionMethod method = ConvolutionMethod::Default,
- FastMathHint fast_math_hint = FastMathHint::Disabled);
-
- /** Epsilon parameter accessor
- *
- * @return Epsilon parameter
- */
- float epsilon() const;
-
- /** Computes convolution output descriptor
- *
- * @param[in] input_descriptor Input descriptor
- * @param[in] weights_descriptor Weights descriptor
- * @param[in] info Convolution operation attributes
- *
- * @return Output descriptor
- */
- static TensorDescriptor compute_output_descriptor(const TensorDescriptor &input_descriptor,
- const TensorDescriptor &weights_descriptor,
- const PadStrideInfo &info);
-
- /** Sets the convolution layer method to use
- *
- * @param[in] method Method to use for convolution
- */
- void set_convolution_method(ConvolutionMethod method);
-
- /** Number of groups in convolution accessor
- *
- * @return Number of groups in convolution
- */
- unsigned int num_groups() const;
-
- /** Convolution layer method accessor
- *
- * @note This is an indication on which convolution layer implementation to use,
- * if it fails to be created the library's heuristic approach will be used
- *
- * @return Convolution layer method to be used by the node
- */
- ConvolutionMethod convolution_method() const;
-
- /** Sets the fast math hint
- *
- * @param[in] hint Hint to use for convolution
- */
- void set_fast_math_hint(FastMathHint hint);
-
- /** Fast math hint accessor
- *
- * @return Fast math hint to be used by the node
- */
- FastMathHint fast_math_hint() const;
-
- /** Convolution metadata accessor
- *
- * @return Convolution information
- */
- PadStrideInfo convolution_info() const;
-
- // Inherited overridden methods:
- NodeType type() const override;
- bool forward_descriptors() override;
- TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
-
-public:
- static constexpr NodeType node_type = NodeType::FusedConvolutionBatchNormalizationLayerWithPostOpsLayer;
-
-private:
- float _epsilon;
-
- PadStrideInfo _info;
- unsigned int _num_groups;
- ConvolutionMethod _method;
- FastMathHint _fast_math_hint;
-};
-
-} // namespace graph
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_GRAPH_BATCH_NORMALIZATION_LAYER_NODE_H */
diff --git a/arm_compute/graph/nodes/FusedConvolutionWithPostOpNode.h b/arm_compute/graph/nodes/FusedConvolutionWithPostOpNode.h
deleted file mode 100644
index 6048994b02..0000000000
--- a/arm_compute/graph/nodes/FusedConvolutionWithPostOpNode.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_GRAPH_FUSED_CONVOLUTION_WITH_POST_OP_NODE_H
-#define ARM_COMPUTE_GRAPH_FUSED_CONVOLUTION_WITH_POST_OP_NODE_H
-
-#include "arm_compute/graph/INode.h"
-
-#include <list>
-
-namespace arm_compute
-{
-namespace graph
-{
-/** Convolution node */
-class FusedConvolutionWithPostOpNode final : public INode
-{
-public:
- /** Constructor
- *
- * @param[in] info Convolution layer attributes
- * @param[in] num_groups (Optional) Number of groups (Defaults to 1)
- * @param[in] method (Optional) Convolution method to use
- * @param[in] fast_math_hint (Optional) Fast math hint
- * @param[in] out_quant_info (Optional) Output quantization info
- */
- FusedConvolutionWithPostOpNode(PadStrideInfo info,
- unsigned int num_groups = 1,
- ConvolutionMethod method = ConvolutionMethod::Default,
- FastMathHint fast_math_hint = FastMathHint::Disabled,
- QuantizationInfo out_quant_info = QuantizationInfo());
- /** Sets the convolution layer method to use
- *
- * @param[in] method Method to use for convolution
- */
- void set_convolution_method(ConvolutionMethod method);
- /** Convolution layer method accessor
- *
- * @note This is an indication on which convolution layer implementation to use,
- * if it fails to be created the library's heuristic approach will be used
- *
- * @return Convolution layer method to be used by the node
- */
- ConvolutionMethod convolution_method() const;
- /** Sets the fast math fast hint
- *
- * @param[in] hint Hint to use for convolution
- */
- void set_fast_math_hint(FastMathHint hint);
- /** Fast math hint accessor
- *
- * @return Fast math hint to be used by the node
- */
- FastMathHint fast_math_hint() const;
- /** Convolution metadata accessor
- *
- * @return Convolution information
- */
- PadStrideInfo convolution_info() const;
- /** Number of groups in convolution accessor
- *
- * @return Number of groups in convolution
- */
- unsigned int num_groups() const;
- /** Returns fused activation
- *
- * @return Fused activation
- */
- ActivationLayerInfo fused_activation() const;
- /** Sets fused activation
- *
- * @param[in] fused_activation Fused activation to set
- */
- void set_fused_activation(ActivationLayerInfo fused_activation);
- /** Sets convolution info
- *
- * @param[in] info Convolution info to set
- */
- void set_convolution_info(PadStrideInfo info);
- /** Computes convolution output descriptor
- *
- * @param[in] input_descriptor Input descriptor
- * @param[in] weights_descriptor Weights descriptor
- * @param[in] info Convolution operation attributes
- *
- * @return Output descriptor
- */
- static TensorDescriptor compute_output_descriptor(const TensorDescriptor &input_descriptor,
- const TensorDescriptor &weights_descriptor,
- const PadStrideInfo &info);
-
- // Inherited overridden methods:
- NodeType type() const override;
- bool forward_descriptors() override;
- TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
-
-public:
- static constexpr NodeType node_type = NodeType::FusedConvolutionWithPostOp;
-
-private:
- PadStrideInfo _info;
- unsigned int _num_groups;
- ConvolutionMethod _method;
- FastMathHint _fast_math_hint;
- QuantizationInfo _out_quant_info;
- ActivationLayerInfo _fused_activation;
-};
-} // namespace graph
-} // namespace arm_compute
-
-#endif /* ARM_COMPUTE_GRAPH_FUSED_CONVOLUTION_WITH_POST_OP_NODE_H */
diff --git a/arm_compute/graph/nodes/Nodes.h b/arm_compute/graph/nodes/Nodes.h
index 3887eaeac6..ae9f177ec4 100644
--- a/arm_compute/graph/nodes/Nodes.h
+++ b/arm_compute/graph/nodes/Nodes.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_GRAPH_NODES_H
-#define ARM_COMPUTE_GRAPH_NODES_H
+#ifndef ACL_ARM_COMPUTE_GRAPH_NODES_NODES_H
+#define ACL_ARM_COMPUTE_GRAPH_NODES_NODES_H
#include "arm_compute/graph/nodes/ActivationLayerNode.h"
#include "arm_compute/graph/nodes/ArgMinMaxLayerNode.h"
@@ -43,8 +43,6 @@
#include "arm_compute/graph/nodes/FlattenLayerNode.h"
#include "arm_compute/graph/nodes/FullyConnectedLayerNode.h"
#include "arm_compute/graph/nodes/FusedConvolutionBatchNormalizationNode.h"
-#include "arm_compute/graph/nodes/FusedConvolutionBatchNormalizationWithPostOpsNode.h"
-#include "arm_compute/graph/nodes/FusedConvolutionWithPostOpNode.h"
#include "arm_compute/graph/nodes/FusedDepthwiseConvolutionBatchNormalizationNode.h"
#include "arm_compute/graph/nodes/GenerateProposalsLayerNode.h"
#include "arm_compute/graph/nodes/InputNode.h"
@@ -70,4 +68,4 @@
#include "arm_compute/graph/nodes/StackLayerNode.h"
#include "arm_compute/graph/nodes/StridedSliceLayerNode.h"
-#endif /* ARM_COMPUTE_GRAPH_NODES_H */
+#endif // ACL_ARM_COMPUTE_GRAPH_NODES_NODES_H
diff --git a/arm_compute/graph/nodes/NodesFwd.h b/arm_compute/graph/nodes/NodesFwd.h
index f1576d6336..580f339468 100644
--- a/arm_compute/graph/nodes/NodesFwd.h
+++ b/arm_compute/graph/nodes/NodesFwd.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_GRAPH_NODES_FWD_H
-#define ARM_COMPUTE_GRAPH_NODES_FWD_H
+#ifndef ACL_ARM_COMPUTE_GRAPH_NODES_NODESFWD_H
+#define ACL_ARM_COMPUTE_GRAPH_NODES_NODESFWD_H
namespace arm_compute
{
@@ -49,9 +49,7 @@ class EltwiseLayerNode;
class FlattenLayerNode;
class FullyConnectedLayerNode;
class FusedConvolutionBatchNormalizationNode;
-class FusedConvolutionWithPostOpNode;
class FusedDepthwiseConvolutionBatchNormalizationNode;
-class FusedConvolutionBatchNormalizationWithPostOpsNode;
class GenerateProposalsLayerNode;
class InputNode;
class L2NormalizeLayerNode;
@@ -77,4 +75,4 @@ class StackLayerNode;
class StridedSliceLayerNode;
} // namespace graph
} // namespace arm_compute
-#endif /* ARM_COMPUTE_GRAPH_NODES_FWD_H */
+#endif // ACL_ARM_COMPUTE_GRAPH_NODES_NODESFWD_H
diff --git a/arm_compute/graph/printers/DotGraphPrinter.h b/arm_compute/graph/printers/DotGraphPrinter.h
index 63b89272f4..564aecfb1e 100644
--- a/arm_compute/graph/printers/DotGraphPrinter.h
+++ b/arm_compute/graph/printers/DotGraphPrinter.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019,2021 Arm Limited.
+ * Copyright (c) 2018-2019,2021,2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_GRAPH_DOTGRAPHPRINTER_H
-#define ARM_COMPUTE_GRAPH_DOTGRAPHPRINTER_H
+#ifndef ACL_ARM_COMPUTE_GRAPH_PRINTERS_DOTGRAPHPRINTER_H
+#define ACL_ARM_COMPUTE_GRAPH_PRINTERS_DOTGRAPHPRINTER_H
#include "arm_compute/graph/IGraphPrinter.h"
@@ -57,8 +57,6 @@ public:
void visit(DepthwiseConvolutionLayerNode &n) override;
void visit(EltwiseLayerNode &n) override;
void visit(FusedConvolutionBatchNormalizationNode &n) override;
- void visit(FusedConvolutionBatchNormalizationWithPostOpsNode &n) override;
- void visit(FusedConvolutionWithPostOpNode &n) override;
void visit(FusedDepthwiseConvolutionBatchNormalizationNode &n) override;
void visit(NormalizationLayerNode &n) override;
void visit(PoolingLayerNode &n) override;
@@ -106,4 +104,4 @@ private:
};
} // namespace graph
} // namespace arm_compute
-#endif /* ARM_COMPUTE_GRAPH_DOTGRAPHPRINTER_H */
+#endif // ACL_ARM_COMPUTE_GRAPH_PRINTERS_DOTGRAPHPRINTER_H