author     Georgios Pinitas <georgios.pinitas@arm.com>   2018-07-20 13:23:44 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>     2018-11-02 16:54:54 +0000
commit     e2220551b7a64b929650ba9a60529c31e70c13c5 (patch)
tree       5d609887f15b4392cdade7bb388710ceafc62260 /arm_compute/graph
parent     eff8d95991205e874091576e2d225f63246dd0bb (diff)
COMPMID-1367: Enable NHWC in graph examples
Change-Id: Iabc54a3a1bdcd46a9a921cda39c7c85fef672b72
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/141449
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'arm_compute/graph')
-rw-r--r--  arm_compute/graph/GraphBuilder.h |   9
-rw-r--r--  arm_compute/graph/INodeVisitor.h |  16
-rw-r--r--  arm_compute/graph/TypePrinter.h |  44
-rw-r--r--  arm_compute/graph/Types.h |  24
-rw-r--r--  arm_compute/graph/backends/FunctionHelpers.h | 116
-rw-r--r--  arm_compute/graph/backends/ValidateHelpers.h |  16
-rw-r--r--  arm_compute/graph/frontend/Layers.h |   6
-rw-r--r--  arm_compute/graph/frontend/Types.h |   6
-rw-r--r--  arm_compute/graph/nodes/ConcatenateLayerNode.h (renamed from arm_compute/graph/nodes/DepthConcatenateLayerNode.h) |  37
-rw-r--r--  arm_compute/graph/nodes/ConvolutionLayerNode.h |   6
-rw-r--r--  arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h |   2
-rw-r--r--  arm_compute/graph/nodes/Nodes.h |   2
-rw-r--r--  arm_compute/graph/nodes/NodesFwd.h |   2
-rw-r--r--  arm_compute/graph/printers/DotGraphPrinter.h |   2
14 files changed, 151 insertions, 137 deletions
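
For code built against the graph API, the substance of this patch is an interface rename: add_depth_concatenate_node becomes add_concatenate_node with an explicit concatenation axis, and the graph enums move from upper-case to CamelCase values. A minimal migration sketch, assuming g, params and inputs are already set up as in the existing graph examples:

    // Depth concatenation is now the CHANNEL case of a generic concatenate node:
    //   before this patch: GraphBuilder::add_depth_concatenate_node(g, params, inputs);
    NodeID nid = GraphBuilder::add_concatenate_node(g, params, inputs, DataLayoutDimension::CHANNEL);

    // Enum values change spelling only, e.g.:
    ConvolutionMethod conv_method = ConvolutionMethod::Default; // was ConvolutionMethod::DEFAULT
    FastMathHint      fm_hint     = FastMathHint::Disabled;     // was FastMathHint::DISABLED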
diff --git a/arm_compute/graph/GraphBuilder.h b/arm_compute/graph/GraphBuilder.h
index 6359e05a63..191848c15f 100644
--- a/arm_compute/graph/GraphBuilder.h
+++ b/arm_compute/graph/GraphBuilder.h
@@ -128,8 +128,8 @@ public:
* @return Node ID of the created node, EmptyNodeID in case of error
*/
static NodeID add_convolution_node(Graph &g, NodeParams params, NodeIdxPair input,
- Size2D kernel_spatial_extend, unsigned int depth, PadStrideInfo conv_info,
- unsigned int num_groups = 1, ConvolutionMethod method = ConvolutionMethod::DEFAULT, FastMathHint fast_math_hint = FastMathHint::DISABLED,
+ Size2D kernel_spatial_extend, unsigned int depth, PadStrideInfo conv_info, unsigned int num_groups = 1,
+ ConvolutionMethod method = ConvolutionMethod::Default, FastMathHint fast_math_hint = FastMathHint::Disabled,
ITensorAccessorUPtr weights_accessor = nullptr, ITensorAccessorUPtr bias_accessor = nullptr,
const QuantizationInfo weights_quant_info = QuantizationInfo(),
const QuantizationInfo out_quant_info = QuantizationInfo());
@@ -155,10 +155,11 @@ public:
* @param[in] g Graph to add the node to
* @param[in] params Common node parameters
* @param[in] inputs Inputs to the depth concatenate layer node as a NodeID-Index pair
+ * @param[in] axis Concatenation axis
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_depth_concatenate_node(Graph &g, NodeParams params, std::vector<NodeIdxPair> inputs);
+ static NodeID add_concatenate_node(Graph &g, NodeParams params, std::vector<NodeIdxPair> inputs, DataLayoutDimension axis);
/** Adds a depth-wise convolution layer node to the graph
*
* @param[in] g Graph to add the node to
@@ -175,7 +176,7 @@ public:
*/
static NodeID add_depthwise_convolution_node(Graph &g, NodeParams params, NodeIdxPair input,
Size2D kernel_spatial_extend, PadStrideInfo conv_info,
- DepthwiseConvolutionMethod method = DepthwiseConvolutionMethod::DEFAULT,
+ DepthwiseConvolutionMethod method = DepthwiseConvolutionMethod::Default,
ITensorAccessorUPtr weights_accessor = nullptr, ITensorAccessorUPtr bias_accessor = nullptr, const QuantizationInfo quant_info = QuantizationInfo());
/** Adds an element-wise layer node to the graph
*
diff --git a/arm_compute/graph/INodeVisitor.h b/arm_compute/graph/INodeVisitor.h
index b5446c4a55..ad390ad760 100644
--- a/arm_compute/graph/INodeVisitor.h
+++ b/arm_compute/graph/INodeVisitor.h
@@ -51,6 +51,11 @@ public:
* @param[in] n Node to visit.
*/
virtual void visit(BatchNormalizationLayerNode &n) = 0;
+ /** Visit ConcatenateLayerNode.
+ *
+ * @param[in] n Node to visit.
+ */
+ virtual void visit(ConcatenateLayerNode &n) = 0;
/** Visit ConstNode.
*
* @param[in] n Node to visit.
@@ -61,11 +66,6 @@ public:
* @param[in] n Node to visit.
*/
virtual void visit(ConvolutionLayerNode &n) = 0;
- /** Visit DepthConcatenateLayerNode.
- *
- * @param[in] n Node to visit.
- */
- virtual void visit(DepthConcatenateLayerNode &n) = 0;
/** Visit DepthwiseConvolutionLayerNode.
*
* @param[in] n Node to visit.
@@ -148,15 +148,15 @@ public:
{
default_visit();
}
- virtual void visit(ConstNode &n) override
+ virtual void visit(ConcatenateLayerNode &n) override
{
default_visit();
}
- virtual void visit(ConvolutionLayerNode &n) override
+ virtual void visit(ConstNode &n) override
{
default_visit();
}
- virtual void visit(DepthConcatenateLayerNode &n) override
+ virtual void visit(ConvolutionLayerNode &n) override
{
default_visit();
}
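
Custom visitors track the same rename. A hedged sketch of a visitor subclass under the new interface (the ConcatCounter class is illustrative, not part of the library, and assumes default_visit() is the pure-virtual fallback of DefaultNodeVisitor):

    class ConcatCounter final : public arm_compute::graph::DefaultNodeVisitor
    {
    public:
        // Was visit(DepthConcatenateLayerNode &) before this patch:
        void visit(arm_compute::graph::ConcatenateLayerNode &) override
        {
            ++_num_concats;
        }
        // Fallback for all other node types:
        void default_visit() override
        {
        }
        unsigned int num_concats() const { return _num_concats; }

    private:
        unsigned int _num_concats{ 0 };
    };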
diff --git a/arm_compute/graph/TypePrinter.h b/arm_compute/graph/TypePrinter.h
index c3601f2373..7c0bd8cfdd 100644
--- a/arm_compute/graph/TypePrinter.h
+++ b/arm_compute/graph/TypePrinter.h
@@ -71,15 +71,15 @@ inline ::std::ostream &operator<<(::std::ostream &os, const NodeType &node_type)
case NodeType::ChannelShuffleLayer:
os << "ChannelShuffleLayer";
break;
+ case NodeType::ConcatenateLayer:
+ os << "ConcatenateLayer";
+ break;
case NodeType::ConvolutionLayer:
os << "ConvolutionLayer";
break;
case NodeType::DeconvolutionLayer:
os << "DeconvolutionLayer";
break;
- case NodeType::DepthConcatenateLayer:
- os << "DepthConcatenateLayer";
- break;
case NodeType::DepthwiseConvolutionLayer:
os << "DepthwiseConvolutionLayer";
break;
@@ -134,14 +134,14 @@ inline ::std::ostream &operator<<(::std::ostream &os, const EltwiseOperation &el
{
switch(eltwise_op)
{
- case EltwiseOperation::ADD:
- os << "ADD";
+ case EltwiseOperation::Add:
+ os << "Add";
break;
- case EltwiseOperation::MUL:
- os << "MUL";
+ case EltwiseOperation::Mul:
+ os << "Mul";
break;
- case EltwiseOperation::SUB:
- os << "SUB";
+ case EltwiseOperation::Sub:
+ os << "Sub";
break;
default:
ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
@@ -155,17 +155,17 @@ inline ::std::ostream &operator<<(::std::ostream &os, const ConvolutionMethod &m
{
switch(method)
{
- case ConvolutionMethod::DEFAULT:
- os << "DEFAULT";
+ case ConvolutionMethod::Default:
+ os << "Default";
break;
- case ConvolutionMethod::DIRECT:
- os << "DIRECT";
+ case ConvolutionMethod::Direct:
+ os << "Direct";
break;
case ConvolutionMethod::GEMM:
os << "GEMM";
break;
- case ConvolutionMethod::WINOGRAD:
- os << "WINOGRAD";
+ case ConvolutionMethod::Winograd:
+ os << "Winograd";
break;
default:
ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
@@ -179,11 +179,11 @@ inline ::std::ostream &operator<<(::std::ostream &os, const FastMathHint &hint)
{
switch(hint)
{
- case FastMathHint::ENABLED:
- os << "ENABLED";
+ case FastMathHint::Enabled:
+ os << "Enabled";
break;
- case FastMathHint::DISABLED:
- os << "DISABLED";
+ case FastMathHint::Disabled:
+ os << "Disabled";
break;
default:
ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
@@ -197,14 +197,14 @@ inline ::std::ostream &operator<<(::std::ostream &os, const DepthwiseConvolution
{
switch(method)
{
- case DepthwiseConvolutionMethod::DEFAULT:
+ case DepthwiseConvolutionMethod::Default:
os << "DEFAULT";
break;
case DepthwiseConvolutionMethod::GEMV:
os << "GEMV";
break;
- case DepthwiseConvolutionMethod::OPTIMIZED_3x3:
- os << "OPTIMIZED_3x3";
+ case DepthwiseConvolutionMethod::Optimized3x3:
+ os << "Optimized3x3";
break;
default:
ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
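
The printed spellings follow the enum rename (except DepthwiseConvolutionMethod::Default, which as shown above still prints "DEFAULT"). For instance, a hedged one-liner:

    std::cout << ConvolutionMethod::Winograd << " / " << FastMathHint::Enabled;
    // prints: Winograd / Enabled   (previously: WINOGRAD / ENABLED)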
diff --git a/arm_compute/graph/Types.h b/arm_compute/graph/Types.h
index c5b7fb1c51..f22f50ac82 100644
--- a/arm_compute/graph/Types.h
+++ b/arm_compute/graph/Types.h
@@ -96,33 +96,33 @@ enum class Target
/** Supported Element-wise operations */
enum class EltwiseOperation
{
- ADD, /**< Arithmetic addition */
- SUB, /**< Arithmetic subtraction */
- MUL /**< Arithmetic multiplication */
+ Add, /**< Arithmetic addition */
+ Sub, /**< Arithmetic subtraction */
+ Mul /**< Arithmetic multiplication */
};
/** Supported Convolution layer methods */
enum class ConvolutionMethod
{
- DEFAULT, /**< Default approach using internal heuristics */
+ Default, /**< Default approach using internal heuristics */
GEMM, /**< GEMM based convolution */
- DIRECT, /**< Deep direct convolution */
- WINOGRAD /**< Winograd based convolution */
+ Direct, /**< Deep direct convolution */
+ Winograd /**< Winograd based convolution */
};
/** Supported Depthwise Convolution layer methods */
enum class DepthwiseConvolutionMethod
{
- DEFAULT, /**< Default approach using internal heuristics */
- GEMV, /**< Generic GEMV based depthwise convolution */
- OPTIMIZED_3x3, /**< Optimized 3x3 direct depthwise convolution */
+ Default, /**< Default approach using internal heuristics */
+ GEMV, /**< Generic GEMV based depthwise convolution */
+ Optimized3x3, /**< Optimized 3x3 direct depthwise convolution */
};
/** Enable or disable fast math for Convolution layer */
enum class FastMathHint
{
- ENABLED, /**< Fast math enabled for Convolution layer */
- DISABLED, /**< Fast math disabled for Convolution layer */
+ Enabled, /**< Fast math enabled for Convolution layer */
+ Disabled, /**< Fast math disabled for Convolution layer */
};
/** Supported nodes */
@@ -131,9 +131,9 @@ enum class NodeType
ActivationLayer,
BatchNormalizationLayer,
ChannelShuffleLayer,
+ ConcatenateLayer,
ConvolutionLayer,
DeconvolutionLayer,
- DepthConcatenateLayer,
DepthwiseConvolutionLayer,
EltwiseLayer,
FlattenLayer,
diff --git a/arm_compute/graph/backends/FunctionHelpers.h b/arm_compute/graph/backends/FunctionHelpers.h
index 978d3bc1a8..172f00277e 100644
--- a/arm_compute/graph/backends/FunctionHelpers.h
+++ b/arm_compute/graph/backends/FunctionHelpers.h
@@ -192,6 +192,52 @@ std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode
return std::move(func);
}
+/** Create a backend layer concatenate function
+ *
+ * @tparam ConcatenateLayerFunction Backend concatenate function
+ * @tparam TargetInfo Target-specific information
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend concatenate layer function
+ */
+template <typename ConcatenateLayerFunction, typename TargetInfo>
+std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
+{
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+ // Return nullptr if depth concatenate is switched off
+ if(!node.is_enabled())
+ {
+ return nullptr;
+ }
+
+ // Extract IO and info
+ std::vector<typename TargetInfo::TensorType *> inputs;
+ for(unsigned int i = 0; i < node.num_inputs(); ++i)
+ {
+ inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
+ }
+ typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
+ const DataLayoutDimension concat_axis = node.concatenation_axis();
+
+ // Create and configure function
+ auto func = support::cpp14::make_unique<ConcatenateLayerFunction>();
+ func->configure(inputs, output, concat_axis);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
+ << " Target " << TargetInfo::TargetType
+ << " Data Type: " << output->info()->data_type()
+ << " Shape: " << output->info()->tensor_shape()
+ << " Num Inputs: " << inputs.size()
+ << " Axis: " << concat_axis
+ << std::endl);
+
+ return std::move(func);
+}
+
/** Create a backend convolution layer function
*
* @tparam ConvolutionLayerFunctions Backend convolution functions
@@ -220,20 +266,20 @@ std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node,
const PadStrideInfo conv_info = node.convolution_info();
const ConvolutionMethod conv_algorithm = node.convolution_method();
- const bool fast_math = node.fast_math_hint() == FastMathHint::ENABLED;
+ const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
// Create and configure function (we assume that functions have been validated before creation)
std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
std::unique_ptr<IFunction> func;
std::string func_name;
- if(conv_algorithm == ConvolutionMethod::WINOGRAD)
+ if(conv_algorithm == ConvolutionMethod::Winograd)
{
std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
std::string("WinogradConvolutionLayer"), mm,
input, weights, biases, output, conv_info, ActivationLayerInfo(), fast_math);
}
- else if(conv_algorithm == ConvolutionMethod::DIRECT)
+ else if(conv_algorithm == ConvolutionMethod::Direct)
{
std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
std::string("DirectConvolutionLayer"),
@@ -308,50 +354,6 @@ std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &no
return func;
}
-/** Create a backend layer depth concatenate function
- *
- * @tparam DepthConcatenateLayerFunction Backend depth concatenate function
- * @tparam TargetInfo Target-specific information
- *
- * @param[in] node Node to create the backend function for
- *
- * @return Backend depth concatenate layer function
- */
-template <typename DepthConcatenateLayerFunction, typename TargetInfo>
-std::unique_ptr<arm_compute::IFunction> create_depth_concatenate_layer(DepthConcatenateLayerNode &node)
-{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating DepthConcatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
- ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
-
- // Return nullptr if depth concatenate is switched off
- if(!node.is_enabled())
- {
- return nullptr;
- }
-
- // Extract IO and info
- std::vector<typename TargetInfo::TensorType *> inputs;
- for(unsigned int i = 0; i < node.num_inputs(); ++i)
- {
- inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
- }
- typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
-
- // Create and configure function
- auto func = support::cpp14::make_unique<DepthConcatenateLayerFunction>();
- func->configure(inputs, output);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << TargetInfo::TargetType
- << " Data Type: " << output->info()->data_type()
- << " Shape: " << output->info()->tensor_shape()
- << " Num Inputs: " << inputs.size()
- << std::endl);
-
- return std::move(func);
-}
-
/** Create a backend layer depth-wise convolution function
*
* @tparam DepthwiseConvolutionLayerFunctions Backend depthwise convolution function
@@ -383,7 +385,7 @@ std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvoluti
// Create and configure function (we assume that functions have been validated before creation)
std::unique_ptr<IFunction> func;
std::string func_name;
- if(dwc_algorithm == DepthwiseConvolutionMethod::OPTIMIZED_3x3)
+ if(dwc_algorithm == DepthwiseConvolutionMethod::Optimized3x3)
{
std::tie(func, func_name) = create_named_function<typename DepthwiseConvolutionLayerFunctions::DepthwiseConvolutionLayer3x3>(
std::string("DepthwiseConvolutionLayer3x3"),
@@ -435,19 +437,19 @@ std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
std::unique_ptr<IFunction> func = nullptr;
std::string func_name;
- if(eltwise_op == EltwiseOperation::ADD)
+ if(eltwise_op == EltwiseOperation::Add)
{
std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
std::string("ArithmeticAddition"),
input1, input2, output, convert_policy);
}
- else if(eltwise_op == EltwiseOperation::SUB)
+ else if(eltwise_op == EltwiseOperation::Sub)
{
std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
std::string("ArithmeticSubtraction"),
input1, input2, output, convert_policy);
}
- else if(eltwise_op == EltwiseOperation::MUL)
+ else if(eltwise_op == EltwiseOperation::Mul)
{
std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
std::string("PixelWiseMultiplication"),
@@ -487,11 +489,12 @@ std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
+ ARM_COMPUTE_ERROR_ON(input == nullptr);
+ ARM_COMPUTE_ERROR_ON(output == nullptr);
+
// Create and configure function
auto func = support::cpp14::make_unique<FlattenLayerFunction>();
func->configure(input, output);
- ARM_COMPUTE_ERROR_ON(input == nullptr);
- ARM_COMPUTE_ERROR_ON(output == nullptr);
// Log info
ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
@@ -526,13 +529,14 @@ std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode
typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
const FullyConnectedLayerInfo fc_info = node.info();
- // Create and configure function
- auto func = support::cpp14::make_unique<FullyConnectedLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
- func->configure(input, weights, biases, output, fc_info);
ARM_COMPUTE_ERROR_ON(input == nullptr);
ARM_COMPUTE_ERROR_ON(weights == nullptr);
ARM_COMPUTE_ERROR_ON(output == nullptr);
+ // Create and configure function
+ auto func = support::cpp14::make_unique<FullyConnectedLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
+ func->configure(input, weights, biases, output, fc_info);
+
// Log info
ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
<< " Target " << TargetInfo::TargetType
diff --git a/arm_compute/graph/backends/ValidateHelpers.h b/arm_compute/graph/backends/ValidateHelpers.h
index 189fbdc9c7..ae52593b03 100644
--- a/arm_compute/graph/backends/ValidateHelpers.h
+++ b/arm_compute/graph/backends/ValidateHelpers.h
@@ -106,22 +106,22 @@ Status validate_convolution_layer(ConvolutionLayerNode &node)
const PadStrideInfo conv_info = node.convolution_info();
const ConvolutionMethod conv_algorithm = node.convolution_method();
- //const bool fast_math = node.fast_math_hint() == FastMathHint::ENABLED; // FIXME (COMPMID-1138): uncomment once NEON and GLES support fast_math
+ const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
// Validate function
Status status{};
switch(conv_algorithm)
{
- case ConvolutionMethod::DIRECT:
+ case ConvolutionMethod::Direct:
status = DirectConvolutionLayer::validate(input, weights, biases, output, conv_info);
break;
case ConvolutionMethod::GEMM:
status = GEMMConvolutionLayer::validate(input, weights, biases, output, conv_info);
break;
- case ConvolutionMethod::WINOGRAD:
- status = WinogradConvolutionLayer::validate(input, weights, biases, output, conv_info /*, fast_math*/);
+ case ConvolutionMethod::Winograd:
+ status = WinogradConvolutionLayer::validate(input, weights, biases, output, conv_info, ActivationLayerInfo(), fast_math);
break;
- case ConvolutionMethod::DEFAULT:
+ case ConvolutionMethod::Default:
status = ConvolutionLayer::validate(input, weights, biases, output, conv_info);
break;
default:
@@ -136,7 +136,7 @@ Status validate_convolution_layer(ConvolutionLayerNode &node)
{
ARM_COMPUTE_LOG_GRAPH_INFO("Switched ConvolutionLayer method of node with ID : "
<< node.id() << " and Name: " << node.name() << std::endl);
- node.set_convolution_method(ConvolutionMethod::DEFAULT);
+ node.set_convolution_method(ConvolutionMethod::Default);
}
}
@@ -166,11 +166,11 @@ Status validate_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
// TODO (geopin01) : Switch when validation is implemented
// Validate function
- if((dwc_algorithm == DepthwiseConvolutionMethod::OPTIMIZED_3x3) && (weights->tensor_shape()[get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::WIDTH)] != 3))
+ if((dwc_algorithm == DepthwiseConvolutionMethod::Optimized3x3) && (weights->tensor_shape()[get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::WIDTH)] != 3))
{
ARM_COMPUTE_LOG_GRAPH_INFO("Switched DepthwiseConvolutionLayer method of node with ID : "
<< node.id() << " and Name: " << node.name() << std::endl);
- node.set_depthwise_convolution_method(DepthwiseConvolutionMethod::DEFAULT);
+ node.set_depthwise_convolution_method(DepthwiseConvolutionMethod::Default);
}
return Status{};
diff --git a/arm_compute/graph/frontend/Layers.h b/arm_compute/graph/frontend/Layers.h
index 197d2ea409..02ef56952d 100644
--- a/arm_compute/graph/frontend/Layers.h
+++ b/arm_compute/graph/frontend/Layers.h
@@ -595,7 +595,7 @@ public:
}
else if(_branch_merge_method == BranchMergeMethod::DEPTH_CONCATENATE)
{
- // Collect tail nodes and perform DepthConcatenate
+ // Collect tail nodes and concatenate
std::vector<NodeIdxPair> nodes;
for(auto &ss : _sub_streams)
{
@@ -608,14 +608,14 @@ public:
}
}
}
- nid = GraphBuilder::add_depth_concatenate_node(s.graph(), common_params, nodes);
+ nid = GraphBuilder::add_concatenate_node(s.graph(), common_params, nodes, DataLayoutDimension::CHANNEL);
}
else
{
ARM_COMPUTE_ERROR_ON(_sub_streams.size() != 2);
NodeIdxPair input0 = { _sub_streams[0]->tail_node(), 0 };
NodeIdxPair input1 = { _sub_streams[1]->tail_node(), 0 };
- nid = GraphBuilder::add_elementwise_node(s.graph(), common_params, input0, input1, EltwiseOperation::ADD);
+ nid = GraphBuilder::add_elementwise_node(s.graph(), common_params, input0, input1, EltwiseOperation::Add);
}
return nid;
}
diff --git a/arm_compute/graph/frontend/Types.h b/arm_compute/graph/frontend/Types.h
index cd579e2119..f9d4952765 100644
--- a/arm_compute/graph/frontend/Types.h
+++ b/arm_compute/graph/frontend/Types.h
@@ -64,9 +64,9 @@ enum class BranchMergeMethod
struct StreamHints
{
Target target_hint = { Target::UNSPECIFIED }; /**< Target execution hint */
- ConvolutionMethod convolution_method_hint = { ConvolutionMethod::DEFAULT }; /**< Convolution method hint */
- DepthwiseConvolutionMethod depthwise_convolution_method_hint = { DepthwiseConvolutionMethod::DEFAULT }; /**< Depthwise Convolution method hint */
- FastMathHint fast_math_hint = { FastMathHint::DISABLED }; /**< Fast math hint */
+ ConvolutionMethod convolution_method_hint = { ConvolutionMethod::Default }; /**< Convolution method hint */
+ DepthwiseConvolutionMethod depthwise_convolution_method_hint = { DepthwiseConvolutionMethod::Default }; /**< Depthwise Convolution method hint */
+ FastMathHint fast_math_hint = { FastMathHint::Disabled }; /**< Fast math hint */
};
} // namespace frontend
} // namespace graph
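
Frontend code that sets hints explicitly picks up the new spellings as well; a minimal sketch, assuming a default-constructed StreamHints:

    arm_compute::graph::frontend::StreamHints hints{};
    hints.convolution_method_hint           = ConvolutionMethod::Winograd;              // was WINOGRAD
    hints.depthwise_convolution_method_hint = DepthwiseConvolutionMethod::Optimized3x3; // was OPTIMIZED_3x3
    hints.fast_math_hint                    = FastMathHint::Enabled;                    // was ENABLED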
diff --git a/arm_compute/graph/nodes/DepthConcatenateLayerNode.h b/arm_compute/graph/nodes/ConcatenateLayerNode.h
index ffdec709ef..20c8523752 100644
--- a/arm_compute/graph/nodes/DepthConcatenateLayerNode.h
+++ b/arm_compute/graph/nodes/ConcatenateLayerNode.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef __ARM_COMPUTE_GRAPH_DEPTH_CONCATENATE_LAYER_NODE_H__
-#define __ARM_COMPUTE_GRAPH_DEPTH_CONCATENATE_LAYER_NODE_H__
+#ifndef __ARM_COMPUTE_GRAPH_CONCATENATE_LAYER_NODE_H__
+#define __ARM_COMPUTE_GRAPH_CONCATENATE_LAYER_NODE_H__
#include "arm_compute/graph/INode.h"
@@ -30,30 +30,31 @@ namespace arm_compute
{
namespace graph
{
-/** Depth Concatenation Layer node */
-class DepthConcatenateLayerNode final : public INode
+/** Concatenation Layer node */
+class ConcatenateLayerNode final : public INode
{
public:
/** Constructor
*
* @param[in] total_nodes Number of nodes that will get concatenated
+ * @param[in] axis Concatenation axis
*/
- DepthConcatenateLayerNode(unsigned int total_nodes);
- /** Computes depth concatenations output descriptor
+ ConcatenateLayerNode(unsigned int total_nodes, DataLayoutDimension axis);
+ /** Computes concatenations output descriptor
*
* @param[in] input_descriptors Input descriptors
+ * @param[in] axis Concatenation axis
*
* @return Expected output descriptor
*/
- static TensorDescriptor compute_output_descriptor(const std::vector<TensorDescriptor> &input_descriptors);
+ static TensorDescriptor compute_output_descriptor(const std::vector<TensorDescriptor> &input_descriptors, DataLayoutDimension axis);
/** Disables or not the depth concatenate node
*
- * @warning This is used when depth concatenate is performed with sub-tensors,
- * where this node is used as a placeholder.
+ * @warning This is used when concatenate is performed using sub-tensors, where this node is used as a placeholder.
*
- * @param[in] is_enabled If true a backend function is created to perform the depth concatenation (involves copying),
- * while if false, no function is created and we assume that subtensors are properly set to simulate
- * a no copy operation.
+ * @param[in] is_enabled If true a backend function is created to perform the concatenation (involves copying),
+ * while if false, no function is created and we assume that sub-tensors are properly set to simulate
+ * a zero copy operation.
*/
void set_enabled(bool is_enabled);
/** Enabled parameter accessor
@@ -61,6 +62,11 @@ public:
* @return True if a backend function is to be created else false
*/
bool is_enabled() const;
+ /** Concatenation axis parameter accessor
+ *
+ * @return Concatenation axis
+ */
+ DataLayoutDimension concatenation_axis() const;
// Inherited overridden methods:
NodeType type() const override;
@@ -69,9 +75,10 @@ public:
void accept(INodeVisitor &v) override;
private:
- unsigned int _total_nodes;
- bool _is_enabled;
+ unsigned int _total_nodes;
+ DataLayoutDimension _axis;
+ bool _is_enabled;
};
} // namespace graph
} // namespace arm_compute
-#endif /* __ARM_COMPUTE_GRAPH_DEPTH_CONCATENATE_LAYER_NODE_H__ */
+#endif /* __ARM_COMPUTE_GRAPH_CONCATENATE_LAYER_NODE_H__ */
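
Constructing the renamed node takes the axis up front, and the axis is queryable afterwards; a minimal sketch:

    // Four inputs concatenated along the channel dimension
    // (equivalent to the old DepthConcatenateLayerNode(4)):
    arm_compute::graph::ConcatenateLayerNode node(4, DataLayoutDimension::CHANNEL);
    const DataLayoutDimension axis = node.concatenation_axis(); // DataLayoutDimension::CHANNEL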
diff --git a/arm_compute/graph/nodes/ConvolutionLayerNode.h b/arm_compute/graph/nodes/ConvolutionLayerNode.h
index aca60283d7..4299be6bb5 100644
--- a/arm_compute/graph/nodes/ConvolutionLayerNode.h
+++ b/arm_compute/graph/nodes/ConvolutionLayerNode.h
@@ -41,8 +41,10 @@ public:
* @param[in] fast_math_hint (Optional) Fast math hint
* @param[in] out_quant_info (Optional) Output quantization info
*/
- ConvolutionLayerNode(PadStrideInfo info, ConvolutionMethod method = ConvolutionMethod::DEFAULT, FastMathHint fast_math_hint = FastMathHint::DISABLED,
- QuantizationInfo out_quant_info = QuantizationInfo());
+ ConvolutionLayerNode(PadStrideInfo info,
+ ConvolutionMethod method = ConvolutionMethod::Default,
+ FastMathHint fast_math_hint = FastMathHint::Disabled,
+ QuantizationInfo out_quant_info = QuantizationInfo());
/** Sets the convolution layer method to use
*
* @param[in] method Method to use for convolution
diff --git a/arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h b/arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h
index df6f456ac9..1a173c5421 100644
--- a/arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h
+++ b/arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h
@@ -39,7 +39,7 @@ public:
* @param[in] info Convolution layer attributes
* @param[in] method Depthwise convolution method to use
*/
- DepthwiseConvolutionLayerNode(PadStrideInfo info, DepthwiseConvolutionMethod method = DepthwiseConvolutionMethod::DEFAULT);
+ DepthwiseConvolutionLayerNode(PadStrideInfo info, DepthwiseConvolutionMethod method = DepthwiseConvolutionMethod::Default);
/** Sets the depthwise convolution method to use
*
* @param[in] method Depthwise convolution method to use
diff --git a/arm_compute/graph/nodes/Nodes.h b/arm_compute/graph/nodes/Nodes.h
index 97aa191916..f2e751e15f 100644
--- a/arm_compute/graph/nodes/Nodes.h
+++ b/arm_compute/graph/nodes/Nodes.h
@@ -27,10 +27,10 @@
#include "arm_compute/graph/nodes/ActivationLayerNode.h"
#include "arm_compute/graph/nodes/BatchNormalizationLayerNode.h"
#include "arm_compute/graph/nodes/ChannelShuffleLayerNode.h"
+#include "arm_compute/graph/nodes/ConcatenateLayerNode.h"
#include "arm_compute/graph/nodes/ConstNode.h"
#include "arm_compute/graph/nodes/ConvolutionLayerNode.h"
#include "arm_compute/graph/nodes/DeconvolutionLayerNode.h"
-#include "arm_compute/graph/nodes/DepthConcatenateLayerNode.h"
#include "arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h"
#include "arm_compute/graph/nodes/DummyNode.h"
#include "arm_compute/graph/nodes/EltwiseLayerNode.h"
diff --git a/arm_compute/graph/nodes/NodesFwd.h b/arm_compute/graph/nodes/NodesFwd.h
index 05979d796c..a0a9146dc4 100644
--- a/arm_compute/graph/nodes/NodesFwd.h
+++ b/arm_compute/graph/nodes/NodesFwd.h
@@ -33,10 +33,10 @@ class INode;
class ActivationLayerNode;
class BatchNormalizationLayerNode;
class ChannelShuffleLayerNode;
+class ConcatenateLayerNode;
class ConstNode;
class ConvolutionLayerNode;
class DeconvolutionLayerNode;
-class DepthConcatenateLayerNode;
class DepthwiseConvolutionLayerNode;
class DummyNode;
class EltwiseLayerNode;
diff --git a/arm_compute/graph/printers/DotGraphPrinter.h b/arm_compute/graph/printers/DotGraphPrinter.h
index 1d355a52ee..d4cf6928e5 100644
--- a/arm_compute/graph/printers/DotGraphPrinter.h
+++ b/arm_compute/graph/printers/DotGraphPrinter.h
@@ -52,8 +52,8 @@ public:
// Inherited methods overridden
void visit(ActivationLayerNode &n) override;
void visit(BatchNormalizationLayerNode &n) override;
+ void visit(ConcatenateLayerNode &n) override;
void visit(ConvolutionLayerNode &n) override;
- void visit(DepthConcatenateLayerNode &n) override;
void visit(DepthwiseConvolutionLayerNode &n) override;
void visit(EltwiseLayerNode &n) override;
void visit(NormalizationLayerNode &n) override;