path: root/src/graph
author     Georgios Pinitas <georgios.pinitas@arm.com>   2018-04-27 19:07:19 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>     2018-11-02 16:51:17 +0000
commit     cac13b1cfd593889271f8e2191be2039b8d88f36 (patch)
tree       d1c5196877d7fbd5dcfbb9f9003faf6035f82a33 /src/graph
parent     ad0c7388f6261989a268ffb2d042f2bd80736e3f (diff)
download   ComputeLibrary-cac13b1cfd593889271f8e2191be2039b8d88f36.tar.gz
COMPMID-1097: Port mobilenet to NHWC
Change-Id: I789065bfa0d4ef133388e1904c5caf31e450f80f
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/129495
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'src/graph')
-rw-r--r--  src/graph/GraphBuilder.cpp                          22
-rw-r--r--  src/graph/GraphManager.cpp                           2
-rw-r--r--  src/graph/INode.cpp                                  5
-rw-r--r--  src/graph/Utils.cpp                                 39
-rw-r--r--  src/graph/backends/CL/CLDeviceBackend.cpp            3
-rw-r--r--  src/graph/backends/GLES/GCDeviceBackend.cpp          3
-rw-r--r--  src/graph/backends/NEON/NEDeviceBackend.cpp          3
-rw-r--r--  src/graph/detail/ExecutionHelpers.cpp               36
-rw-r--r--  src/graph/mutators/SplitLayerSubTensorMutator.cpp    2
-rw-r--r--  src/graph/nodes/ActivationLayerNode.cpp              5
-rw-r--r--  src/graph/nodes/BatchNormalizationLayerNode.cpp      5
-rw-r--r--  src/graph/nodes/ConstNode.cpp                        7
-rw-r--r--  src/graph/nodes/ConvolutionLayerNode.cpp            33
-rw-r--r--  src/graph/nodes/DepthConcatenateLayerNode.cpp       38
-rw-r--r--  src/graph/nodes/DepthwiseConvolutionLayerNode.cpp   31
-rw-r--r--  src/graph/nodes/EltwiseLayerNode.cpp                 5
-rw-r--r--  src/graph/nodes/FlattenLayerNode.cpp                 5
-rw-r--r--  src/graph/nodes/FullyConnectedLayer.cpp             38
-rw-r--r--  src/graph/nodes/InputNode.cpp                        7
-rw-r--r--  src/graph/nodes/NormalizationLayerNode.cpp           5
-rw-r--r--  src/graph/nodes/OutputNode.cpp                       5
-rw-r--r--  src/graph/nodes/PoolingLayerNode.cpp                33
-rw-r--r--  src/graph/nodes/ReshapeLayer.cpp                     5
-rw-r--r--  src/graph/nodes/SoftmaxLayerNode.cpp                 5
-rw-r--r--  src/graph/nodes/SplitLayerNode.cpp                  29
-rw-r--r--  src/graph/printers/DotGraphPrinter.cpp               2
26 files changed, 179 insertions(+), 194 deletions(-)
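
The thread running through the whole patch: shape computations that hard-coded NCHW indices (shape.x()/y()/z()) are replaced with layout-aware lookups. A standalone sketch of why that is necessary, assuming the usual arm_compute convention that TensorShape stores the fastest-moving dimension first (this snippet is illustrative and not part of the patch):

```cpp
#include <cstddef>
#include <iostream>

int main()
{
    // One 224x224 RGB image (MobileNet's input size).
    const std::size_t N = 1, C = 3, H = 224, W = 224;

    // NCHW stores the shape as {W, H, C, N}: indices W=0, H=1, C=2, N=3.
    const std::size_t nchw[4] = { W, H, C, N };
    // NHWC stores the shape as {C, W, H, N}: indices C=0, W=1, H=2, N=3.
    const std::size_t nhwc[4] = { C, W, H, N };

    // A hard-coded shape.z() (index 2) reads channels under NCHW but
    // height under NHWC, which is exactly the bug class this port removes.
    std::cout << nchw[2] << " vs " << nhwc[2] << '\n'; // prints "3 vs 224"
    return 0;
}
```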
diff --git a/src/graph/GraphBuilder.cpp b/src/graph/GraphBuilder.cpp
index 4ad34e789c..56b31c7844 100644
--- a/src/graph/GraphBuilder.cpp
+++ b/src/graph/GraphBuilder.cpp
@@ -63,7 +63,7 @@ Status set_accessor_on_node(Graph &g, NodeID nid, bool is_output, size_t idx, IT
NodeID add_const_node_with_name(Graph &g, NodeParams params, const std::string &name, TensorDescriptor desc, ITensorAccessorUPtr accessor)
{
params.name = params.name.empty() ? "" : params.name + name;
- auto nid = GraphBuilder::add_const_node(g, params, desc, std::move(accessor));
+ auto nid = GraphBuilder::add_const_node(g, params, std::move(desc), std::move(accessor));
set_node_params(g, nid, params);
return nid;
}
@@ -165,7 +165,7 @@ NodeID GraphBuilder::add_batch_normalization_node(Graph &g, NodeParams params, N
// Calculate Common Descriptor
TensorDescriptor common_desc = input_tensor_desc;
- common_desc.shape = TensorShape(common_desc.shape.z());
+ common_desc.shape = TensorShape(get_dimension_size(input_tensor_desc, DataLayoutDimension::CHANNEL));
// Create mean and var nodes
auto mean_nid = add_const_node_with_name(g, params, "Mean", common_desc, std::move(mean_accessor));
@@ -221,8 +221,11 @@ NodeID GraphBuilder::add_convolution_node(Graph &g, NodeParams params, NodeIdxPa
// Create weights node
TensorDescriptor w_desc = input_tensor_desc;
- w_desc.shape = TensorShape(kernel_spatial_extend.width, kernel_spatial_extend.height, w_desc.shape.z() / num_groups, depth);
-
+ w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::WIDTH), kernel_spatial_extend.width);
+ w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::HEIGHT), kernel_spatial_extend.height);
+ w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::CHANNEL),
+ get_dimension_size(input_tensor_desc, DataLayoutDimension::CHANNEL) / num_groups);
+ w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::BATCHES), depth);
if(!weights_quant_info.empty())
{
w_desc.quant_info = weights_quant_info;
@@ -290,8 +293,10 @@ NodeID GraphBuilder::add_depthwise_convolution_node(Graph &g, NodeParams params,
// Create weights node
TensorDescriptor w_desc = input_tensor_desc;
- w_desc.shape = TensorShape(kernel_spatial_extend.width, kernel_spatial_extend.height, w_desc.shape.z());
-
+ w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::WIDTH), kernel_spatial_extend.width);
+ w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::HEIGHT), kernel_spatial_extend.height);
+ w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::CHANNEL),
+ get_dimension_size(input_tensor_desc, DataLayoutDimension::CHANNEL));
if(!quant_info.empty())
{
w_desc.quant_info = quant_info;
@@ -353,9 +358,8 @@ NodeID GraphBuilder::add_fully_connected_layer(Graph &g, NodeParams params, Node
const TensorDescriptor input_tensor_desc = get_tensor_descriptor(g, g.node(input.node_id)->outputs()[0]);
// Create weights node
- TensorDescriptor w_desc = input_tensor_desc;
- w_desc.shape = FullyConnectedLayerNode::compute_weights_shape(input_tensor_desc.shape, num_outputs);
- NodeID w_nid = add_const_node_with_name(g, params, "Weights", w_desc, std::move(weights_accessor));
+ TensorDescriptor w_desc = FullyConnectedLayerNode::compute_weights_descriptor(input_tensor_desc, num_outputs);
+ NodeID w_nid = add_const_node_with_name(g, params, "Weights", w_desc, std::move(weights_accessor));
// Create bias nodes
NodeID b_nid = EmptyNodeID;
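
To see what the new weight-descriptor code in add_convolution_node produces, here is a hedged standalone sketch; dim_idx mirrors the mapping this patch adds to src/graph/Utils.cpp, and every name outside the diff is illustrative:

```cpp
#include <array>
#include <cstddef>

enum class Layout { NCHW, NHWC };
enum class Dim { WIDTH, HEIGHT, CHANNEL, BATCHES };

// Stand-in for graph::get_dimension_idx (same mapping as the patch).
std::size_t dim_idx(Layout l, Dim d)
{
    switch(d)
    {
        case Dim::WIDTH:   return (l == Layout::NCHW) ? 0 : 1;
        case Dim::HEIGHT:  return (l == Layout::NCHW) ? 1 : 2;
        case Dim::CHANNEL: return (l == Layout::NCHW) ? 2 : 0;
        default:           return 3; // BATCHES sits at index 3 in both layouts
    }
}

// Convolution weight shape as add_convolution_node now assembles it.
std::array<std::size_t, 4> conv_weights_shape(Layout l, std::size_t k,
                                              std::size_t ifm, std::size_t ofm,
                                              std::size_t num_groups)
{
    std::array<std::size_t, 4> shape{};
    shape[dim_idx(l, Dim::WIDTH)]   = k;
    shape[dim_idx(l, Dim::HEIGHT)]  = k;
    shape[dim_idx(l, Dim::CHANNEL)] = ifm / num_groups;
    shape[dim_idx(l, Dim::BATCHES)] = ofm; // OFM count lands at shape[3] either way
    return shape; // NCHW: {k, k, ifm/num_groups, ofm}; NHWC: {ifm/num_groups, k, k, ofm}
}
```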
diff --git a/src/graph/GraphManager.cpp b/src/graph/GraphManager.cpp
index fa7dfdf8f8..aac6488311 100644
--- a/src/graph/GraphManager.cpp
+++ b/src/graph/GraphManager.cpp
@@ -62,8 +62,6 @@ void GraphManager::finalize_graph(Graph &graph, GraphContext &ctx, PassManager &
// Apply all mutating passes
pm.run_all(graph);
- // TODO (geopin01): Perform a graph validation
-
// Perform topological sort
// FIXME : Sort nodes and pass sorted indices in configure all nodes
diff --git a/src/graph/INode.cpp b/src/graph/INode.cpp
index c1c18e5853..cd9a46ac40 100644
--- a/src/graph/INode.cpp
+++ b/src/graph/INode.cpp
@@ -42,6 +42,11 @@ INode::INode()
// clang-format on
// *INDENT-ON*
+Status INode::validate() const
+{
+ return Status{};
+}
+
void INode::set_graph(Graph *g)
{
ARM_COMPUTE_ERROR_ON(g == nullptr);
diff --git a/src/graph/Utils.cpp b/src/graph/Utils.cpp
index 8537bbfb2a..030fa2df59 100644
--- a/src/graph/Utils.cpp
+++ b/src/graph/Utils.cpp
@@ -89,10 +89,6 @@ PassManager create_default_pass_manager(Target target)
return pm;
}
-/** Default setups a graph Context
- *
- * @param[in] ctx Context to default initialize
- */
void setup_default_graph_context(GraphContext &ctx)
{
for(const auto &backend : backends::BackendRegistry::get().backends())
@@ -100,5 +96,40 @@ void setup_default_graph_context(GraphContext &ctx)
backend.second->setup_backend_context(ctx);
}
}
+
+size_t get_dimension_size(const TensorDescriptor &descriptor, const DataLayoutDimension data_layout_dimension)
+{
+ ARM_COMPUTE_ERROR_ON_MSG(descriptor.layout == DataLayout::UNKNOWN, "Cannot retrieve the dimension index for an unknown layout!");
+ return descriptor.shape[get_dimension_idx(descriptor, data_layout_dimension)];
+}
+
+size_t get_dimension_idx(const TensorDescriptor &descriptor, const DataLayoutDimension data_layout_dimension)
+{
+ ARM_COMPUTE_ERROR_ON_MSG(descriptor.layout == DataLayout::UNKNOWN, "Cannot retrieve the dimension index for an unknown layout!");
+
+    /* Return the index based on the data layout
+     * [N C H W]
+     * [3 2 1 0]
+     * [N H W C]
+     * [3 2 1 0]
+     */
+ switch(data_layout_dimension)
+ {
+ case DataLayoutDimension::CHANNEL:
+ return (descriptor.layout == DataLayout::NCHW) ? 2 : 0;
+ break;
+ case DataLayoutDimension::HEIGHT:
+ return (descriptor.layout == DataLayout::NCHW) ? 1 : 2;
+ break;
+ case DataLayoutDimension::WIDTH:
+ return (descriptor.layout == DataLayout::NCHW) ? 0 : 1;
+ break;
+ case DataLayoutDimension::BATCHES:
+ return 3;
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Data layout index not supported!");
+ break;
+ }
+}
} // namespace graph
} // namespace arm_compute
\ No newline at end of file
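
Usage of the two helpers added above; this builds against the graph API introduced by the patch (the wrapper function itself is just an illustration):

```cpp
#include "arm_compute/graph/Utils.h"

#include <cstddef>

using namespace arm_compute::graph;

// Layout-agnostic channel count: resolves to shape[2] under NCHW and
// shape[0] under NHWC, and raises an error for DataLayout::UNKNOWN.
std::size_t channel_count(const TensorDescriptor &descriptor)
{
    return get_dimension_size(descriptor, DataLayoutDimension::CHANNEL);
}
```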
diff --git a/src/graph/backends/CL/CLDeviceBackend.cpp b/src/graph/backends/CL/CLDeviceBackend.cpp
index 92cb6936c3..37cbcd72d7 100644
--- a/src/graph/backends/CL/CLDeviceBackend.cpp
+++ b/src/graph/backends/CL/CLDeviceBackend.cpp
@@ -127,7 +127,8 @@ std::unique_ptr<ITensorHandle> CLDeviceBackend::create_tensor(const Tensor &tens
// Create backend tensor handle
TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type, tensor_desc.quant_info);
- auto backend_tensor_handle = support::cpp14::make_unique<CLTensorHandle>(info);
+ info.set_data_layout(tensor_desc.layout);
+ auto backend_tensor_handle = support::cpp14::make_unique<CLTensorHandle>(info);
return std::move(backend_tensor_handle);
}
diff --git a/src/graph/backends/GLES/GCDeviceBackend.cpp b/src/graph/backends/GLES/GCDeviceBackend.cpp
index a55215f058..0185598965 100644
--- a/src/graph/backends/GLES/GCDeviceBackend.cpp
+++ b/src/graph/backends/GLES/GCDeviceBackend.cpp
@@ -88,7 +88,8 @@ std::unique_ptr<ITensorHandle> GCDeviceBackend::create_tensor(const Tensor &tens
// Create backend tensor handle
TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type, tensor_desc.quant_info);
- auto backend_tensor_handle = support::cpp14::make_unique<GCTensorHandle>(info);
+ info.set_data_layout(tensor_desc.layout);
+ auto backend_tensor_handle = support::cpp14::make_unique<GCTensorHandle>(info);
return std::move(backend_tensor_handle);
}
diff --git a/src/graph/backends/NEON/NEDeviceBackend.cpp b/src/graph/backends/NEON/NEDeviceBackend.cpp
index 9123196540..def6c39003 100644
--- a/src/graph/backends/NEON/NEDeviceBackend.cpp
+++ b/src/graph/backends/NEON/NEDeviceBackend.cpp
@@ -94,7 +94,8 @@ std::unique_ptr<ITensorHandle> NEDeviceBackend::create_tensor(const Tensor &tens
// Create backend tensor handle
TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type, tensor_desc.quant_info);
- auto backend_tensor_handle = support::cpp14::make_unique<NETensorHandle>(info);
+ info.set_data_layout(tensor_desc.layout);
+ auto backend_tensor_handle = support::cpp14::make_unique<NETensorHandle>(info);
return std::move(backend_tensor_handle);
}
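
All three backends (CL, GLES, NEON) now apply the same two-step pattern before creating the handle. A sketch of that shared shape, where HandleT stands in for CLTensorHandle, GCTensorHandle or NETensorHandle; the template itself is not in the patch:

```cpp
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/graph/ITensorHandle.h"
#include "arm_compute/graph/TensorDescriptor.h"
#include "support/ToolchainSupport.h"

#include <memory>

// Illustrative only: the common body of the three create_tensor overloads.
template <typename HandleT>
std::unique_ptr<arm_compute::graph::ITensorHandle>
make_backend_handle(const arm_compute::graph::TensorDescriptor &tensor_desc)
{
    arm_compute::TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type, tensor_desc.quant_info);
    // Without this call TensorInfo keeps its default layout, so an NHWC
    // graph would silently run with NCHW metadata.
    info.set_data_layout(tensor_desc.layout);
    return arm_compute::support::cpp14::make_unique<HandleT>(info);
}
```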
diff --git a/src/graph/detail/ExecutionHelpers.cpp b/src/graph/detail/ExecutionHelpers.cpp
index 0bb47f2b33..c1304436f6 100644
--- a/src/graph/detail/ExecutionHelpers.cpp
+++ b/src/graph/detail/ExecutionHelpers.cpp
@@ -43,6 +43,24 @@ void default_initialize_backends()
}
}
+void validate_all_nodes(Graph &g)
+{
+ auto &nodes = g.nodes();
+
+ // Create tasks
+ for(auto &node : nodes)
+ {
+ if(node != nullptr)
+ {
+ Target assigned_target = node->assigned_target();
+ auto backend = backends::BackendRegistry::get().find_backend(assigned_target);
+ ARM_COMPUTE_ERROR_ON_MSG(!backend, "Requested backend doesn't exist!");
+ Status status = backend->validate_node(*node);
+ ARM_COMPUTE_ERROR_ON_MSG(!bool(status), status.error_description().c_str());
+ }
+ }
+}
+
void configure_all_tensors(Graph &g)
{
auto &tensors = g.tensors();
@@ -121,24 +139,6 @@ void allocate_all_tensors(Graph &g)
}
}
-void validate_all_nodes(Graph &g)
-{
- auto &nodes = g.nodes();
-
- // Create tasks
- for(auto &node : nodes)
- {
- if(node != nullptr)
- {
- Target assigned_target = node->assigned_target();
- auto backend = backends::BackendRegistry::get().find_backend(assigned_target);
- ARM_COMPUTE_ERROR_ON_MSG(!backend, "Requested backend doesn't exist!");
- Status status = backend->validate_node(*node);
- ARM_COMPUTE_ERROR_ON_MSG(!bool(status), status.error_description().c_str());
- }
- }
-}
-
ExecutionWorkload configure_all_nodes(Graph &g, GraphContext &ctx)
{
ExecutionWorkload workload;
diff --git a/src/graph/mutators/SplitLayerSubTensorMutator.cpp b/src/graph/mutators/SplitLayerSubTensorMutator.cpp
index 179a6c35fb..2a8c029843 100644
--- a/src/graph/mutators/SplitLayerSubTensorMutator.cpp
+++ b/src/graph/mutators/SplitLayerSubTensorMutator.cpp
@@ -75,7 +75,7 @@ void SplitLayerSubTensorMutator::mutate(Graph &g)
Tensor *output_tensor = node->output(i);
const TensorShape output_shape = output_tensor->desc().shape;
Coordinates coords;
- std::tie(std::ignore, coords) = SplitLayerNode::compute_output_shape(input_tensor->desc().shape, num_splits, axis, i);
+ std::tie(std::ignore, coords) = SplitLayerNode::compute_output_descriptor(input_tensor->desc(), num_splits, axis, i);
backends::IDeviceBackend *backend = backends::BackendRegistry::get().find_backend(output_tensor->desc().target);
std::unique_ptr<ITensorHandle> handle = backend->create_subtensor(input_tensor->handle(), output_shape, coords, extend_parent);
diff --git a/src/graph/nodes/ActivationLayerNode.cpp b/src/graph/nodes/ActivationLayerNode.cpp
index 9996d2ce3f..414684cf30 100644
--- a/src/graph/nodes/ActivationLayerNode.cpp
+++ b/src/graph/nodes/ActivationLayerNode.cpp
@@ -65,11 +65,6 @@ TensorDescriptor ActivationLayerNode::configure_output(size_t idx) const
return src->desc();
}
-Status ActivationLayerNode::validate()
-{
- return Status{};
-}
-
NodeType ActivationLayerNode::type() const
{
return NodeType::ActivationLayer;
diff --git a/src/graph/nodes/BatchNormalizationLayerNode.cpp b/src/graph/nodes/BatchNormalizationLayerNode.cpp
index f7b041c828..3ae11fc24d 100644
--- a/src/graph/nodes/BatchNormalizationLayerNode.cpp
+++ b/src/graph/nodes/BatchNormalizationLayerNode.cpp
@@ -76,11 +76,6 @@ TensorDescriptor BatchNormalizationLayerNode::configure_output(size_t idx) const
return src->desc();
}
-Status BatchNormalizationLayerNode::validate()
-{
- return Status{};
-}
-
NodeType BatchNormalizationLayerNode::type() const
{
return NodeType::BatchNormalizationLayer;
diff --git a/src/graph/nodes/ConstNode.cpp b/src/graph/nodes/ConstNode.cpp
index 631971c98f..2f3cd142af 100644
--- a/src/graph/nodes/ConstNode.cpp
+++ b/src/graph/nodes/ConstNode.cpp
@@ -31,7 +31,7 @@ namespace arm_compute
namespace graph
{
ConstNode::ConstNode(TensorDescriptor desc)
- : _desc(desc)
+ : _desc(std::move(desc))
{
_outputs.resize(1, NullTensorID);
}
@@ -54,11 +54,6 @@ TensorDescriptor ConstNode::configure_output(size_t idx) const
return _desc;
}
-Status ConstNode::validate()
-{
- return Status{};
-}
-
NodeType ConstNode::type() const
{
return NodeType::Const;
diff --git a/src/graph/nodes/ConvolutionLayerNode.cpp b/src/graph/nodes/ConvolutionLayerNode.cpp
index eb0c6a1c1a..eaf1f7f035 100644
--- a/src/graph/nodes/ConvolutionLayerNode.cpp
+++ b/src/graph/nodes/ConvolutionLayerNode.cpp
@@ -26,6 +26,7 @@
#include "arm_compute/core/Utils.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/INodeVisitor.h"
+#include "arm_compute/graph/Utils.h"
namespace arm_compute
{
@@ -53,18 +54,26 @@ PadStrideInfo ConvolutionLayerNode::convolution_info() const
return _info;
}
-TensorShape ConvolutionLayerNode::compute_output_shape(TensorShape input_shape, TensorShape weights_shape, PadStrideInfo info)
+TensorDescriptor ConvolutionLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
+ const TensorDescriptor &weights_descriptor,
+ const PadStrideInfo &info)
{
unsigned int output_width = 0;
unsigned int output_height = 0;
- std::tie(output_width, output_height) = scaled_dimensions(input_shape.x(), input_shape.y(), weights_shape.x(), weights_shape.y(), info);
- TensorShape output_shape{ input_shape };
- output_shape.set(0, output_width);
- output_shape.set(1, output_height);
- output_shape.set(2, weights_shape[3]);
+ const unsigned int input_width = get_dimension_size(input_descriptor, DataLayoutDimension::WIDTH);
+ const unsigned int input_height = get_dimension_size(input_descriptor, DataLayoutDimension::HEIGHT);
+ const unsigned int kernel_width = get_dimension_size(weights_descriptor, DataLayoutDimension::WIDTH);
+ const unsigned int kernel_height = get_dimension_size(weights_descriptor, DataLayoutDimension::HEIGHT);
- return output_shape;
+ std::tie(output_width, output_height) = scaled_dimensions(input_width, input_height, kernel_width, kernel_height, info);
+
+ TensorDescriptor output_descriptor = input_descriptor;
+ output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::WIDTH), output_width);
+ output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::HEIGHT), output_height);
+ output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::CHANNEL), weights_descriptor.shape[3]);
+
+ return output_descriptor;
}
bool ConvolutionLayerNode::forward_descriptors()
@@ -87,10 +96,7 @@ TensorDescriptor ConvolutionLayerNode::configure_output(size_t idx) const
ARM_COMPUTE_ERROR_ON(src == nullptr || weights == nullptr);
- TensorDescriptor output_info = src->desc();
- TensorShape output_shape = compute_output_shape(src->desc().shape, weights->desc().shape, _info);
- output_info.shape = output_shape;
-
+ TensorDescriptor output_info = compute_output_descriptor(src->desc(), weights->desc(), _info);
if(!_out_quant_info.empty())
{
output_info.quant_info = _out_quant_info;
@@ -99,11 +105,6 @@ TensorDescriptor ConvolutionLayerNode::configure_output(size_t idx) const
return output_info;
}
-Status ConvolutionLayerNode::validate()
-{
- return Status{};
-}
-
NodeType ConvolutionLayerNode::type() const
{
return NodeType::ConvolutionLayer;
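
For reference, the spatial arithmetic inside scaled_dimensions as used above; a sketch assuming floor rounding and symmetric padding (the library routine also handles ceil rounding and asymmetric pads):

```cpp
// Output extent of a convolution along one spatial axis (floor rounding,
// symmetric padding assumed for brevity).
unsigned int scaled_dim(unsigned int in, unsigned int kernel,
                        unsigned int stride, unsigned int pad)
{
    return (in + 2 * pad - kernel) / stride + 1;
}

// MobileNet's first convolution: 224x224 input, 3x3 kernel, stride 2,
// pad 1 -> scaled_dim(224, 3, 2, 1) == 112 in either layout; only the
// index the result is written to differs between NCHW and NHWC.
```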
diff --git a/src/graph/nodes/DepthConcatenateLayerNode.cpp b/src/graph/nodes/DepthConcatenateLayerNode.cpp
index 1c0539744f..08cccc1ff1 100644
--- a/src/graph/nodes/DepthConcatenateLayerNode.cpp
+++ b/src/graph/nodes/DepthConcatenateLayerNode.cpp
@@ -34,7 +34,7 @@ namespace graph
DepthConcatenateLayerNode::DepthConcatenateLayerNode(unsigned int total_nodes)
: _total_nodes(total_nodes), _is_enabled(true)
{
- _input_edges.resize(total_nodes, EmptyEdgeID);
+ _input_edges.resize(_total_nodes, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
}
@@ -48,28 +48,28 @@ bool DepthConcatenateLayerNode::is_enabled() const
return _is_enabled;
}
-TensorShape DepthConcatenateLayerNode::compute_output_shape(const std::vector<TensorShape> &input_shapes)
+TensorDescriptor DepthConcatenateLayerNode::compute_output_descriptor(const std::vector<TensorDescriptor> &input_descriptors)
{
- ARM_COMPUTE_ERROR_ON(input_shapes.size() == 0);
+ ARM_COMPUTE_ERROR_ON(input_descriptors.size() == 0);
- TensorShape output_shape = input_shapes[0];
+ TensorDescriptor output_descriptor = input_descriptors[0];
size_t max_x = 0;
size_t max_y = 0;
size_t depth = 0;
- for(const auto &shape : input_shapes)
+ for(const auto &input_descriptor : input_descriptors)
{
- max_x = std::max(shape.x(), max_x);
- max_y = std::max(shape.y(), max_y);
- depth += shape.z();
+ max_x = std::max(input_descriptor.shape.x(), max_x);
+ max_y = std::max(input_descriptor.shape.y(), max_y);
+ depth += input_descriptor.shape.z();
}
- output_shape.set(0, max_x);
- output_shape.set(1, max_y);
- output_shape.set(2, depth);
+ output_descriptor.shape.set(0, max_x);
+ output_descriptor.shape.set(1, max_y);
+ output_descriptor.shape.set(2, depth);
- return output_shape;
+ return output_descriptor;
}
bool DepthConcatenateLayerNode::forward_descriptors()
@@ -99,27 +99,19 @@ TensorDescriptor DepthConcatenateLayerNode::configure_output(size_t idx) const
if(are_all_inputs_set)
{
- std::vector<TensorShape> inputs_shapes;
+ std::vector<TensorDescriptor> inputs_descriptors;
for(unsigned int i = 0; i < _input_edges.size(); ++i)
{
const Tensor *t = _graph->tensor(input_id(i));
ARM_COMPUTE_ERROR_ON(t == nullptr);
- inputs_shapes.push_back(t->desc().shape);
+ inputs_descriptors.push_back(t->desc());
}
- output_info = input(0)->desc();
- TensorShape output_shape = compute_output_shape(inputs_shapes);
- output_info.shape = output_shape;
+ output_info = compute_output_descriptor(inputs_descriptors);
}
return output_info;
}
-Status DepthConcatenateLayerNode::validate()
-{
- ARM_COMPUTE_UNUSED(_total_nodes);
- return Status{};
-}
-
NodeType DepthConcatenateLayerNode::type() const
{
return NodeType::DepthConcatenateLayer;
diff --git a/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp b/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
index 67a39029e6..1a6f8d398d 100644
--- a/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
+++ b/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
@@ -26,6 +26,7 @@
#include "arm_compute/core/Utils.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/INodeVisitor.h"
+#include "arm_compute/graph/Utils.h"
namespace arm_compute
{
@@ -53,17 +54,25 @@ PadStrideInfo DepthwiseConvolutionLayerNode::convolution_info() const
return _info;
}
-TensorShape DepthwiseConvolutionLayerNode::compute_output_shape(TensorShape input_shape, TensorShape weights_shape, PadStrideInfo info)
+TensorDescriptor DepthwiseConvolutionLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
+ const TensorDescriptor &weights_descriptor,
+ const PadStrideInfo &info)
{
unsigned int output_width = 0;
unsigned int output_height = 0;
- std::tie(output_width, output_height) = scaled_dimensions(input_shape.x(), input_shape.y(), weights_shape.x(), weights_shape.y(), info);
- TensorShape output_shape{ input_shape };
- output_shape.set(0, output_width);
- output_shape.set(1, output_height);
+ const unsigned int input_width = get_dimension_size(input_descriptor, DataLayoutDimension::WIDTH);
+ const unsigned int input_height = get_dimension_size(input_descriptor, DataLayoutDimension::HEIGHT);
+ const unsigned int kernel_width = get_dimension_size(weights_descriptor, DataLayoutDimension::WIDTH);
+ const unsigned int kernel_height = get_dimension_size(weights_descriptor, DataLayoutDimension::HEIGHT);
- return output_shape;
+ std::tie(output_width, output_height) = scaled_dimensions(input_width, input_height, kernel_width, kernel_height, info);
+
+ TensorDescriptor output_descriptor = input_descriptor;
+ output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::WIDTH), output_width);
+ output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::HEIGHT), output_height);
+
+ return output_descriptor;
}
bool DepthwiseConvolutionLayerNode::forward_descriptors()
@@ -86,15 +95,7 @@ TensorDescriptor DepthwiseConvolutionLayerNode::configure_output(size_t idx) con
ARM_COMPUTE_ERROR_ON(src == nullptr || weights == nullptr);
- TensorDescriptor output_info = src->desc();
- TensorShape output_shape = compute_output_shape(src->desc().shape, weights->desc().shape, _info);
- output_info.shape = output_shape;
- return output_info;
-}
-
-Status DepthwiseConvolutionLayerNode::validate()
-{
- return Status{};
+ return compute_output_descriptor(src->desc(), weights->desc(), _info);
}
NodeType DepthwiseConvolutionLayerNode::type() const
diff --git a/src/graph/nodes/EltwiseLayerNode.cpp b/src/graph/nodes/EltwiseLayerNode.cpp
index b794043f2f..6f1e0eecd9 100644
--- a/src/graph/nodes/EltwiseLayerNode.cpp
+++ b/src/graph/nodes/EltwiseLayerNode.cpp
@@ -65,11 +65,6 @@ TensorDescriptor EltwiseLayerNode::configure_output(size_t idx) const
return src->desc();
}
-Status EltwiseLayerNode::validate()
-{
- return Status{};
-}
-
NodeType EltwiseLayerNode::type() const
{
return NodeType::EltwiseLayer;
diff --git a/src/graph/nodes/FlattenLayerNode.cpp b/src/graph/nodes/FlattenLayerNode.cpp
index 8b847c7056..78b45dc305 100644
--- a/src/graph/nodes/FlattenLayerNode.cpp
+++ b/src/graph/nodes/FlattenLayerNode.cpp
@@ -62,11 +62,6 @@ TensorDescriptor FlattenLayerNode::configure_output(size_t idx) const
return output_desc;
}
-Status FlattenLayerNode::validate()
-{
- return Status{};
-}
-
NodeType FlattenLayerNode::type() const
{
return NodeType::FlattenLayer;
diff --git a/src/graph/nodes/FullyConnectedLayer.cpp b/src/graph/nodes/FullyConnectedLayer.cpp
index cbf2b35ddd..d94a7851ff 100644
--- a/src/graph/nodes/FullyConnectedLayer.cpp
+++ b/src/graph/nodes/FullyConnectedLayer.cpp
@@ -38,10 +38,11 @@ FullyConnectedLayerNode::FullyConnectedLayerNode(unsigned int num_outputs)
_outputs.resize(1, NullTensorID);
}
-TensorShape FullyConnectedLayerNode::compute_weights_shape(TensorShape input_shape, unsigned int num_outputs)
+TensorDescriptor FullyConnectedLayerNode::compute_weights_descriptor(const TensorDescriptor &input_descriptor,
+ unsigned int num_outputs)
{
unsigned int num_weights = 1;
- unsigned int num_dimensions = input_shape.num_dimensions();
+ unsigned int num_dimensions = input_descriptor.shape.num_dimensions();
// Ignore the batch dimension if there is one:
if(num_dimensions == 2 || num_dimensions == 4)
{
@@ -49,20 +50,29 @@ TensorShape FullyConnectedLayerNode::compute_weights_shape(TensorShape input_sha
}
for(unsigned int i = 0; i < num_dimensions; i++)
{
- num_weights *= input_shape[i];
+ num_weights *= input_descriptor.shape[i];
}
- return TensorShape(num_weights, num_outputs);
+
+ TensorDescriptor weights_descriptor = input_descriptor;
+ weights_descriptor.shape = TensorShape(num_weights, num_outputs);
+
+ return weights_descriptor;
}
-TensorShape FullyConnectedLayerNode::compute_output_shape(TensorShape input_shape, unsigned int num_outputs)
+TensorDescriptor FullyConnectedLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
+ unsigned int num_outputs)
{
// Note: Only 1D batch space is supported at the moment
- unsigned int batches = input_shape[1];
- if(input_shape.num_dimensions() > 2)
+ unsigned int batches = input_descriptor.shape[1];
+ if(input_descriptor.shape.num_dimensions() > 2)
{
- batches = input_shape[3];
+ batches = input_descriptor.shape[3];
}
- return TensorShape(num_outputs, batches);
+
+ TensorDescriptor output_descriptor = input_descriptor;
+ output_descriptor.shape = TensorShape(num_outputs, batches);
+
+ return output_descriptor;
}
bool FullyConnectedLayerNode::forward_descriptors()
@@ -83,15 +93,7 @@ TensorDescriptor FullyConnectedLayerNode::configure_output(size_t idx) const
const Tensor *src = input(0);
ARM_COMPUTE_ERROR_ON(src == nullptr);
- TensorDescriptor output_info = src->desc();
- TensorShape output_shape = compute_output_shape(src->desc().shape, _num_outputs);
- output_info.shape = output_shape;
- return output_info;
-}
-
-Status FullyConnectedLayerNode::validate()
-{
- return Status{};
+ return compute_output_descriptor(src->desc(), _num_outputs);
}
NodeType FullyConnectedLayerNode::type() const
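
A worked sketch of the fully-connected shape arithmetic above; the helper name and the plain-vector shape representation are illustrative:

```cpp
#include <cstddef>
#include <utility>
#include <vector>

// Mirrors compute_weights_descriptor: flatten every non-batch input
// dimension into num_weights, then pair it with num_outputs.
std::pair<std::size_t, std::size_t> fc_weights_shape(const std::vector<std::size_t> &in,
                                                     std::size_t num_outputs)
{
    std::size_t dims = in.size();
    if(dims == 2 || dims == 4) // a trailing 1-D batch dimension is ignored
    {
        --dims;
    }
    std::size_t num_weights = 1;
    for(std::size_t i = 0; i < dims; ++i)
    {
        num_weights *= in[i];
    }
    // MobileNet head: input {1, 1, 1024, N} -> weights {1024, 1000} for
    // 1000 classes; the output descriptor becomes {num_outputs, batches}.
    return { num_weights, num_outputs };
}
```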
diff --git a/src/graph/nodes/InputNode.cpp b/src/graph/nodes/InputNode.cpp
index e912633a66..709eaae14c 100644
--- a/src/graph/nodes/InputNode.cpp
+++ b/src/graph/nodes/InputNode.cpp
@@ -31,7 +31,7 @@ namespace arm_compute
namespace graph
{
InputNode::InputNode(TensorDescriptor desc)
- : _desc(desc)
+ : _desc(std::move(desc))
{
_outputs.resize(1, NullTensorID);
}
@@ -54,11 +54,6 @@ TensorDescriptor InputNode::configure_output(size_t idx) const
return _desc;
}
-Status InputNode::validate()
-{
- return Status{};
-}
-
NodeType InputNode::type() const
{
return NodeType::Input;
diff --git a/src/graph/nodes/NormalizationLayerNode.cpp b/src/graph/nodes/NormalizationLayerNode.cpp
index a9f2fbd066..a7b373860e 100644
--- a/src/graph/nodes/NormalizationLayerNode.cpp
+++ b/src/graph/nodes/NormalizationLayerNode.cpp
@@ -66,11 +66,6 @@ TensorDescriptor NormalizationLayerNode::configure_output(size_t idx) const
return src->desc();
}
-Status NormalizationLayerNode::validate()
-{
- return Status{};
-}
-
NodeType NormalizationLayerNode::type() const
{
return NodeType::NormalizationLayer;
diff --git a/src/graph/nodes/OutputNode.cpp b/src/graph/nodes/OutputNode.cpp
index 4c63bfa20c..8aa249bc2a 100644
--- a/src/graph/nodes/OutputNode.cpp
+++ b/src/graph/nodes/OutputNode.cpp
@@ -48,11 +48,6 @@ TensorDescriptor OutputNode::configure_output(size_t idx) const
return TensorDescriptor();
}
-Status OutputNode::validate()
-{
- return Status{};
-}
-
NodeType OutputNode::type() const
{
return NodeType::Output;
diff --git a/src/graph/nodes/PoolingLayerNode.cpp b/src/graph/nodes/PoolingLayerNode.cpp
index a7b6b3679a..26c145ae31 100644
--- a/src/graph/nodes/PoolingLayerNode.cpp
+++ b/src/graph/nodes/PoolingLayerNode.cpp
@@ -26,6 +26,7 @@
#include "arm_compute/core/Utils.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/INodeVisitor.h"
+#include "arm_compute/graph/Utils.h"
namespace arm_compute
{
@@ -43,20 +44,24 @@ PoolingLayerInfo PoolingLayerNode::pooling_info() const
return _info;
}
-TensorShape PoolingLayerNode::compute_output_shape(TensorShape input_shape, PoolingLayerInfo info)
+TensorDescriptor PoolingLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
+ PoolingLayerInfo info)
{
- const int pool_size_x = info.is_global_pooling() ? input_shape.x() : info.pool_size().width;
- const int pool_size_y = info.is_global_pooling() ? input_shape.y() : info.pool_size().height;
-
unsigned int pooled_width = 0;
unsigned int pooled_height = 0;
- std::tie(pooled_width, pooled_height) = scaled_dimensions(input_shape.x(), input_shape.y(), pool_size_x, pool_size_y, info.pad_stride_info());
- TensorShape output_shape{ input_shape };
- output_shape.set(0, pooled_width);
- output_shape.set(1, pooled_height);
+ const unsigned int input_width = get_dimension_size(input_descriptor, DataLayoutDimension::WIDTH);
+ const unsigned int input_height = get_dimension_size(input_descriptor, DataLayoutDimension::HEIGHT);
+ const unsigned int pool_size_x = info.is_global_pooling() ? input_width : info.pool_size().width;
+ const unsigned int pool_size_y = info.is_global_pooling() ? input_height : info.pool_size().height;
+
+ std::tie(pooled_width, pooled_height) = scaled_dimensions(input_width, input_height, pool_size_x, pool_size_y, info.pad_stride_info());
+
+ TensorDescriptor output_descriptor = input_descriptor;
+ output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::WIDTH), pooled_width);
+ output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::HEIGHT), pooled_height);
- return output_shape;
+ return output_descriptor;
}
bool PoolingLayerNode::forward_descriptors()
@@ -79,15 +84,7 @@ TensorDescriptor PoolingLayerNode::configure_output(size_t idx) const
const Tensor *src = input(0);
ARM_COMPUTE_ERROR_ON(src == nullptr);
- TensorDescriptor output_info = src->desc();
- TensorShape output_shape = compute_output_shape(src->desc().shape, _info);
- output_info.shape = output_shape;
- return output_info;
-}
-
-Status PoolingLayerNode::validate()
-{
- return Status{};
+ return compute_output_descriptor(src->desc(), _info);
}
NodeType PoolingLayerNode::type() const
diff --git a/src/graph/nodes/ReshapeLayer.cpp b/src/graph/nodes/ReshapeLayer.cpp
index 2757f06bd3..58610e9b1c 100644
--- a/src/graph/nodes/ReshapeLayer.cpp
+++ b/src/graph/nodes/ReshapeLayer.cpp
@@ -63,11 +63,6 @@ TensorDescriptor ReshapeLayerNode::configure_output(size_t idx) const
return output_desc;
}
-Status ReshapeLayerNode::validate()
-{
- return Status{};
-}
-
NodeType ReshapeLayerNode::type() const
{
return NodeType::ReshapeLayer;
diff --git a/src/graph/nodes/SoftmaxLayerNode.cpp b/src/graph/nodes/SoftmaxLayerNode.cpp
index b6241e6654..57e556160f 100644
--- a/src/graph/nodes/SoftmaxLayerNode.cpp
+++ b/src/graph/nodes/SoftmaxLayerNode.cpp
@@ -69,11 +69,6 @@ TensorDescriptor SoftmaxLayerNode::configure_output(size_t idx) const
return out_desc;
}
-Status SoftmaxLayerNode::validate()
-{
- return Status{};
-}
-
NodeType SoftmaxLayerNode::type() const
{
return NodeType::SoftmaxLayer;
diff --git a/src/graph/nodes/SplitLayerNode.cpp b/src/graph/nodes/SplitLayerNode.cpp
index c8fb43c2a1..5d46c9dcc9 100644
--- a/src/graph/nodes/SplitLayerNode.cpp
+++ b/src/graph/nodes/SplitLayerNode.cpp
@@ -48,26 +48,25 @@ unsigned int SplitLayerNode::axis() const
return _axis;
}
-std::pair<TensorShape, Coordinates> SplitLayerNode::compute_output_shape(TensorShape input_shape, unsigned int num_splits, unsigned int axis, unsigned int idx)
+std::pair<TensorDescriptor, Coordinates> SplitLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
+ unsigned int num_splits, unsigned int axis, unsigned int idx)
{
- ARM_COMPUTE_ERROR_ON(axis >= input_shape.num_dimensions());
- ARM_COMPUTE_ERROR_ON_MSG(input_shape[axis] % num_splits, "Split should be exact");
+ const unsigned int split_size = input_descriptor.shape[axis] / num_splits;
- const unsigned int split_size = input_shape[axis] / num_splits;
-
- TensorShape output_shape = input_shape;
- output_shape.set(axis, split_size);
+ TensorDescriptor output_descriptor = input_descriptor;
+ output_descriptor.shape.set(axis, split_size);
Coordinates coords;
coords.set(axis, idx * split_size);
- return std::make_pair(output_shape, coords);
+ return std::make_pair(output_descriptor, coords);
}
bool SplitLayerNode::forward_descriptors()
{
if(input_id(0) != NullTensorID)
{
+ validate();
for(unsigned int i = 0; i < _outputs.size(); ++i)
{
if(output_id(i) != NullTensorID)
@@ -90,17 +89,19 @@ TensorDescriptor SplitLayerNode::configure_output(size_t idx) const
const Tensor *src = input(0);
ARM_COMPUTE_ERROR_ON(src == nullptr);
- TensorShape output_shape;
-
- TensorDescriptor output_info = src->desc();
- std::tie(output_shape, std::ignore) = compute_output_shape(src->desc().shape, _num_splits, _axis, idx);
- output_info.shape = output_shape;
+ TensorDescriptor output_info;
+ std::tie(output_info, std::ignore) = compute_output_descriptor(src->desc(), _num_splits, _axis, idx);
return output_info;
}
-Status SplitLayerNode::validate()
+Status SplitLayerNode::validate() const
{
+ const Tensor *src = input(0);
+ ARM_COMPUTE_RETURN_ERROR_ON(src == nullptr);
+ ARM_COMPUTE_RETURN_ERROR_ON(_axis >= src->desc().shape.num_dimensions());
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->desc().shape[_axis] % _num_splits, "Split should be exact");
+
return Status{};
}
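
SplitLayerNode is the first node to exploit the new INode::validate() default added in INode.cpp: the base class returns an empty Status, and only nodes with real preconditions override it. A minimal sketch of the idiom with stand-in types (all names illustrative):

```cpp
#include <iostream>
#include <string>

struct Status // stand-in for arm_compute::Status
{
    std::string error;
    explicit operator bool() const { return error.empty(); }
};

struct INode
{
    virtual ~INode() = default;
    virtual Status validate() const { return Status{}; } // default: nothing to check
};

struct SplitNode : INode
{
    unsigned int axis = 2, num_dims = 4, extent = 7, num_splits = 2;
    Status validate() const override
    {
        if(axis >= num_dims)
            return Status{ "Split axis out of range" };
        if(extent % num_splits != 0)
            return Status{ "Split should be exact" }; // 7 % 2 != 0 -> error
        return Status{};
    }
};

int main()
{
    SplitNode n;
    const Status s = n.validate();
    if(!bool(s))
        std::cout << s.error << '\n'; // prints "Split should be exact"
    return 0;
}
```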
diff --git a/src/graph/printers/DotGraphPrinter.cpp b/src/graph/printers/DotGraphPrinter.cpp
index 47b1bb56bf..61cf42356f 100644
--- a/src/graph/printers/DotGraphPrinter.cpp
+++ b/src/graph/printers/DotGraphPrinter.cpp
@@ -164,7 +164,7 @@ void DotGraphPrinter::print_edges(const Graph &g, std::ostream &os)
os << source_node_id << " -> " << sink_node_id << " ";
const Tensor *t = e->tensor();
ARM_COMPUTE_ERROR_ON(t == nullptr);
- os << R"([label = ")" << t->desc().shape << R"( \n )" << t->desc().data_type << R"("])";
+ os << R"([label = ")" << t->desc().shape << R"( \n )" << t->desc().data_type << R"( \n )" << t->desc().layout << R"("])";
os << ";\n";
}
}