author     Georgios Pinitas <georgios.pinitas@arm.com>    2018-04-27 19:07:19 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>      2018-11-02 16:51:17 +0000
commit     cac13b1cfd593889271f8e2191be2039b8d88f36 (patch)
tree       d1c5196877d7fbd5dcfbb9f9003faf6035f82a33 /src/graph/nodes
parent     ad0c7388f6261989a268ffb2d042f2bd80736e3f (diff)
download   ComputeLibrary-cac13b1cfd593889271f8e2191be2039b8d88f36.tar.gz
COMPMID-1097: Port mobilenet to NHWC
Change-Id: I789065bfa0d4ef133388e1904c5caf31e450f80f
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/129495
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
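The substance of the port is that node output shapes are no longer computed from raw TensorShape coordinates (which implicitly assumed NCHW ordering) but from full TensorDescriptors, with WIDTH/HEIGHT/CHANNEL resolved through the layout-aware helpers get_dimension_idx and get_dimension_size from arm_compute/graph/Utils.h. Below is a minimal stand-alone sketch of that index mapping, using simplified stand-in types rather than the library's own, and assuming the conventional ComputeLibrary ordering (NCHW: W,H,C,N and NHWC: C,W,H,N starting from index 0):

// Sketch only: simplified stand-ins for the library types, not the real API.
#include <array>
#include <cstddef>
#include <iostream>

enum class DataLayout { NCHW, NHWC };
enum class DataLayoutDimension { WIDTH, HEIGHT, CHANNEL };

struct TensorDescriptor
{
    std::array<std::size_t, 4> shape;  // index 0 is the fastest-changing dimension
    DataLayout                 layout;
};

// Map a semantic dimension to a shape index for the descriptor's layout.
std::size_t get_dimension_idx(const TensorDescriptor &desc, DataLayoutDimension dim)
{
    const bool nchw = (desc.layout == DataLayout::NCHW);
    switch(dim)
    {
        case DataLayoutDimension::WIDTH:   return nchw ? 0 : 1;
        case DataLayoutDimension::HEIGHT:  return nchw ? 1 : 2;
        case DataLayoutDimension::CHANNEL: return nchw ? 2 : 0;
    }
    return 0;
}

std::size_t get_dimension_size(const TensorDescriptor &desc, DataLayoutDimension dim)
{
    return desc.shape[get_dimension_idx(desc, dim)];
}

int main()
{
    // Hypothetical NHWC MobileNet input: C=3, W=224, H=224, N=1.
    TensorDescriptor nhwc{ { 3, 224, 224, 1 }, DataLayout::NHWC };
    std::cout << get_dimension_size(nhwc, DataLayoutDimension::WIDTH) << "x"
              << get_dimension_size(nhwc, DataLayoutDimension::HEIGHT) << "x"
              << get_dimension_size(nhwc, DataLayoutDimension::CHANNEL) << "\n"; // 224x224x3
}

With the shape computations expressed this way, the same node code produces correct output descriptors for both NCHW and NHWC graphs, which is what the per-file changes below implement.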
Diffstat (limited to 'src/graph/nodes')
-rw-r--r--  src/graph/nodes/ActivationLayerNode.cpp             5
-rw-r--r--  src/graph/nodes/BatchNormalizationLayerNode.cpp     5
-rw-r--r--  src/graph/nodes/ConstNode.cpp                       7
-rw-r--r--  src/graph/nodes/ConvolutionLayerNode.cpp           33
-rw-r--r--  src/graph/nodes/DepthConcatenateLayerNode.cpp      38
-rw-r--r--  src/graph/nodes/DepthwiseConvolutionLayerNode.cpp  31
-rw-r--r--  src/graph/nodes/EltwiseLayerNode.cpp                5
-rw-r--r--  src/graph/nodes/FlattenLayerNode.cpp                5
-rw-r--r--  src/graph/nodes/FullyConnectedLayer.cpp            38
-rw-r--r--  src/graph/nodes/InputNode.cpp                       7
-rw-r--r--  src/graph/nodes/NormalizationLayerNode.cpp          5
-rw-r--r--  src/graph/nodes/OutputNode.cpp                      5
-rw-r--r--  src/graph/nodes/PoolingLayerNode.cpp               33
-rw-r--r--  src/graph/nodes/ReshapeLayer.cpp                    5
-rw-r--r--  src/graph/nodes/SoftmaxLayerNode.cpp                5
-rw-r--r--  src/graph/nodes/SplitLayerNode.cpp                 29
16 files changed, 100 insertions, 156 deletions
diff --git a/src/graph/nodes/ActivationLayerNode.cpp b/src/graph/nodes/ActivationLayerNode.cpp
index 9996d2ce3f..414684cf30 100644
--- a/src/graph/nodes/ActivationLayerNode.cpp
+++ b/src/graph/nodes/ActivationLayerNode.cpp
@@ -65,11 +65,6 @@ TensorDescriptor ActivationLayerNode::configure_output(size_t idx) const
return src->desc();
}
-Status ActivationLayerNode::validate()
-{
- return Status{};
-}
-
NodeType ActivationLayerNode::type() const
{
return NodeType::ActivationLayer;
diff --git a/src/graph/nodes/BatchNormalizationLayerNode.cpp b/src/graph/nodes/BatchNormalizationLayerNode.cpp
index f7b041c828..3ae11fc24d 100644
--- a/src/graph/nodes/BatchNormalizationLayerNode.cpp
+++ b/src/graph/nodes/BatchNormalizationLayerNode.cpp
@@ -76,11 +76,6 @@ TensorDescriptor BatchNormalizationLayerNode::configure_output(size_t idx) const
return src->desc();
}
-Status BatchNormalizationLayerNode::validate()
-{
- return Status{};
-}
-
NodeType BatchNormalizationLayerNode::type() const
{
return NodeType::BatchNormalizationLayer;
diff --git a/src/graph/nodes/ConstNode.cpp b/src/graph/nodes/ConstNode.cpp
index 631971c98f..2f3cd142af 100644
--- a/src/graph/nodes/ConstNode.cpp
+++ b/src/graph/nodes/ConstNode.cpp
@@ -31,7 +31,7 @@ namespace arm_compute
namespace graph
{
ConstNode::ConstNode(TensorDescriptor desc)
- : _desc(desc)
+ : _desc(std::move(desc))
{
_outputs.resize(1, NullTensorID);
}
@@ -54,11 +54,6 @@ TensorDescriptor ConstNode::configure_output(size_t idx) const
return _desc;
}
-Status ConstNode::validate()
-{
- return Status{};
-}
-
NodeType ConstNode::type() const
{
return NodeType::Const;
diff --git a/src/graph/nodes/ConvolutionLayerNode.cpp b/src/graph/nodes/ConvolutionLayerNode.cpp
index eb0c6a1c1a..eaf1f7f035 100644
--- a/src/graph/nodes/ConvolutionLayerNode.cpp
+++ b/src/graph/nodes/ConvolutionLayerNode.cpp
@@ -26,6 +26,7 @@
#include "arm_compute/core/Utils.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/INodeVisitor.h"
+#include "arm_compute/graph/Utils.h"
namespace arm_compute
{
@@ -53,18 +54,26 @@ PadStrideInfo ConvolutionLayerNode::convolution_info() const
return _info;
}
-TensorShape ConvolutionLayerNode::compute_output_shape(TensorShape input_shape, TensorShape weights_shape, PadStrideInfo info)
+TensorDescriptor ConvolutionLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
+ const TensorDescriptor &weights_descriptor,
+ const PadStrideInfo &info)
{
unsigned int output_width = 0;
unsigned int output_height = 0;
- std::tie(output_width, output_height) = scaled_dimensions(input_shape.x(), input_shape.y(), weights_shape.x(), weights_shape.y(), info);
- TensorShape output_shape{ input_shape };
- output_shape.set(0, output_width);
- output_shape.set(1, output_height);
- output_shape.set(2, weights_shape[3]);
+ const unsigned int input_width = get_dimension_size(input_descriptor, DataLayoutDimension::WIDTH);
+ const unsigned int input_height = get_dimension_size(input_descriptor, DataLayoutDimension::HEIGHT);
+ const unsigned int kernel_width = get_dimension_size(weights_descriptor, DataLayoutDimension::WIDTH);
+ const unsigned int kernel_height = get_dimension_size(weights_descriptor, DataLayoutDimension::HEIGHT);
- return output_shape;
+ std::tie(output_width, output_height) = scaled_dimensions(input_width, input_height, kernel_width, kernel_height, info);
+
+ TensorDescriptor output_descriptor = input_descriptor;
+ output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::WIDTH), output_width);
+ output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::HEIGHT), output_height);
+ output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::CHANNEL), weights_descriptor.shape[3]);
+
+ return output_descriptor;
}
bool ConvolutionLayerNode::forward_descriptors()
@@ -87,10 +96,7 @@ TensorDescriptor ConvolutionLayerNode::configure_output(size_t idx) const
ARM_COMPUTE_ERROR_ON(src == nullptr || weights == nullptr);
- TensorDescriptor output_info = src->desc();
- TensorShape output_shape = compute_output_shape(src->desc().shape, weights->desc().shape, _info);
- output_info.shape = output_shape;
-
+ TensorDescriptor output_info = compute_output_descriptor(src->desc(), weights->desc(), _info);
if(!_out_quant_info.empty())
{
output_info.quant_info = _out_quant_info;
@@ -99,11 +105,6 @@ TensorDescriptor ConvolutionLayerNode::configure_output(size_t idx) const
return output_info;
}
-Status ConvolutionLayerNode::validate()
-{
- return Status{};
-}
-
NodeType ConvolutionLayerNode::type() const
{
return NodeType::ConvolutionLayer;
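The spatial output size of the convolution node still comes from scaled_dimensions; only the way the input and kernel extents are looked up changed. As a rough illustration of that arithmetic (assuming floor rounding, symmetric padding and no dilation; the real helper takes all of these from PadStrideInfo), MobileNet's first 3x3, stride-2 convolution on a 224x224 input with 1 pixel of padding yields 112x112:

// Illustration only: standard convolution output-size arithmetic with floor
// rounding; the library's scaled_dimensions also handles rounding policy and
// asymmetric padding via PadStrideInfo.
#include <iostream>

unsigned int conv_out_dim(unsigned int in, unsigned int kernel,
                          unsigned int stride, unsigned int pad)
{
    return (in + 2 * pad - kernel) / stride + 1;
}

int main()
{
    // 224x224 input, 3x3 kernel, stride 2, pad 1 -> 112x112
    std::cout << conv_out_dim(224, 3, 2, 1) << "x" << conv_out_dim(224, 3, 2, 1) << "\n";
}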
diff --git a/src/graph/nodes/DepthConcatenateLayerNode.cpp b/src/graph/nodes/DepthConcatenateLayerNode.cpp
index 1c0539744f..08cccc1ff1 100644
--- a/src/graph/nodes/DepthConcatenateLayerNode.cpp
+++ b/src/graph/nodes/DepthConcatenateLayerNode.cpp
@@ -34,7 +34,7 @@ namespace graph
DepthConcatenateLayerNode::DepthConcatenateLayerNode(unsigned int total_nodes)
: _total_nodes(total_nodes), _is_enabled(true)
{
- _input_edges.resize(total_nodes, EmptyEdgeID);
+ _input_edges.resize(_total_nodes, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
}
@@ -48,28 +48,28 @@ bool DepthConcatenateLayerNode::is_enabled() const
return _is_enabled;
}
-TensorShape DepthConcatenateLayerNode::compute_output_shape(const std::vector<TensorShape> &input_shapes)
+TensorDescriptor DepthConcatenateLayerNode::compute_output_descriptor(const std::vector<TensorDescriptor> &input_descriptors)
{
- ARM_COMPUTE_ERROR_ON(input_shapes.size() == 0);
+ ARM_COMPUTE_ERROR_ON(input_descriptors.size() == 0);
- TensorShape output_shape = input_shapes[0];
+ TensorDescriptor output_descriptor = input_descriptors[0];
size_t max_x = 0;
size_t max_y = 0;
size_t depth = 0;
- for(const auto &shape : input_shapes)
+ for(const auto &input_descriptor : input_descriptors)
{
- max_x = std::max(shape.x(), max_x);
- max_y = std::max(shape.y(), max_y);
- depth += shape.z();
+ max_x = std::max(input_descriptor.shape.x(), max_x);
+ max_y = std::max(input_descriptor.shape.y(), max_y);
+ depth += input_descriptor.shape.z();
}
- output_shape.set(0, max_x);
- output_shape.set(1, max_y);
- output_shape.set(2, depth);
+ output_descriptor.shape.set(0, max_x);
+ output_descriptor.shape.set(1, max_y);
+ output_descriptor.shape.set(2, depth);
- return output_shape;
+ return output_descriptor;
}
bool DepthConcatenateLayerNode::forward_descriptors()
@@ -99,27 +99,19 @@ TensorDescriptor DepthConcatenateLayerNode::configure_output(size_t idx) const
if(are_all_inputs_set)
{
- std::vector<TensorShape> inputs_shapes;
+ std::vector<TensorDescriptor> inputs_descriptors;
for(unsigned int i = 0; i < _input_edges.size(); ++i)
{
const Tensor *t = _graph->tensor(input_id(i));
ARM_COMPUTE_ERROR_ON(t == nullptr);
- inputs_shapes.push_back(t->desc().shape);
+ inputs_descriptors.push_back(t->desc());
}
- output_info = input(0)->desc();
- TensorShape output_shape = compute_output_shape(inputs_shapes);
- output_info.shape = output_shape;
+ output_info = compute_output_descriptor(inputs_descriptors);
}
return output_info;
}
-Status DepthConcatenateLayerNode::validate()
-{
- ARM_COMPUTE_UNUSED(_total_nodes);
- return Status{};
-}
-
NodeType DepthConcatenateLayerNode::type() const
{
return NodeType::DepthConcatenateLayer;
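The concatenate arithmetic itself is unchanged: width and height take the per-input maximum and depth accumulates, with the shape still written at fixed indices 0/1/2. A worked example with hypothetical input shapes:

// Worked example (hypothetical shapes) of the depth-concatenate output shape.
#include <algorithm>
#include <array>
#include <iostream>
#include <vector>

int main()
{
    // Each entry is {x, y, z} in TensorShape index order.
    std::vector<std::array<unsigned int, 3>> inputs = { { 56, 56, 64 }, { 56, 56, 128 } };

    unsigned int max_x = 0, max_y = 0, depth = 0;
    for(const auto &s : inputs)
    {
        max_x = std::max(s[0], max_x);
        max_y = std::max(s[1], max_y);
        depth += s[2];
    }
    std::cout << max_x << "x" << max_y << "x" << depth << "\n"; // 56x56x192
}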
diff --git a/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp b/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
index 67a39029e6..1a6f8d398d 100644
--- a/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
+++ b/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
@@ -26,6 +26,7 @@
#include "arm_compute/core/Utils.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/INodeVisitor.h"
+#include "arm_compute/graph/Utils.h"
namespace arm_compute
{
@@ -53,17 +54,25 @@ PadStrideInfo DepthwiseConvolutionLayerNode::convolution_info() const
return _info;
}
-TensorShape DepthwiseConvolutionLayerNode::compute_output_shape(TensorShape input_shape, TensorShape weights_shape, PadStrideInfo info)
+TensorDescriptor DepthwiseConvolutionLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
+ const TensorDescriptor &weights_descriptor,
+ const PadStrideInfo &info)
{
unsigned int output_width = 0;
unsigned int output_height = 0;
- std::tie(output_width, output_height) = scaled_dimensions(input_shape.x(), input_shape.y(), weights_shape.x(), weights_shape.y(), info);
- TensorShape output_shape{ input_shape };
- output_shape.set(0, output_width);
- output_shape.set(1, output_height);
+ const unsigned int input_width = get_dimension_size(input_descriptor, DataLayoutDimension::WIDTH);
+ const unsigned int input_height = get_dimension_size(input_descriptor, DataLayoutDimension::HEIGHT);
+ const unsigned int kernel_width = get_dimension_size(weights_descriptor, DataLayoutDimension::WIDTH);
+ const unsigned int kernel_height = get_dimension_size(weights_descriptor, DataLayoutDimension::HEIGHT);
- return output_shape;
+ std::tie(output_width, output_height) = scaled_dimensions(input_width, input_height, kernel_width, kernel_height, info);
+
+ TensorDescriptor output_descriptor = input_descriptor;
+ output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::WIDTH), output_width);
+ output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::HEIGHT), output_height);
+
+ return output_descriptor;
}
bool DepthwiseConvolutionLayerNode::forward_descriptors()
@@ -86,15 +95,7 @@ TensorDescriptor DepthwiseConvolutionLayerNode::configure_output(size_t idx) con
ARM_COMPUTE_ERROR_ON(src == nullptr || weights == nullptr);
- TensorDescriptor output_info = src->desc();
- TensorShape output_shape = compute_output_shape(src->desc().shape, weights->desc().shape, _info);
- output_info.shape = output_shape;
- return output_info;
-}
-
-Status DepthwiseConvolutionLayerNode::validate()
-{
- return Status{};
+ return compute_output_descriptor(src->desc(), weights->desc(), _info);
}
NodeType DepthwiseConvolutionLayerNode::type() const
diff --git a/src/graph/nodes/EltwiseLayerNode.cpp b/src/graph/nodes/EltwiseLayerNode.cpp
index b794043f2f..6f1e0eecd9 100644
--- a/src/graph/nodes/EltwiseLayerNode.cpp
+++ b/src/graph/nodes/EltwiseLayerNode.cpp
@@ -65,11 +65,6 @@ TensorDescriptor EltwiseLayerNode::configure_output(size_t idx) const
return src->desc();
}
-Status EltwiseLayerNode::validate()
-{
- return Status{};
-}
-
NodeType EltwiseLayerNode::type() const
{
return NodeType::EltwiseLayer;
diff --git a/src/graph/nodes/FlattenLayerNode.cpp b/src/graph/nodes/FlattenLayerNode.cpp
index 8b847c7056..78b45dc305 100644
--- a/src/graph/nodes/FlattenLayerNode.cpp
+++ b/src/graph/nodes/FlattenLayerNode.cpp
@@ -62,11 +62,6 @@ TensorDescriptor FlattenLayerNode::configure_output(size_t idx) const
return output_desc;
}
-Status FlattenLayerNode::validate()
-{
- return Status{};
-}
-
NodeType FlattenLayerNode::type() const
{
return NodeType::FlattenLayer;
diff --git a/src/graph/nodes/FullyConnectedLayer.cpp b/src/graph/nodes/FullyConnectedLayer.cpp
index cbf2b35ddd..d94a7851ff 100644
--- a/src/graph/nodes/FullyConnectedLayer.cpp
+++ b/src/graph/nodes/FullyConnectedLayer.cpp
@@ -38,10 +38,11 @@ FullyConnectedLayerNode::FullyConnectedLayerNode(unsigned int num_outputs)
_outputs.resize(1, NullTensorID);
}
-TensorShape FullyConnectedLayerNode::compute_weights_shape(TensorShape input_shape, unsigned int num_outputs)
+TensorDescriptor FullyConnectedLayerNode::compute_weights_descriptor(const TensorDescriptor &input_descriptor,
+ unsigned int num_outputs)
{
unsigned int num_weights = 1;
- unsigned int num_dimensions = input_shape.num_dimensions();
+ unsigned int num_dimensions = input_descriptor.shape.num_dimensions();
// Ignore the batch dimension if there is one:
if(num_dimensions == 2 || num_dimensions == 4)
{
@@ -49,20 +50,29 @@ TensorShape FullyConnectedLayerNode::compute_weights_shape(TensorShape input_sha
}
for(unsigned int i = 0; i < num_dimensions; i++)
{
- num_weights *= input_shape[i];
+ num_weights *= input_descriptor.shape[i];
}
- return TensorShape(num_weights, num_outputs);
+
+ TensorDescriptor weights_descriptor = input_descriptor;
+ weights_descriptor.shape = TensorShape(num_weights, num_outputs);
+
+ return weights_descriptor;
}
-TensorShape FullyConnectedLayerNode::compute_output_shape(TensorShape input_shape, unsigned int num_outputs)
+TensorDescriptor FullyConnectedLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
+ unsigned int num_outputs)
{
// Note: Only 1D batch space is supported at the moment
- unsigned int batches = input_shape[1];
- if(input_shape.num_dimensions() > 2)
+ unsigned int batches = input_descriptor.shape[1];
+ if(input_descriptor.shape.num_dimensions() > 2)
{
- batches = input_shape[3];
+ batches = input_descriptor.shape[3];
}
- return TensorShape(num_outputs, batches);
+
+ TensorDescriptor output_descriptor = input_descriptor;
+ output_descriptor.shape = TensorShape(num_outputs, batches);
+
+ return output_descriptor;
}
bool FullyConnectedLayerNode::forward_descriptors()
@@ -83,15 +93,7 @@ TensorDescriptor FullyConnectedLayerNode::configure_output(size_t idx) const
const Tensor *src = input(0);
ARM_COMPUTE_ERROR_ON(src == nullptr);
- TensorDescriptor output_info = src->desc();
- TensorShape output_shape = compute_output_shape(src->desc().shape, _num_outputs);
- output_info.shape = output_shape;
- return output_info;
-}
-
-Status FullyConnectedLayerNode::validate()
-{
- return Status{};
+ return compute_output_descriptor(src->desc(), _num_outputs);
}
NodeType FullyConnectedLayerNode::type() const
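For reference, the fully-connected shape arithmetic works out as follows for a hypothetical 4D input of W=7, H=7, C=1024 with a batch of 1 and 1000 outputs: the batch dimension is dropped from the flattening, giving a (50176, 1000) weights shape and a (1000, 1) output shape. A plain-integer sketch of the same computation:

// Plain-integer sketch of FullyConnectedLayerNode's shape arithmetic
// (hypothetical 4D input: W=7, H=7, C=1024, N=1 and 1000 outputs).
#include <iostream>

int main()
{
    const unsigned int shape[4]    = { 7, 7, 1024, 1 };
    const unsigned int num_outputs = 1000;

    unsigned int num_dimensions = 4;
    if(num_dimensions == 2 || num_dimensions == 4)
    {
        num_dimensions--; // ignore the batch dimension
    }

    unsigned int num_weights = 1;
    for(unsigned int i = 0; i < num_dimensions; ++i)
    {
        num_weights *= shape[i];
    }

    const unsigned int batches = shape[3]; // 4D input, so the batch count sits at index 3

    std::cout << "weights: " << num_weights << " x " << num_outputs << "\n"; // 50176 x 1000
    std::cout << "output : " << num_outputs << " x " << batches     << "\n"; // 1000 x 1
}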
diff --git a/src/graph/nodes/InputNode.cpp b/src/graph/nodes/InputNode.cpp
index e912633a66..709eaae14c 100644
--- a/src/graph/nodes/InputNode.cpp
+++ b/src/graph/nodes/InputNode.cpp
@@ -31,7 +31,7 @@ namespace arm_compute
namespace graph
{
InputNode::InputNode(TensorDescriptor desc)
- : _desc(desc)
+ : _desc(std::move(desc))
{
_outputs.resize(1, NullTensorID);
}
@@ -54,11 +54,6 @@ TensorDescriptor InputNode::configure_output(size_t idx) const
return _desc;
}
-Status InputNode::validate()
-{
- return Status{};
-}
-
NodeType InputNode::type() const
{
return NodeType::Input;
diff --git a/src/graph/nodes/NormalizationLayerNode.cpp b/src/graph/nodes/NormalizationLayerNode.cpp
index a9f2fbd066..a7b373860e 100644
--- a/src/graph/nodes/NormalizationLayerNode.cpp
+++ b/src/graph/nodes/NormalizationLayerNode.cpp
@@ -66,11 +66,6 @@ TensorDescriptor NormalizationLayerNode::configure_output(size_t idx) const
return src->desc();
}
-Status NormalizationLayerNode::validate()
-{
- return Status{};
-}
-
NodeType NormalizationLayerNode::type() const
{
return NodeType::NormalizationLayer;
diff --git a/src/graph/nodes/OutputNode.cpp b/src/graph/nodes/OutputNode.cpp
index 4c63bfa20c..8aa249bc2a 100644
--- a/src/graph/nodes/OutputNode.cpp
+++ b/src/graph/nodes/OutputNode.cpp
@@ -48,11 +48,6 @@ TensorDescriptor OutputNode::configure_output(size_t idx) const
return TensorDescriptor();
}
-Status OutputNode::validate()
-{
- return Status{};
-}
-
NodeType OutputNode::type() const
{
return NodeType::Output;
diff --git a/src/graph/nodes/PoolingLayerNode.cpp b/src/graph/nodes/PoolingLayerNode.cpp
index a7b6b3679a..26c145ae31 100644
--- a/src/graph/nodes/PoolingLayerNode.cpp
+++ b/src/graph/nodes/PoolingLayerNode.cpp
@@ -26,6 +26,7 @@
#include "arm_compute/core/Utils.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/INodeVisitor.h"
+#include "arm_compute/graph/Utils.h"
namespace arm_compute
{
@@ -43,20 +44,24 @@ PoolingLayerInfo PoolingLayerNode::pooling_info() const
return _info;
}
-TensorShape PoolingLayerNode::compute_output_shape(TensorShape input_shape, PoolingLayerInfo info)
+TensorDescriptor PoolingLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
+ PoolingLayerInfo info)
{
- const int pool_size_x = info.is_global_pooling() ? input_shape.x() : info.pool_size().width;
- const int pool_size_y = info.is_global_pooling() ? input_shape.y() : info.pool_size().height;
-
unsigned int pooled_width = 0;
unsigned int pooled_height = 0;
- std::tie(pooled_width, pooled_height) = scaled_dimensions(input_shape.x(), input_shape.y(), pool_size_x, pool_size_y, info.pad_stride_info());
- TensorShape output_shape{ input_shape };
- output_shape.set(0, pooled_width);
- output_shape.set(1, pooled_height);
+ const unsigned int input_width = get_dimension_size(input_descriptor, DataLayoutDimension::WIDTH);
+ const unsigned int input_height = get_dimension_size(input_descriptor, DataLayoutDimension::HEIGHT);
+ const unsigned int pool_size_x = info.is_global_pooling() ? input_width : info.pool_size().width;
+ const unsigned int pool_size_y = info.is_global_pooling() ? input_height : info.pool_size().height;
+
+ std::tie(pooled_width, pooled_height) = scaled_dimensions(input_width, input_height, pool_size_x, pool_size_y, info.pad_stride_info());
+
+ TensorDescriptor output_descriptor = input_descriptor;
+ output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::WIDTH), pooled_width);
+ output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::HEIGHT), pooled_height);
- return output_shape;
+ return output_descriptor;
}
bool PoolingLayerNode::forward_descriptors()
@@ -79,15 +84,7 @@ TensorDescriptor PoolingLayerNode::configure_output(size_t idx) const
const Tensor *src = input(0);
ARM_COMPUTE_ERROR_ON(src == nullptr);
- TensorDescriptor output_info = src->desc();
- TensorShape output_shape = compute_output_shape(src->desc().shape, _info);
- output_info.shape = output_shape;
- return output_info;
-}
-
-Status PoolingLayerNode::validate()
-{
- return Status{};
+ return compute_output_descriptor(src->desc(), _info);
}
NodeType PoolingLayerNode::type() const
diff --git a/src/graph/nodes/ReshapeLayer.cpp b/src/graph/nodes/ReshapeLayer.cpp
index 2757f06bd3..58610e9b1c 100644
--- a/src/graph/nodes/ReshapeLayer.cpp
+++ b/src/graph/nodes/ReshapeLayer.cpp
@@ -63,11 +63,6 @@ TensorDescriptor ReshapeLayerNode::configure_output(size_t idx) const
return output_desc;
}
-Status ReshapeLayerNode::validate()
-{
- return Status{};
-}
-
NodeType ReshapeLayerNode::type() const
{
return NodeType::ReshapeLayer;
diff --git a/src/graph/nodes/SoftmaxLayerNode.cpp b/src/graph/nodes/SoftmaxLayerNode.cpp
index b6241e6654..57e556160f 100644
--- a/src/graph/nodes/SoftmaxLayerNode.cpp
+++ b/src/graph/nodes/SoftmaxLayerNode.cpp
@@ -69,11 +69,6 @@ TensorDescriptor SoftmaxLayerNode::configure_output(size_t idx) const
return out_desc;
}
-Status SoftmaxLayerNode::validate()
-{
- return Status{};
-}
-
NodeType SoftmaxLayerNode::type() const
{
return NodeType::SoftmaxLayer;
diff --git a/src/graph/nodes/SplitLayerNode.cpp b/src/graph/nodes/SplitLayerNode.cpp
index c8fb43c2a1..5d46c9dcc9 100644
--- a/src/graph/nodes/SplitLayerNode.cpp
+++ b/src/graph/nodes/SplitLayerNode.cpp
@@ -48,26 +48,25 @@ unsigned int SplitLayerNode::axis() const
return _axis;
}
-std::pair<TensorShape, Coordinates> SplitLayerNode::compute_output_shape(TensorShape input_shape, unsigned int num_splits, unsigned int axis, unsigned int idx)
+std::pair<TensorDescriptor, Coordinates> SplitLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
+ unsigned int num_splits, unsigned int axis, unsigned int idx)
{
- ARM_COMPUTE_ERROR_ON(axis >= input_shape.num_dimensions());
- ARM_COMPUTE_ERROR_ON_MSG(input_shape[axis] % num_splits, "Split should be exact");
+ const unsigned int split_size = input_descriptor.shape[axis] / num_splits;
- const unsigned int split_size = input_shape[axis] / num_splits;
-
- TensorShape output_shape = input_shape;
- output_shape.set(axis, split_size);
+ TensorDescriptor output_descriptor = input_descriptor;
+ output_descriptor.shape.set(axis, split_size);
Coordinates coords;
coords.set(axis, idx * split_size);
- return std::make_pair(output_shape, coords);
+ return std::make_pair(output_descriptor, coords);
}
bool SplitLayerNode::forward_descriptors()
{
if(input_id(0) != NullTensorID)
{
+ validate();
for(unsigned int i = 0; i < _outputs.size(); ++i)
{
if(output_id(i) != NullTensorID)
@@ -90,17 +89,19 @@ TensorDescriptor SplitLayerNode::configure_output(size_t idx) const
const Tensor *src = input(0);
ARM_COMPUTE_ERROR_ON(src == nullptr);
- TensorShape output_shape;
-
- TensorDescriptor output_info = src->desc();
- std::tie(output_shape, std::ignore) = compute_output_shape(src->desc().shape, _num_splits, _axis, idx);
- output_info.shape = output_shape;
+ TensorDescriptor output_info;
+ std::tie(output_info, std::ignore) = compute_output_descriptor(src->desc(), _num_splits, _axis, idx);
return output_info;
}
-Status SplitLayerNode::validate()
+Status SplitLayerNode::validate() const
{
+ const Tensor *src = input(0);
+ ARM_COMPUTE_RETURN_ERROR_ON(src == nullptr);
+ ARM_COMPUTE_RETURN_ERROR_ON(_axis >= src->desc().shape.num_dimensions());
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->desc().shape[_axis] % _num_splits, "Split should be exact");
+
return Status{};
}
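With this patch, SplitLayerNode is the only node left with a non-trivial validate(); it is now invoked from forward_descriptors(), so an out-of-range axis or an inexact split is reported as a Status error before output descriptors are configured. A worked example of the split arithmetic with hypothetical values:

// Hypothetical example of the split arithmetic in compute_output_descriptor:
// every output keeps the input shape except along `axis`, and its starting
// Coordinates are offset by idx * split_size.
#include <iostream>

int main()
{
    const unsigned int shape[3]   = { 224, 224, 64 }; // W, H, C in TensorShape index order
    const unsigned int axis       = 2;                // split along channels
    const unsigned int num_splits = 2;

    if(shape[axis] % num_splits != 0)
    {
        std::cout << "Split should be exact\n"; // the condition validate() now rejects
        return 1;
    }

    const unsigned int split_size = shape[axis] / num_splits; // 32
    for(unsigned int idx = 0; idx < num_splits; ++idx)
    {
        std::cout << "output " << idx << ": 224x224x" << split_size
                  << " at channel offset " << idx * split_size << "\n";
    }
}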