author    Georgios Pinitas <georgios.pinitas@arm.com>  2018-05-16 15:52:35 +0100
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:52:54 +0000
commit    087eaf67dc4be4234a7fcfc3b109c1e4f5e7dd5e (patch)
tree      5cb63112eb567bbc683dfb9a0f51ce4960cf800d
parent    1b993389a3ac0cd1b0edc0b11e92fbdee127576f (diff)
download  ComputeLibrary-087eaf67dc4be4234a7fcfc3b109c1e4f5e7dd5e.tar.gz
COMPMID-1176: Adds nodes to the graph.
Nodes added:
 - ChannelShuffle
 - Resize
 - Deconvolution
 - Dummy (used for performance analysis and debugging)

Change-Id: Iad19960cbbce6e25532f77bfd34b2292c0ca9781
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/131672
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
-rw-r--r--  arm_compute/graph/GraphBuilder.h                    51
-rw-r--r--  arm_compute/graph/TypePrinter.h                     27
-rw-r--r--  arm_compute/graph/Types.h                            7
-rw-r--r--  arm_compute/graph/backends/ValidateHelpers.h        23
-rw-r--r--  arm_compute/graph/frontend/Layers.h                120
-rw-r--r--  arm_compute/graph/frontend/Types.h                   2
-rw-r--r--  arm_compute/graph/nodes/ChannelShuffleLayerNode.h   59
-rw-r--r--  arm_compute/graph/nodes/DeconvolutionLayerNode.h    79
-rw-r--r--  arm_compute/graph/nodes/DummyNode.h                 61
-rw-r--r--  arm_compute/graph/nodes/Nodes.h                      4
-rw-r--r--  arm_compute/graph/nodes/NodesFwd.h                   4
-rw-r--r--  arm_compute/graph/nodes/ResizeLayerNode.h           63
-rw-r--r--  src/graph/GraphBuilder.cpp                          83
-rw-r--r--  src/graph/backends/CL/CLFunctionsFactory.cpp       113
-rw-r--r--  src/graph/backends/CL/CLNodeValidator.cpp            2
-rw-r--r--  src/graph/backends/NEON/NEFunctionFactory.cpp       79
-rw-r--r--  src/graph/backends/NEON/NENodeValidator.cpp          3
-rw-r--r--  src/graph/nodes/ChannelShuffleLayerNode.cpp         78
-rw-r--r--  src/graph/nodes/DeconvolutionLayerNode.cpp         113
-rw-r--r--  src/graph/nodes/DummyNode.cpp                       78
-rw-r--r--  src/graph/nodes/ResizeLayerNode.cpp                 90
21 files changed, 1134 insertions(+), 5 deletions(-)
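
For context, a minimal sketch of how the four new frontend layers could be chained in a stream graph. The Stream/InputLayer/OutputLayer plumbing and the CL target come from the existing graph frontend, not from this patch, and the shapes and group counts are illustrative only.

    #include "arm_compute/graph.h"

    using namespace arm_compute;
    using namespace arm_compute::graph::frontend;

    void build_example_graph()
    {
        Stream net(0, "new_nodes_example");
        net << Target::CL
            << InputLayer(TensorDescriptor(TensorShape(56U, 56U, 232U, 1U), DataType::F32), nullptr)
            << ChannelShuffleLayer(2)                                        // shuffle channels across 2 groups
            << ResizeLayer(InterpolationPolicy::NEAREST_NEIGHBOR, 2.f, 2.f)  // upscale width/height by 2
            << DummyLayer(TensorShape(112U, 112U, 232U, 1U))                 // pass-through placeholder for profiling
            << OutputLayer(nullptr);
        // ... finalize and run the stream as in the library's graph examples
    }
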
diff --git a/arm_compute/graph/GraphBuilder.h b/arm_compute/graph/GraphBuilder.h
index 04edf673d1..6359e05a63 100644
--- a/arm_compute/graph/GraphBuilder.h
+++ b/arm_compute/graph/GraphBuilder.h
@@ -97,6 +97,16 @@ public:
static NodeID add_batch_normalization_node(Graph &g, NodeParams params, NodeIdxPair input, float epsilon,
ITensorAccessorUPtr mean_accessor = nullptr, ITensorAccessorUPtr var_accessor = nullptr,
ITensorAccessorUPtr beta_accessor = nullptr, ITensorAccessorUPtr gamma_accessor = nullptr);
+ /** Adds a channel shuffle layer node to the graph
+ *
+ * @param[in] g Graph to add the node to
+ * @param[in] params Common node parameters
+ * @param[in] input Input to the channel shuffle layer node as a NodeID-Index pair
+ * @param[in] num_groups Number of groups
+ *
+ * @return Node ID of the created node, EmptyNodeID in case of error
+ */
+ static NodeID add_channel_shuffle_node(Graph &g, NodeParams params, NodeIdxPair input, unsigned int num_groups);
/** Adds a convolution layer node to the graph
*
* TODO (COMPMID-1113): Add a graph descriptor for convolution layer node
@@ -123,6 +133,23 @@ public:
ITensorAccessorUPtr weights_accessor = nullptr, ITensorAccessorUPtr bias_accessor = nullptr,
const QuantizationInfo weights_quant_info = QuantizationInfo(),
const QuantizationInfo out_quant_info = QuantizationInfo());
+ /** Adds a deconvolution layer node to the graph
+ *
+ * @param[in] g Graph to add the node to
+ * @param[in] params Common node parameters
+ * @param[in] input Input to the deconvolution layer node as a NodeID-Index pair
+ * @param[in] kernel_spatial_extend Spatial extent of the deconvolution kernels
+ * @param[in] depth Number of deconvolution kernels
+ * @param[in] deconv_info Deconvolution layer information
+ * @param[in] inner_border Inner border (right, top)
+ * @param[in] weights_accessor (Optional) Accessor of the weights node data
+ * @param[in] bias_accessor (Optional) Accessor of the bias node data
+ *
+ * @return Node ID of the created node, EmptyNodeID in case of error
+ */
+ static NodeID add_deconvolution_node(Graph &g, NodeParams params, NodeIdxPair input,
+ Size2D kernel_spatial_extend, unsigned int depth, PadStrideInfo deconv_info, Size2D inner_border,
+ ITensorAccessorUPtr weights_accessor = nullptr, ITensorAccessorUPtr bias_accessor = nullptr);
/** Adds a depth concatenate node to the graph
*
* @param[in] g Graph to add the node to
@@ -161,6 +188,18 @@ public:
* @return Node ID of the created node, EmptyNodeID in case of error
*/
static NodeID add_elementwise_node(Graph &g, NodeParams params, NodeIdxPair input0, NodeIdxPair input1, EltwiseOperation operation);
+ /** Adds a Dummy node to the graph
+ *
+ * @note This node is for debugging purposes only; it just alters the shape of the graph pipeline as requested.
+ *
+ * @param[in] g Graph to add the node to
+ * @param[in] params Common node parameters
+ * @param[in] input Input to the dummy node as a NodeID-Index pair
+ * @param[in] shape Output shape
+ *
+ * @return Node ID of the created node, EmptyNodeID in case of error
+ */
+ static NodeID add_dummy_node(Graph &g, NodeParams params, NodeIdxPair input, TensorShape shape);
/** Adds a flatten layer node to the graph
*
* @param[in] g Graph to add the node to
@@ -213,6 +252,18 @@ public:
* @return Node ID of the created node, EmptyNodeID in case of error
*/
static NodeID add_reshape_node(Graph &g, NodeParams params, NodeIdxPair input, TensorShape shape);
+ /** Adds a resize layer node to the graph
+ *
+ * @param[in] g Graph to add the node to
+ * @param[in] params Common node parameters
+ * @param[in] input Input to the resize layer node as a NodeID-Index pair
+ * @param[in] policy Interpolation policy
+ * @param[in] width_scale Width scaling factor
+ * @param[in] height_scale Height scaling factor
+ *
+ * @return Node ID of the created node, EmptyNodeID in case of error
+ */
+ static NodeID add_resize_node(Graph &g, NodeParams params, NodeIdxPair input, InterpolationPolicy policy, float width_scale, float height_scale);
/** Adds a scale layer node to the graph
* This layer computes a product of the input with a scale (read from mul_accessor) and it applies an offset (read from add_accessor).
* output = input * mul_w + add_w
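
As a rough illustration of the builder-level entry points added above, a hedged snippet assuming a Graph g that already contains a producer node with ID input_nid (both g and input_nid are hypothetical):

    using namespace arm_compute;
    using namespace arm_compute::graph;

    NodeParams  params{ "shuffle_block", Target::CL };
    NodeIdxPair input{ input_nid, 0 };  // output 0 of an existing producer node

    NodeID shuffle_nid = GraphBuilder::add_channel_shuffle_node(g, params, input, 2 /* num_groups */);
    NodeID resize_nid  = GraphBuilder::add_resize_node(g, params, { shuffle_nid, 0 },
                                                       InterpolationPolicy::BILINEAR, 2.f, 2.f);
    ARM_COMPUTE_ERROR_ON(resize_nid == EmptyNodeID);
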
diff --git a/arm_compute/graph/TypePrinter.h b/arm_compute/graph/TypePrinter.h
index 6babd3961d..edce77c4ad 100644
--- a/arm_compute/graph/TypePrinter.h
+++ b/arm_compute/graph/TypePrinter.h
@@ -352,6 +352,33 @@ inline ::std::ostream &operator<<(::std::ostream &os, const QuantizationInfo &qu
<< "Offset:" << quantization_info.offset;
return os;
}
+
+/** Formatted output of the Interpolation policy type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] policy Interpolation policy to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const InterpolationPolicy &policy)
+{
+ switch(policy)
+ {
+ case InterpolationPolicy::NEAREST_NEIGHBOR:
+ os << "NEAREST NEIGHBOR";
+ break;
+ case InterpolationPolicy::BILINEAR:
+ os << "BILINEAR";
+ break;
+ case InterpolationPolicy::AREA:
+ os << "AREA";
+ break;
+ default:
+ ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+ }
+
+ return os;
+}
} // namespace graph
} // namespace arm_compute
#endif /* __ARM_COMPUTE_GRAPH_TYPE_PRINTER_H__ */
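
The new operator<< simply makes InterpolationPolicy printable in graph logs; a trivial sketch:

    #include <iostream>
    using namespace arm_compute::graph;

    std::cout << "Policy: " << InterpolationPolicy::BILINEAR << std::endl;  // prints "Policy: BILINEAR"
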
diff --git a/arm_compute/graph/Types.h b/arm_compute/graph/Types.h
index d4e4f99377..9cfede9cf3 100644
--- a/arm_compute/graph/Types.h
+++ b/arm_compute/graph/Types.h
@@ -52,6 +52,7 @@ using arm_compute::PadStrideInfo;
using arm_compute::PoolingLayerInfo;
using arm_compute::PoolingType;
using arm_compute::DimensionRoundingType;
+using arm_compute::InterpolationPolicy;
/** TODO (geopin01): Make ids strongly typed */
using TensorID = unsigned int;
@@ -128,7 +129,9 @@ enum class NodeType
{
ActivationLayer,
BatchNormalizationLayer,
+ ChannelShuffleLayer,
ConvolutionLayer,
+ DeconvolutionLayer,
DepthConcatenateLayer,
DepthwiseConvolutionLayer,
EltwiseLayer,
@@ -137,13 +140,15 @@ enum class NodeType
NormalizationLayer,
PoolingLayer,
ReshapeLayer,
- ScaleLayer,
+ ResizeLayer,
SoftmaxLayer,
SplitLayer,
Input,
Output,
Const,
+
+ Dummy
};
/** Backend Memory Manager affinity **/
diff --git a/arm_compute/graph/backends/ValidateHelpers.h b/arm_compute/graph/backends/ValidateHelpers.h
index db3f8ba4f9..189fbdc9c7 100644
--- a/arm_compute/graph/backends/ValidateHelpers.h
+++ b/arm_compute/graph/backends/ValidateHelpers.h
@@ -52,6 +52,29 @@ inline arm_compute::ITensorInfo *get_backing_tensor_info(arm_compute::graph::Ten
return ((tensor == nullptr) || (tensor->handle() == nullptr)) ? nullptr : tensor->handle()->tensor().info();
}
+/** Validates a Channel Shuffle layer node
+ *
+ * @tparam ChannelShuffleLayer Channel Shuffle layer function type
+ *
+ * @param[in] node Node to validate
+ *
+ * @return Status
+ */
+template <typename ChannelShuffleLayer>
+Status validate_channel_shuffle_layer(ChannelShuffleLayerNode &node)
+{
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ChannelShuffle node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
+ ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
+
+ // Extract IO and info
+ arm_compute::ITensorInfo *input = get_backing_tensor_info(node.input(0));
+ arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
+ const unsigned int num_groups = node.num_groups();
+
+ return ChannelShuffleLayer::validate(input, output, num_groups);
+}
+
/** Validates a Convolution layer node
*
* @tparam ConvolutionLayer Default Convolution layer function type
diff --git a/arm_compute/graph/frontend/Layers.h b/arm_compute/graph/frontend/Layers.h
index a97684453c..197d2ea409 100644
--- a/arm_compute/graph/frontend/Layers.h
+++ b/arm_compute/graph/frontend/Layers.h
@@ -154,6 +154,30 @@ private:
float _epsilon;
};
+/** Channel Shuffle Layer */
+class ChannelShuffleLayer final : public ILayer
+{
+public:
+ /** Construct a Channel Shuffle layer.
+ *
+ * @param[in] num_groups Number of groups
+ */
+ ChannelShuffleLayer(unsigned int num_groups)
+ : _num_groups(num_groups)
+ {
+ }
+
+ NodeID create_layer(IStream &s) override
+ {
+ NodeParams common_params = { name(), s.hints().target_hint };
+ NodeIdxPair input = { s.tail_node(), 0 };
+ return GraphBuilder::add_channel_shuffle_node(s.graph(), common_params, input, _num_groups);
+ }
+
+private:
+ unsigned int _num_groups;
+};
+
/** Convolution Layer */
class ConvolutionLayer final : public ILayer
{
@@ -213,6 +237,56 @@ private:
const QuantizationInfo _out_quant_info;
};
+/** Deconvolution Layer */
+class DeconvolutionLayer final : public ILayer
+{
+public:
+ /** Construct a deconvolution layer.
+ *
+ * @param[in] conv_width Convolution width.
+ * @param[in] conv_height Convolution height.
+ * @param[in] ofm Output feature map.
+ * @param[in] weights Accessor to get kernel weights from.
+ * @param[in] bias Accessor to get kernel bias from.
+ * @param[in] deconv_info Padding and stride information.
+ * @param[in] inner_border Inner border padding (right, top)
+ */
+ DeconvolutionLayer(unsigned int conv_width,
+ unsigned int conv_height,
+ unsigned int ofm,
+ ITensorAccessorUPtr weights,
+ ITensorAccessorUPtr bias,
+ PadStrideInfo deconv_info,
+ Size2D inner_border)
+ : _conv_width(conv_width),
+ _conv_height(conv_height),
+ _ofm(ofm),
+ _deconv_info(std::move(deconv_info)),
+ _inner_border(inner_border),
+ _weights(std::move(weights)),
+ _bias(std::move(bias))
+ {
+ }
+
+ NodeID create_layer(IStream &s) override
+ {
+ NodeIdxPair input = { s.tail_node(), 0 };
+ NodeParams common_params = { name(), s.hints().target_hint };
+ return GraphBuilder::add_deconvolution_node(s.graph(), common_params, input,
+ Size2D(_conv_width, _conv_height), _ofm, _deconv_info, _inner_border,
+ std::move(_weights), std::move(_bias));
+ }
+
+private:
+ unsigned int _conv_width;
+ unsigned int _conv_height;
+ unsigned int _ofm;
+ const PadStrideInfo _deconv_info;
+ Size2D _inner_border;
+ ITensorAccessorUPtr _weights;
+ ITensorAccessorUPtr _bias;
+};
+
/** Depthwise Convolution Layer */
class DepthwiseConvolutionLayer final : public ILayer
{
@@ -260,6 +334,30 @@ private:
const QuantizationInfo _quant_info;
};
+/** Dummy Layer */
+class DummyLayer final : public ILayer
+{
+public:
+ /** Construct a dummy layer.
+ *
+ * @param[in] shape Output shape
+ */
+ DummyLayer(TensorShape shape)
+ : _shape(shape)
+ {
+ }
+
+ NodeID create_layer(IStream &s) override
+ {
+ NodeParams common_params = { name(), s.hints().target_hint };
+ NodeIdxPair input = { s.tail_node(), 0 };
+ return GraphBuilder::add_dummy_node(s.graph(), common_params, input, _shape);
+ }
+
+private:
+ TensorShape _shape;
+};
+
/** Flatten Layer */
class FlattenLayer final : public ILayer
{
@@ -380,6 +478,28 @@ private:
TensorShape _shape;
};
+/** Resize Layer */
+class ResizeLayer final : public ILayer
+{
+public:
+ ResizeLayer(InterpolationPolicy policy, float width_scale, float height_scale)
+ : _policy(policy), _width_scale(width_scale), _height_scale(height_scale)
+ {
+ }
+
+ NodeID create_layer(IStream &s) override
+ {
+ NodeParams common_params = { name(), s.hints().target_hint };
+ NodeIdxPair input = { s.tail_node(), 0 };
+ return GraphBuilder::add_resize_node(s.graph(), common_params, input, _policy, _width_scale, _height_scale);
+ }
+
+private:
+ InterpolationPolicy _policy;
+ float _width_scale;
+ float _height_scale;
+};
+
/** Scale Layer */
class ScaleLayer final : public ILayer
{
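
To complete the frontend picture above, a hedged sketch of the new DeconvolutionLayer appended to an existing stream `net`; get_weights_accessor stands in for any helper returning an ITensorAccessorUPtr and, like the shapes, is an assumption rather than part of this patch:

    // Hypothetical 2x upsampling block.
    net << DeconvolutionLayer(4U, 4U, 64U,                                     // 4x4 kernels, 64 output feature maps
                              get_weights_accessor(data_path, "deconv_w.npy"), // assumed accessor helper
                              get_weights_accessor(data_path, "deconv_b.npy"), // assumed accessor helper
                              PadStrideInfo(2, 2, 1, 1),                       // stride 2, padding 1
                              Size2D(0, 0));                                   // no inner border
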
diff --git a/arm_compute/graph/frontend/Types.h b/arm_compute/graph/frontend/Types.h
index 47893613c7..cd579e2119 100644
--- a/arm_compute/graph/frontend/Types.h
+++ b/arm_compute/graph/frontend/Types.h
@@ -50,6 +50,8 @@ using graph::DepthwiseConvolutionMethod;
using graph::TensorDescriptor;
using graph::DimensionRoundingType;
using graph::GraphConfig;
+using graph::InterpolationPolicy;
+using graph::Size2D;
/** Branch layer merging method */
enum class BranchMergeMethod
diff --git a/arm_compute/graph/nodes/ChannelShuffleLayerNode.h b/arm_compute/graph/nodes/ChannelShuffleLayerNode.h
new file mode 100644
index 0000000000..696012a818
--- /dev/null
+++ b/arm_compute/graph/nodes/ChannelShuffleLayerNode.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH_CHANNEL_SHUFFLE_LAYER_NODE_H__
+#define __ARM_COMPUTE_GRAPH_CHANNEL_SHUFFLE_LAYER_NODE_H__
+
+#include "arm_compute/graph/INode.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+/** Channel Shuffle Layer node */
+class ChannelShuffleLayerNode final : public INode
+{
+public:
+ /** Constructor
+ *
+ * @param[in] num_groups Number of groups
+ */
+ ChannelShuffleLayerNode(unsigned int num_groups);
+ /** Number of groups accessor
+ *
+ * @return Number of groups
+ */
+ unsigned int num_groups() const;
+
+ // Inherited overridden methods:
+ NodeType type() const override;
+ bool forward_descriptors() override;
+ TensorDescriptor configure_output(size_t idx) const override;
+ void accept(INodeVisitor &v) override;
+
+private:
+ unsigned int _num_groups;
+};
+} // namespace graph
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH_CHANNEL_SHUFFLE_LAYER_NODE_H__ */
diff --git a/arm_compute/graph/nodes/DeconvolutionLayerNode.h b/arm_compute/graph/nodes/DeconvolutionLayerNode.h
new file mode 100644
index 0000000000..73210a299e
--- /dev/null
+++ b/arm_compute/graph/nodes/DeconvolutionLayerNode.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH_DECONVOLUTION_LAYER_NODE_H__
+#define __ARM_COMPUTE_GRAPH_DECONVOLUTION_LAYER_NODE_H__
+
+#include "arm_compute/graph/INode.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+/** Deconvolution Layer node */
+class DeconvolutionLayerNode final : public INode
+{
+public:
+ /** Constructor
+ *
+ * @param[in] info Deconvolution layer attributes
+ * @param[in] inner_border Inner border (right, top)
+ */
+ DeconvolutionLayerNode(PadStrideInfo info, Size2D inner_border);
+ /** Deconvolution metadata accessor
+ *
+ * @return Deconvolution information
+ */
+ PadStrideInfo deconvolution_info() const;
+ /** Deconvolution inner border accessor
+ *
+ * @return Inner border (right, top)
+ */
+ Size2D inner_border() const;
+ /** Computes deconvolution output descriptor
+ *
+ * @param[in] input_descriptor Input descriptor
+ * @param[in] weights_descriptor Weights descriptor
+ * @param[in] info Deconvolution operation attributes
+ * @param[in] inner_border Inner border (right, top)
+ *
+ * @return Output descriptor
+ */
+ static TensorDescriptor compute_output_descriptor(const TensorDescriptor &input_descriptor,
+ const TensorDescriptor &weights_descriptor,
+ const PadStrideInfo &info,
+ const Size2D &inner_border);
+
+ // Inherited overridden methods:
+ NodeType type() const override;
+ bool forward_descriptors() override;
+ TensorDescriptor configure_output(size_t idx) const override;
+ void accept(INodeVisitor &v) override;
+
+private:
+ PadStrideInfo _info;
+ Size2D _inner_border;
+};
+} // namespace graph
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH_DECONVOLUTION_LAYER_NODE_H__ */
diff --git a/arm_compute/graph/nodes/DummyNode.h b/arm_compute/graph/nodes/DummyNode.h
new file mode 100644
index 0000000000..72d73ec825
--- /dev/null
+++ b/arm_compute/graph/nodes/DummyNode.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH_DUMMY_NODE_H__
+#define __ARM_COMPUTE_GRAPH_DUMMY_NODE_H__
+
+#include "arm_compute/graph/INode.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+/** Dummy Layer node
+ *
+ * Dummy layer transforms a given input to a specified output with a given shape.
+ *
+ * @note Used only for debugging/performance reasons.
+ * @note It does not perform any computation at all.
+ * @note Can be used to simulate graphs that have nodes that are not yet supported.
+ */
+class DummyNode final : public INode
+{
+public:
+ /** Constructor
+ *
+ * @param[in] shape Reshaped tensor shape
+ */
+ DummyNode(TensorShape shape);
+
+ // Inherited overridden methods:
+ NodeType type() const override;
+ bool forward_descriptors() override;
+ TensorDescriptor configure_output(size_t idx) const override;
+ void accept(INodeVisitor &v) override;
+
+private:
+ TensorShape _shape;
+};
+} // namespace graph
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH_DUMMY_NODE_H__ */ \ No newline at end of file
diff --git a/arm_compute/graph/nodes/Nodes.h b/arm_compute/graph/nodes/Nodes.h
index c39546c6bd..97aa191916 100644
--- a/arm_compute/graph/nodes/Nodes.h
+++ b/arm_compute/graph/nodes/Nodes.h
@@ -26,10 +26,13 @@
#include "arm_compute/graph/nodes/ActivationLayerNode.h"
#include "arm_compute/graph/nodes/BatchNormalizationLayerNode.h"
+#include "arm_compute/graph/nodes/ChannelShuffleLayerNode.h"
#include "arm_compute/graph/nodes/ConstNode.h"
#include "arm_compute/graph/nodes/ConvolutionLayerNode.h"
+#include "arm_compute/graph/nodes/DeconvolutionLayerNode.h"
#include "arm_compute/graph/nodes/DepthConcatenateLayerNode.h"
#include "arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h"
+#include "arm_compute/graph/nodes/DummyNode.h"
#include "arm_compute/graph/nodes/EltwiseLayerNode.h"
#include "arm_compute/graph/nodes/FlattenLayerNode.h"
#include "arm_compute/graph/nodes/FullyConnectedLayerNode.h"
@@ -38,6 +41,7 @@
#include "arm_compute/graph/nodes/OutputNode.h"
#include "arm_compute/graph/nodes/PoolingLayerNode.h"
#include "arm_compute/graph/nodes/ReshapeLayerNode.h"
+#include "arm_compute/graph/nodes/ResizeLayerNode.h"
#include "arm_compute/graph/nodes/SoftmaxLayerNode.h"
#include "arm_compute/graph/nodes/SplitLayerNode.h"
diff --git a/arm_compute/graph/nodes/NodesFwd.h b/arm_compute/graph/nodes/NodesFwd.h
index b90cb5c308..05979d796c 100644
--- a/arm_compute/graph/nodes/NodesFwd.h
+++ b/arm_compute/graph/nodes/NodesFwd.h
@@ -32,10 +32,13 @@ namespace graph
class INode;
class ActivationLayerNode;
class BatchNormalizationLayerNode;
+class ChannelShuffleLayerNode;
class ConstNode;
class ConvolutionLayerNode;
+class DeconvolutionLayerNode;
class DepthConcatenateLayerNode;
class DepthwiseConvolutionLayerNode;
+class DummyNode;
class EltwiseLayerNode;
class FlattenLayerNode;
class FullyConnectedLayerNode;
@@ -44,6 +47,7 @@ class NormalizationLayerNode;
class OutputNode;
class PoolingLayerNode;
class ReshapeLayerNode;
+class ResizeLayerNode;
class SoftmaxLayerNode;
class SplitLayerNode;
} // namespace graph
diff --git a/arm_compute/graph/nodes/ResizeLayerNode.h b/arm_compute/graph/nodes/ResizeLayerNode.h
new file mode 100644
index 0000000000..231e79e62e
--- /dev/null
+++ b/arm_compute/graph/nodes/ResizeLayerNode.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH_RESIZE_LAYER_NODE_H__
+#define __ARM_COMPUTE_GRAPH_RESIZE_LAYER_NODE_H__
+
+#include "arm_compute/graph/INode.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+/** Resize Layer node */
+class ResizeLayerNode final : public INode
+{
+public:
+ /** Default Constructor */
+ ResizeLayerNode(InterpolationPolicy policy, float scale_width, float scale_height);
+ /** Interpolation policy accessor
+ *
+ * @return Interpolation policy
+ */
+ InterpolationPolicy policy() const;
+ /** Scaling factor accessors
+ *
+ * @return Scaling factors (width, height)
+ */
+ std::pair<float, float> scaling_factor() const;
+
+ // Inherited overridden methods:
+ NodeType type() const override;
+ bool forward_descriptors() override;
+ TensorDescriptor configure_output(size_t idx) const override;
+ void accept(INodeVisitor &v) override;
+
+private:
+ InterpolationPolicy _policy;
+ float _scale_width;
+ float _scale_height;
+};
+} // namespace graph
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH_RESIZE_LAYER_NODE_H__ */
diff --git a/src/graph/GraphBuilder.cpp b/src/graph/GraphBuilder.cpp
index 4c5d30a33f..a20920a74c 100644
--- a/src/graph/GraphBuilder.cpp
+++ b/src/graph/GraphBuilder.cpp
@@ -28,6 +28,8 @@
#include "arm_compute/graph/algorithms/BFS.h"
#include "arm_compute/graph/nodes/Nodes.h"
+#include "support/ToolchainSupport.h"
+
#define CHECK_NODEIDX_PAIR(pair, g) \
ARM_COMPUTE_ERROR_ON(((pair).node_id >= (g).nodes().size()) || ((g).node((pair).node_id) == nullptr) || ((pair).index >= (g).node((pair).node_id)->num_outputs()));
@@ -80,7 +82,7 @@ NodeID create_simple_single_input_output_node(Graph &g, NodeParams &params, Node
return nid;
}
-NodeID create_grouped_convolution(Graph &g, NodeParams &params, NodeIdxPair input, NodeID weights, NodeID bias,
+NodeID create_grouped_convolution(Graph &g, const NodeParams &params, NodeIdxPair input, NodeID weights, NodeID bias,
PadStrideInfo conv_info, ConvolutionMethod method, FastMathHint fast_math_hint, unsigned int num_groups)
{
bool has_bias = (bias != EmptyNodeID);
@@ -102,14 +104,20 @@ NodeID create_grouped_convolution(Graph &g, NodeParams &params, NodeIdxPair inpu
std::vector<NodeIdxPair> convolution_outputs;
for(unsigned int i = 0; i < num_groups; ++i)
{
- NodeID conv_nid = g.add_node<ConvolutionLayerNode>(conv_info, method, fast_math_hint);
+ NodeParams group_params = params;
+ NodeID conv_nid = g.add_node<ConvolutionLayerNode>(conv_info, method, fast_math_hint);
g.add_connection(input_split, i, conv_nid, 0);
g.add_connection(weights_split, i, conv_nid, 1);
if(has_bias)
{
g.add_connection(bias_split, i, conv_nid, 2);
}
- set_node_params(g, conv_nid, params);
+ // Add group name
+ if(!group_params.name.empty())
+ {
+ group_params.name.append("_g" + arm_compute::support::cpp11::to_string(i));
+ }
+ set_node_params(g, conv_nid, group_params);
convolution_outputs.push_back({ conv_nid, 0 });
}
@@ -203,6 +211,11 @@ NodeID GraphBuilder::add_batch_normalization_node(Graph &g, NodeParams params, N
return batch_norm_nid;
}
+NodeID GraphBuilder::add_channel_shuffle_node(Graph &g, NodeParams params, NodeIdxPair input, unsigned int num_groups)
+{
+ return create_simple_single_input_output_node<ChannelShuffleLayerNode>(g, params, input, num_groups);
+}
+
NodeID GraphBuilder::add_convolution_node(Graph &g, NodeParams params, NodeIdxPair input,
Size2D kernel_spatial_extend, unsigned int depth, PadStrideInfo conv_info,
unsigned int num_groups, ConvolutionMethod method, FastMathHint fast_math_hint,
@@ -262,6 +275,52 @@ NodeID GraphBuilder::add_convolution_node(Graph &g, NodeParams params, NodeIdxPa
}
}
+NodeID GraphBuilder::add_deconvolution_node(Graph &g, NodeParams params, NodeIdxPair input,
+ Size2D kernel_spatial_extend, unsigned int depth, PadStrideInfo deconv_info,
+ Size2D inner_border, ITensorAccessorUPtr weights_accessor,
+ ITensorAccessorUPtr bias_accessor)
+{
+ CHECK_NODEIDX_PAIR(input, g);
+ ARM_COMPUTE_ERROR_ON(depth == 0);
+ ARM_COMPUTE_ERROR_ON((kernel_spatial_extend.width == 0) || (kernel_spatial_extend.height == 0));
+
+ bool has_bias = (bias_accessor != nullptr);
+
+ // Get input tensor descriptor
+ const TensorDescriptor input_tensor_desc = get_tensor_descriptor(g, g.node(input.node_id)->outputs()[0]);
+
+ // Create weights node
+ TensorDescriptor w_desc = input_tensor_desc;
+ w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::WIDTH), kernel_spatial_extend.width);
+ w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::HEIGHT), kernel_spatial_extend.height);
+ w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::CHANNEL),
+ get_dimension_size(input_tensor_desc, DataLayoutDimension::CHANNEL));
+ w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::BATCHES), depth);
+
+ NodeID w_nid = add_const_node_with_name(g, params, "Weights", w_desc, std::move(weights_accessor));
+
+ // Create bias nodes
+ NodeID b_nid = EmptyNodeID;
+ if(has_bias)
+ {
+ TensorDescriptor b_desc = input_tensor_desc;
+ b_desc.shape = TensorShape(depth);
+ b_nid = add_const_node_with_name(g, params, "Bias", b_desc, std::move(bias_accessor));
+ }
+
+ // Create deconvolution node and connect
+ NodeID deconv_nid = g.add_node<DeconvolutionLayerNode>(deconv_info, inner_border);
+ g.add_connection(input.node_id, input.index, deconv_nid, 0);
+ g.add_connection(w_nid, 0, deconv_nid, 1);
+ if(has_bias)
+ {
+ g.add_connection(b_nid, 0, deconv_nid, 2);
+ }
+ set_node_params(g, deconv_nid, params);
+
+ return deconv_nid;
+}
+
NodeID GraphBuilder::add_depth_concatenate_node(Graph &g, NodeParams params, std::vector<NodeIdxPair> inputs)
{
ARM_COMPUTE_ERROR_ON(inputs.size() == 0);
@@ -326,6 +385,18 @@ NodeID GraphBuilder::add_depthwise_convolution_node(Graph &g, NodeParams params,
return conv_nid;
}
+NodeID GraphBuilder::add_dummy_node(Graph &g, NodeParams params, NodeIdxPair input, TensorShape shape)
+{
+ CHECK_NODEIDX_PAIR(input, g);
+
+ NodeID nid = g.add_node<DummyNode>(shape);
+ g.add_connection(input.node_id, input.index, nid, 0);
+
+ set_node_params(g, nid, params);
+
+ return nid;
+}
+
NodeID GraphBuilder::add_elementwise_node(Graph &g, NodeParams params, NodeIdxPair input0, NodeIdxPair input1, EltwiseOperation operation)
{
CHECK_NODEIDX_PAIR(input0, g);
@@ -399,6 +470,12 @@ NodeID GraphBuilder::add_reshape_node(Graph &g, NodeParams params, NodeIdxPair i
return create_simple_single_input_output_node<ReshapeLayerNode>(g, params, input, shape);
}
+NodeID GraphBuilder::add_resize_node(Graph &g, NodeParams params, NodeIdxPair input, InterpolationPolicy policy,
+ float width_scale, float height_scale)
+{
+ return create_simple_single_input_output_node<ResizeLayerNode>(g, params, input, policy, width_scale, height_scale);
+}
+
NodeID GraphBuilder::add_scale_layer(Graph &g, const NodeParams &params, NodeIdxPair input, ITensorAccessorUPtr mul_accessor, ITensorAccessorUPtr add_accessor)
{
CHECK_NODEIDX_PAIR(input, g);
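
For reference, the constant weights and bias tensors created by add_deconvolution_node above follow a [width, height, input channels, kernels] layout; an illustrative example (default NCHW-style layout assumed, values hypothetical):

    // Input  : TensorShape(8U, 8U, 16U, 1U)   -> 8x8, 16 channels
    // Call   : add_deconvolution_node(g, params, input, Size2D(3, 3), /* depth */ 32U, deconv_info, Size2D(0, 0))
    // Weights: TensorShape(3U, 3U, 16U, 32U)  -> 3x3 kernels, 16 input channels, 32 kernels
    // Bias   : TensorShape(32U)               -> one value per kernel
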
diff --git a/src/graph/backends/CL/CLFunctionsFactory.cpp b/src/graph/backends/CL/CLFunctionsFactory.cpp
index ac04f1063c..90ea81f21a 100644
--- a/src/graph/backends/CL/CLFunctionsFactory.cpp
+++ b/src/graph/backends/CL/CLFunctionsFactory.cpp
@@ -141,6 +141,38 @@ std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLa
return std::move(func);
}
+/** Create a backend channel shuffle layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend channel shuffle layer function
+ */
+std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
+{
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+ "Creating CL Channel Shuffle node with ID : " << node.id() << " and Name: " << node.name()
+ << std::endl);
+ ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+ ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+ // Extract IO and info
+ ICLTensor *input = get_backing_tensor(node.input(0));
+ ICLTensor *output = get_backing_tensor(node.output(0));
+ const unsigned int num_groups = node.num_groups();
+
+ // Create function
+ auto func = support::cpp14::make_unique<CLChannelShuffleLayer>();
+ func->configure(input, output, num_groups);
+
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated CLChannelShuffleLayer"
+ << " Data Type: " << input->info()->data_type()
+ << " Shape: " << input->info()->tensor_shape()
+ << " Num groups: " << num_groups
+ << std::endl);
+
+ return std::move(func);
+}
+
/** Create a backend convolution layer function
*
* @param[in] node Node to create the backend function for
@@ -206,6 +238,46 @@ std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node,
return func;
}
+/** Create a backend deconvolution layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend deconvolution layer function
+ */
+std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
+{
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating CL DeconvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_ERROR_ON(node.num_inputs() != 3);
+ ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+ // Extract IO and info
+ ICLTensor *input = get_backing_tensor(node.input(0));
+ ICLTensor *weights = get_backing_tensor(node.input(1));
+ ICLTensor *biases = get_backing_tensor(node.input(2));
+ ICLTensor *output = get_backing_tensor(node.output(0));
+
+ const PadStrideInfo deconv_info = node.deconvolution_info();
+ const Size2D inner_border = node.inner_border();
+
+ // Create and configure function (we assume that functions have been validated before creation)
+ std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, Target::CL);
+ std::unique_ptr<IFunction> func;
+ std::string func_name;
+
+ std::tie(func, func_name) = create_named_memory_managed_function<CLDeconvolutionLayer>(std::string("CLDeconvolutionLayer"), mm,
+ input, weights, biases, output,
+ deconv_info, inner_border.x(), inner_border.y());
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
+ << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Weights shape: " << weights->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape()
+ << std::endl);
+ return func;
+}
+
/** Create a backend layer depth concatenate function
*
* @param[in] node Node to create the backend function for
@@ -530,6 +602,41 @@ std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
return std::move(func);
}
+/** Create a backend resize layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend resize layer function
+ */
+std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
+{
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+ "Creating CL Resize node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+ ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+ // Extract IO and info
+ ICLTensor *input = get_backing_tensor(node.input(0));
+ ICLTensor *output = get_backing_tensor(node.output(0));
+ ARM_COMPUTE_ERROR_ON(input == nullptr);
+ ARM_COMPUTE_ERROR_ON(output == nullptr);
+ const InterpolationPolicy policy = node.policy();
+
+ // Create and configure function
+ auto func = support::cpp14::make_unique<CLScale>();
+ func->configure(input, output, policy, BorderMode::CONSTANT);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated CLScale"
+ << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape()
+ << " Interpolation: " << policy
+ << std::endl);
+
+ return std::move(func);
+}
+
/** Create a backend softmax layer function
*
* @param[in] node Node to create the backend function for
@@ -579,8 +686,12 @@ std::unique_ptr<IFunction> CLFunctionFactory::create(INode *node, GraphContext &
return create_activation_layer(*polymorphic_downcast<ActivationLayerNode *>(node));
case NodeType::BatchNormalizationLayer:
return create_batch_normalization_layer(*polymorphic_downcast<BatchNormalizationLayerNode *>(node));
+ case NodeType::ChannelShuffleLayer:
+ return create_channel_shuffle_layer(*polymorphic_downcast<ChannelShuffleLayerNode *>(node));
case NodeType::ConvolutionLayer:
return create_convolution_layer(*polymorphic_downcast<ConvolutionLayerNode *>(node), ctx);
+ case NodeType::DeconvolutionLayer:
+ return create_deconvolution_layer(*polymorphic_downcast<DeconvolutionLayerNode *>(node), ctx);
case NodeType::DepthConcatenateLayer:
return create_depth_concatenate_layer(*polymorphic_downcast<DepthConcatenateLayerNode *>(node));
case NodeType::DepthwiseConvolutionLayer:
@@ -597,6 +708,8 @@ std::unique_ptr<IFunction> CLFunctionFactory::create(INode *node, GraphContext &
return create_pooling_layer(*polymorphic_downcast<PoolingLayerNode *>(node));
case NodeType::ReshapeLayer:
return create_reshape_layer(*polymorphic_downcast<ReshapeLayerNode *>(node));
+ case NodeType::ResizeLayer:
+ return create_resize_layer(*polymorphic_downcast<ResizeLayerNode *>(node));
case NodeType::SoftmaxLayer:
return create_softmax_layer(*polymorphic_downcast<SoftmaxLayerNode *>(node), ctx);
default:
diff --git a/src/graph/backends/CL/CLNodeValidator.cpp b/src/graph/backends/CL/CLNodeValidator.cpp
index c16b2e67df..3e63617478 100644
--- a/src/graph/backends/CL/CLNodeValidator.cpp
+++ b/src/graph/backends/CL/CLNodeValidator.cpp
@@ -47,6 +47,8 @@ Status CLNodeValidator::validate(INode *node)
NodeType type = node->type();
switch(type)
{
+ case NodeType::ChannelShuffleLayer:
+ return detail::validate_channel_shuffle_layer<CLChannelShuffleLayer>(*polymorphic_downcast<ChannelShuffleLayerNode *>(node));
case NodeType::ConvolutionLayer:
return detail::validate_convolution_layer<CLConvolutionLayer,
CLDirectConvolutionLayer,
diff --git a/src/graph/backends/NEON/NEFunctionFactory.cpp b/src/graph/backends/NEON/NEFunctionFactory.cpp
index 7f97876e57..8376feb265 100644
--- a/src/graph/backends/NEON/NEFunctionFactory.cpp
+++ b/src/graph/backends/NEON/NEFunctionFactory.cpp
@@ -190,6 +190,46 @@ std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node,
return func;
}
+/** Create a backend deconvolution layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend deconvolution layer function
+ */
+std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
+{
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON DeconvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_ERROR_ON(node.num_inputs() != 3);
+ ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+ // Extract IO and info
+ ITensor *input = get_backing_tensor(node.input(0));
+ ITensor *weights = get_backing_tensor(node.input(1));
+ ITensor *biases = get_backing_tensor(node.input(2));
+ ITensor *output = get_backing_tensor(node.output(0));
+
+ const PadStrideInfo deconv_info = node.deconvolution_info();
+ const Size2D inner_border = node.inner_border();
+
+ // Create and configure function (we assume that functions have been validated before creation)
+ std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, Target::NEON);
+ std::unique_ptr<IFunction> func;
+ std::string func_name;
+
+ std::tie(func, func_name) = create_named_memory_managed_function<NEDeconvolutionLayer>(std::string("NEDeconvolutionLayer"), mm,
+ input, weights, biases, output,
+ deconv_info, inner_border.x(), inner_border.y());
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
+ << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Weights shape: " << weights->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape()
+ << std::endl);
+ return func;
+}
+
/** Create a backend layer depth concatenate function
*
* @param[in] node Node to create the backend function for
@@ -503,6 +543,41 @@ std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
return std::move(func);
}
+/** Create a backend resize layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend resize layer function
+ */
+std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
+{
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+ "Creating NEON Resize node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+ ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+ // Extract IO and info
+ ITensor *input = get_backing_tensor(node.input(0));
+ ITensor *output = get_backing_tensor(node.output(0));
+ ARM_COMPUTE_ERROR_ON(input == nullptr);
+ ARM_COMPUTE_ERROR_ON(output == nullptr);
+ const InterpolationPolicy policy = node.policy();
+
+ // Create and configure function
+ auto func = support::cpp14::make_unique<NEScale>();
+ func->configure(input, output, policy, BorderMode::CONSTANT);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated NEScale"
+ << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape()
+ << " Interpolation: " << policy
+ << std::endl);
+
+ return std::move(func);
+}
+
/** Create a backend softmax layer function
*
* @param[in] node Node to create the backend function for
@@ -553,6 +628,8 @@ std::unique_ptr<IFunction> NEFunctionFactory::create(INode *node, GraphContext &
return create_batch_normalization_layer(*polymorphic_downcast<BatchNormalizationLayerNode *>(node));
case NodeType::ConvolutionLayer:
return create_convolution_layer(*polymorphic_downcast<ConvolutionLayerNode *>(node), ctx);
+ case NodeType::DeconvolutionLayer:
+ return create_deconvolution_layer(*polymorphic_downcast<DeconvolutionLayerNode *>(node), ctx);
case NodeType::DepthConcatenateLayer:
return create_depth_concatenate_layer(*polymorphic_downcast<DepthConcatenateLayerNode *>(node));
case NodeType::DepthwiseConvolutionLayer:
@@ -569,6 +646,8 @@ std::unique_ptr<IFunction> NEFunctionFactory::create(INode *node, GraphContext &
return create_pooling_layer(*polymorphic_downcast<PoolingLayerNode *>(node));
case NodeType::ReshapeLayer:
return create_reshape_layer(*polymorphic_downcast<ReshapeLayerNode *>(node));
+ case NodeType::ResizeLayer:
+ return create_resize_layer(*polymorphic_downcast<ResizeLayerNode *>(node));
case NodeType::SoftmaxLayer:
return create_softmax_layer(*polymorphic_downcast<SoftmaxLayerNode *>(node), ctx);
default:
diff --git a/src/graph/backends/NEON/NENodeValidator.cpp b/src/graph/backends/NEON/NENodeValidator.cpp
index e438e79c76..1c17f92fa1 100644
--- a/src/graph/backends/NEON/NENodeValidator.cpp
+++ b/src/graph/backends/NEON/NENodeValidator.cpp
@@ -47,6 +47,8 @@ Status NENodeValidator::validate(INode *node)
NodeType type = node->type();
switch(type)
{
+ case NodeType::ChannelShuffleLayer:
+ return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR, "Channel Shuffle is unsupported for NEON");
case NodeType::ConvolutionLayer:
return detail::validate_convolution_layer<NEConvolutionLayer,
NEDirectConvolutionLayer,
@@ -55,7 +57,6 @@ Status NENodeValidator::validate(INode *node)
case NodeType::DepthwiseConvolutionLayer:
return detail::validate_depthwise_convolution_layer<NEDepthwiseConvolutionLayer,
NEDepthwiseConvolutionLayer3x3>(*polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
-
default:
return Status{};
}
diff --git a/src/graph/nodes/ChannelShuffleLayerNode.cpp b/src/graph/nodes/ChannelShuffleLayerNode.cpp
new file mode 100644
index 0000000000..08fcce1192
--- /dev/null
+++ b/src/graph/nodes/ChannelShuffleLayerNode.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/ChannelShuffleLayerNode.h"
+
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+ChannelShuffleLayerNode::ChannelShuffleLayerNode(unsigned int num_groups)
+ : _num_groups(num_groups)
+{
+ _input_edges.resize(1, EmptyEdgeID);
+ _outputs.resize(1, NullTensorID);
+}
+
+unsigned int ChannelShuffleLayerNode::num_groups() const
+{
+ return _num_groups;
+}
+
+bool ChannelShuffleLayerNode::forward_descriptors()
+{
+ if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ {
+ Tensor *dst = output(0);
+ ARM_COMPUTE_ERROR_ON(dst == nullptr);
+ dst->desc() = configure_output(0);
+ return true;
+ }
+ return false;
+}
+
+TensorDescriptor ChannelShuffleLayerNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+ const Tensor *src = input(0);
+ ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+ return src->desc();
+}
+
+NodeType ChannelShuffleLayerNode::type() const
+{
+ return NodeType::ChannelShuffleLayer;
+}
+
+void ChannelShuffleLayerNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
+}
+} // namespace graph
+} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph/nodes/DeconvolutionLayerNode.cpp b/src/graph/nodes/DeconvolutionLayerNode.cpp
new file mode 100644
index 0000000000..9329ae3c23
--- /dev/null
+++ b/src/graph/nodes/DeconvolutionLayerNode.cpp
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/DeconvolutionLayerNode.h"
+
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
+#include "arm_compute/graph/Utils.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+DeconvolutionLayerNode::DeconvolutionLayerNode(PadStrideInfo info, Size2D inner_border)
+ : _info(std::move(info)), _inner_border(inner_border)
+{
+ _input_edges.resize(3, EmptyEdgeID);
+ _outputs.resize(1, NullTensorID);
+}
+
+PadStrideInfo DeconvolutionLayerNode::deconvolution_info() const
+{
+ return _info;
+}
+
+Size2D DeconvolutionLayerNode::inner_border() const
+{
+ return _inner_border;
+}
+
+TensorDescriptor DeconvolutionLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
+ const TensorDescriptor &weights_descriptor,
+ const PadStrideInfo &info,
+ const Size2D &inner_border)
+{
+ unsigned int output_width = 0;
+ unsigned int output_height = 0;
+
+ const unsigned int input_width = get_dimension_size(input_descriptor, DataLayoutDimension::WIDTH);
+ const unsigned int input_height = get_dimension_size(input_descriptor, DataLayoutDimension::HEIGHT);
+ const unsigned int kernel_width = get_dimension_size(weights_descriptor, DataLayoutDimension::WIDTH);
+ const unsigned int kernel_height = get_dimension_size(weights_descriptor, DataLayoutDimension::HEIGHT);
+
+ std::tie(output_width, output_height) = deconvolution_output_dimensions(input_width, input_height,
+ kernel_width, kernel_height,
+ info.pad().first, info.pad().second,
+ inner_border.x(), inner_border.y(),
+ info.stride().first, info.stride().second);
+
+ TensorDescriptor output_descriptor = input_descriptor;
+ output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::WIDTH), output_width);
+ output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::HEIGHT), output_height);
+ output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::CHANNEL), weights_descriptor.shape[3]);
+
+ return output_descriptor;
+}
+
+bool DeconvolutionLayerNode::forward_descriptors()
+{
+ if((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID))
+ {
+ Tensor *dst = output(0);
+ ARM_COMPUTE_ERROR_ON(dst == nullptr);
+ dst->desc() = configure_output(0);
+ return true;
+ }
+ return false;
+}
+
+TensorDescriptor DeconvolutionLayerNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ const Tensor *src = input(0);
+ const Tensor *weights = input(1);
+
+ ARM_COMPUTE_ERROR_ON(src == nullptr || weights == nullptr);
+
+ TensorDescriptor output_info = compute_output_descriptor(src->desc(), weights->desc(), _info, _inner_border);
+ return output_info;
+}
+
+NodeType DeconvolutionLayerNode::type() const
+{
+ return NodeType::DeconvolutionLayer;
+}
+
+void DeconvolutionLayerNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
+}
+} // namespace graph
+} // namespace arm_compute \ No newline at end of file
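
A quick worked example for compute_output_descriptor above. The exact arithmetic lives in deconvolution_output_dimensions, which is outside this patch, so the numbers below assume the usual transposed-convolution relation out = stride * (in - 1) + kernel + inner_border - 2 * pad:

    // Input 8x8, 3x3 kernel, stride 2, pad 0, inner border (0, 0):
    //   out_width  = 2 * (8 - 1) + 3 + 0 - 0 = 17
    //   out_height = 2 * (8 - 1) + 3 + 0 - 0 = 17
    // With weights_descriptor.shape[3] == 32 kernels, a TensorShape(8U, 8U, 16U, 1U) input
    // yields an output descriptor shape of TensorShape(17U, 17U, 32U, 1U).
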
diff --git a/src/graph/nodes/DummyNode.cpp b/src/graph/nodes/DummyNode.cpp
new file mode 100644
index 0000000000..e6411810de
--- /dev/null
+++ b/src/graph/nodes/DummyNode.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/DummyNode.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
+#include "arm_compute/graph/Tensor.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+DummyNode::DummyNode(TensorShape shape)
+ : _shape(shape)
+{
+ _input_edges.resize(1, EmptyEdgeID);
+ _outputs.resize(1, NullTensorID);
+}
+
+bool DummyNode::forward_descriptors()
+{
+ if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ {
+ Tensor *dst = output(0);
+ ARM_COMPUTE_ERROR_ON(dst == nullptr);
+ dst->desc() = configure_output(0);
+ return true;
+ }
+ return false;
+}
+
+TensorDescriptor DummyNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+ const Tensor *src = input(0);
+ ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+ TensorDescriptor output_desc = src->desc();
+ output_desc.shape = _shape;
+
+ return output_desc;
+}
+
+NodeType DummyNode::type() const
+{
+ return NodeType::Dummy;
+}
+
+void DummyNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
+}
+} // namespace graph
+} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph/nodes/ResizeLayerNode.cpp b/src/graph/nodes/ResizeLayerNode.cpp
new file mode 100644
index 0000000000..a6aa7bfe5c
--- /dev/null
+++ b/src/graph/nodes/ResizeLayerNode.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/ResizeLayerNode.h"
+
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
+#include "arm_compute/graph/Utils.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+ResizeLayerNode::ResizeLayerNode(InterpolationPolicy policy, float scale_width, float scale_height)
+ : _policy(policy), _scale_width(scale_width), _scale_height(scale_height)
+{
+ _input_edges.resize(1, EmptyEdgeID);
+ _outputs.resize(1, NullTensorID);
+}
+
+InterpolationPolicy ResizeLayerNode::policy() const
+{
+ return _policy;
+}
+
+std::pair<float, float> ResizeLayerNode::scaling_factor() const
+{
+ return std::make_pair(_scale_width, _scale_height);
+}
+
+bool ResizeLayerNode::forward_descriptors()
+{
+ if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ {
+ Tensor *dst = output(0);
+ ARM_COMPUTE_ERROR_ON(dst == nullptr);
+ dst->desc() = configure_output(0);
+ return true;
+ }
+ return false;
+}
+
+TensorDescriptor ResizeLayerNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+ const Tensor *src = input(0);
+ ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+ TensorDescriptor output_desc = src->desc();
+ size_t width_idx = get_dimension_idx(output_desc, DataLayoutDimension::WIDTH);
+ size_t height_idx = get_dimension_idx(output_desc, DataLayoutDimension::HEIGHT);
+ output_desc.shape.set(width_idx, static_cast<int>(output_desc.shape[width_idx] * _scale_width));
+ output_desc.shape.set(height_idx, static_cast<int>(output_desc.shape[height_idx] * _scale_height));
+
+ return output_desc;
+}
+
+NodeType ResizeLayerNode::type() const
+{
+ return NodeType::ResizeLayer;
+}
+
+void ResizeLayerNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
+}
+} // namespace graph
+} // namespace arm_compute \ No newline at end of file
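
Finally, a small illustration of ResizeLayerNode::configure_output above: the scale factors are applied to the input width and height and truncated by the static_cast<int>, e.g.:

    // Input shape TensorShape(10U, 7U, 3U), _scale_width = 1.5f, _scale_height = 1.5f:
    //   width  = static_cast<int>(10 * 1.5f) = 15
    //   height = static_cast<int>( 7 * 1.5f) = 10   (10.5 truncated)
    // Output descriptor shape: TensorShape(15U, 10U, 3U)
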