From 087eaf67dc4be4234a7fcfc3b109c1e4f5e7dd5e Mon Sep 17 00:00:00 2001 From: Georgios Pinitas Date: Wed, 16 May 2018 15:52:35 +0100 Subject: COMPMID-1176: Adds nodes to the graph. Nodes added: -ChannelShuffle -Resize -Deconvolution -Dummy (used for performance analysis and debugging) Change-Id: Iad19960cbbce6e25532f77bfd34b2292c0ca9781 Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/131672 Tested-by: Jenkins Reviewed-by: Pablo Tello Reviewed-by: Anthony Barbier --- src/graph/GraphBuilder.cpp | 83 ++++++++++++++++++- src/graph/backends/CL/CLFunctionsFactory.cpp | 113 ++++++++++++++++++++++++++ src/graph/backends/CL/CLNodeValidator.cpp | 2 + src/graph/backends/NEON/NEFunctionFactory.cpp | 79 ++++++++++++++++++ src/graph/backends/NEON/NENodeValidator.cpp | 3 +- src/graph/nodes/ChannelShuffleLayerNode.cpp | 78 ++++++++++++++++++ src/graph/nodes/DeconvolutionLayerNode.cpp | 113 ++++++++++++++++++++++++++ src/graph/nodes/DummyNode.cpp | 78 ++++++++++++++++++ src/graph/nodes/ResizeLayerNode.cpp | 90 ++++++++++++++++++++ 9 files changed, 635 insertions(+), 4 deletions(-) create mode 100644 src/graph/nodes/ChannelShuffleLayerNode.cpp create mode 100644 src/graph/nodes/DeconvolutionLayerNode.cpp create mode 100644 src/graph/nodes/DummyNode.cpp create mode 100644 src/graph/nodes/ResizeLayerNode.cpp (limited to 'src/graph') diff --git a/src/graph/GraphBuilder.cpp b/src/graph/GraphBuilder.cpp index 4c5d30a33f..a20920a74c 100644 --- a/src/graph/GraphBuilder.cpp +++ b/src/graph/GraphBuilder.cpp @@ -28,6 +28,8 @@ #include "arm_compute/graph/algorithms/BFS.h" #include "arm_compute/graph/nodes/Nodes.h" +#include "support/ToolchainSupport.h" + #define CHECK_NODEIDX_PAIR(pair, g) \ ARM_COMPUTE_ERROR_ON(((pair).node_id >= (g).nodes().size()) || ((g).node((pair).node_id) == nullptr) || ((pair).index >= (g).node((pair).node_id)->num_outputs())); @@ -80,7 +82,7 @@ NodeID create_simple_single_input_output_node(Graph &g, NodeParams ¶ms, Node return nid; } -NodeID create_grouped_convolution(Graph &g, NodeParams ¶ms, NodeIdxPair input, NodeID weights, NodeID bias, +NodeID create_grouped_convolution(Graph &g, const NodeParams ¶ms, NodeIdxPair input, NodeID weights, NodeID bias, PadStrideInfo conv_info, ConvolutionMethod method, FastMathHint fast_math_hint, unsigned int num_groups) { bool has_bias = (bias != EmptyNodeID); @@ -102,14 +104,20 @@ NodeID create_grouped_convolution(Graph &g, NodeParams ¶ms, NodeIdxPair inpu std::vector convolution_outputs; for(unsigned int i = 0; i < num_groups; ++i) { - NodeID conv_nid = g.add_node(conv_info, method, fast_math_hint); + NodeParams group_params = params; + NodeID conv_nid = g.add_node(conv_info, method, fast_math_hint); g.add_connection(input_split, i, conv_nid, 0); g.add_connection(weights_split, i, conv_nid, 1); if(has_bias) { g.add_connection(bias_split, i, conv_nid, 2); } - set_node_params(g, conv_nid, params); + // Add group name + if(!group_params.name.empty()) + { + group_params.name.append("_g" + arm_compute::support::cpp11::to_string(i)); + } + set_node_params(g, conv_nid, group_params); convolution_outputs.push_back({ conv_nid, 0 }); } @@ -203,6 +211,11 @@ NodeID GraphBuilder::add_batch_normalization_node(Graph &g, NodeParams params, N return batch_norm_nid; } +NodeID GraphBuilder::add_channel_shuffle_node(Graph &g, NodeParams params, NodeIdxPair input, unsigned int num_groups) +{ + return create_simple_single_input_output_node(g, params, input, num_groups); +} + NodeID GraphBuilder::add_convolution_node(Graph &g, NodeParams params, NodeIdxPair input, Size2D 
kernel_spatial_extend, unsigned int depth, PadStrideInfo conv_info, unsigned int num_groups, ConvolutionMethod method, FastMathHint fast_math_hint, @@ -262,6 +275,52 @@ NodeID GraphBuilder::add_convolution_node(Graph &g, NodeParams params, NodeIdxPa } } +NodeID GraphBuilder::add_deconvolution_node(Graph &g, NodeParams params, NodeIdxPair input, + Size2D kernel_spatial_extend, unsigned int depth, PadStrideInfo deconv_info, + Size2D inner_border, ITensorAccessorUPtr weights_accessor, + ITensorAccessorUPtr bias_accessor) +{ + CHECK_NODEIDX_PAIR(input, g); + ARM_COMPUTE_ERROR_ON(depth == 0); + ARM_COMPUTE_ERROR_ON((kernel_spatial_extend.width == 0) || (kernel_spatial_extend.height == 0)); + + bool has_bias = (bias_accessor != nullptr); + + // Get input tensor descriptor + const TensorDescriptor input_tensor_desc = get_tensor_descriptor(g, g.node(input.node_id)->outputs()[0]); + + // Create weights node + TensorDescriptor w_desc = input_tensor_desc; + w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::WIDTH), kernel_spatial_extend.width); + w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::HEIGHT), kernel_spatial_extend.height); + w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::CHANNEL), + get_dimension_size(input_tensor_desc, DataLayoutDimension::CHANNEL)); + w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::BATCHES), depth); + + NodeID w_nid = add_const_node_with_name(g, params, "Weights", w_desc, std::move(weights_accessor)); + + // Create bias nodes + NodeID b_nid = EmptyNodeID; + if(has_bias) + { + TensorDescriptor b_desc = input_tensor_desc; + b_desc.shape = TensorShape(depth); + b_nid = add_const_node_with_name(g, params, "Bias", b_desc, std::move(bias_accessor)); + } + + // Create convolution node and connect + NodeID deconv_nid = g.add_node(deconv_info, inner_border); + g.add_connection(input.node_id, input.index, deconv_nid, 0); + g.add_connection(w_nid, 0, deconv_nid, 1); + if(has_bias) + { + g.add_connection(b_nid, 0, deconv_nid, 2); + } + set_node_params(g, deconv_nid, params); + + return deconv_nid; +} + NodeID GraphBuilder::add_depth_concatenate_node(Graph &g, NodeParams params, std::vector inputs) { ARM_COMPUTE_ERROR_ON(inputs.size() == 0); @@ -326,6 +385,18 @@ NodeID GraphBuilder::add_depthwise_convolution_node(Graph &g, NodeParams params, return conv_nid; } +NodeID GraphBuilder::add_dummy_node(Graph &g, NodeParams params, NodeIdxPair input, TensorShape shape) +{ + CHECK_NODEIDX_PAIR(input, g); + + NodeID nid = g.add_node(shape); + g.add_connection(input.node_id, input.index, nid, 0); + + set_node_params(g, nid, params); + + return nid; +} + NodeID GraphBuilder::add_elementwise_node(Graph &g, NodeParams params, NodeIdxPair input0, NodeIdxPair input1, EltwiseOperation operation) { CHECK_NODEIDX_PAIR(input0, g); @@ -399,6 +470,12 @@ NodeID GraphBuilder::add_reshape_node(Graph &g, NodeParams params, NodeIdxPair i return create_simple_single_input_output_node(g, params, input, shape); } +NodeID GraphBuilder::add_resize_node(Graph &g, NodeParams params, NodeIdxPair input, InterpolationPolicy policy, + float width_scale, float height_scale) +{ + return create_simple_single_input_output_node(g, params, input, policy, width_scale, height_scale); +} + NodeID GraphBuilder::add_scale_layer(Graph &g, const NodeParams ¶ms, NodeIdxPair input, ITensorAccessorUPtr mul_accessor, ITensorAccessorUPtr add_accessor) { CHECK_NODEIDX_PAIR(input, g); diff --git 
a/src/graph/backends/CL/CLFunctionsFactory.cpp b/src/graph/backends/CL/CLFunctionsFactory.cpp index ac04f1063c..90ea81f21a 100644 --- a/src/graph/backends/CL/CLFunctionsFactory.cpp +++ b/src/graph/backends/CL/CLFunctionsFactory.cpp @@ -141,6 +141,38 @@ std::unique_ptr create_batch_normalization_layer(BatchNormalizationLa return std::move(func); } +/** Create a backend channel shuffle layer function + * + * @param[in] node Node to create the backend function for + * + * @return Backend channel shuffle layer function + */ +std::unique_ptr create_channel_shuffle_layer(ChannelShuffleLayerNode &node) +{ + ARM_COMPUTE_LOG_GRAPH_VERBOSE( + "Creating CL Channel Shuffle node with ID : " << node.id() << " and Name: " << node.name() + << std::endl); + ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1); + ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1); + + // Extract IO and info + ICLTensor *input = get_backing_tensor(node.input(0)); + ICLTensor *output = get_backing_tensor(node.output(0)); + const unsigned int num_groups = node.num_groups(); + + // Create function + auto func = support::cpp14::make_unique(); + func->configure(input, output, num_groups); + + ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated CLChannelShuffleLayer" + << " Data Type: " << input->info()->data_type() + << " Shape: " << input->info()->tensor_shape() + << " Num groups: " << num_groups + << std::endl); + + return std::move(func); +} + /** Create a backend convolution layer function * * @param[in] node Node to create the backend function for @@ -206,6 +238,46 @@ std::unique_ptr create_convolution_layer(ConvolutionLayerNode &node, return func; } +/** Create a backend deconvolution layer function + * + * @param[in] node Node to create the backend function for + * + * @return Backend deconvolution layer function + */ +std::unique_ptr create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx) +{ + ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating CL DeconvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl); + ARM_COMPUTE_ERROR_ON(node.num_inputs() != 3); + ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1); + + // Extract IO and info + ICLTensor *input = get_backing_tensor(node.input(0)); + ICLTensor *weights = get_backing_tensor(node.input(1)); + ICLTensor *biases = get_backing_tensor(node.input(2)); + ICLTensor *output = get_backing_tensor(node.output(0)); + + const PadStrideInfo deconv_info = node.deconvolution_info(); + const Size2D inner_border = node.inner_border(); + + // Create and configure function (we assume that functions have been validated before creation) + std::shared_ptr mm = get_memory_manager(ctx, Target::CL); + std::unique_ptr func; + std::string func_name; + + std::tie(func, func_name) = create_named_memory_managed_function(std::string("CLDeconvolutionLayer"), mm, + input, weights, biases, output, + deconv_info, inner_border.x(), inner_border.y()); + + // Log info + ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name + << " Data Type: " << input->info()->data_type() + << " Input shape: " << input->info()->tensor_shape() + << " Weights shape: " << weights->info()->tensor_shape() + << " Output shape: " << output->info()->tensor_shape() + << std::endl); + return func; +} + /** Create a backend layer depth concatenate function * * @param[in] node Node to create the backend function for @@ -530,6 +602,41 @@ std::unique_ptr create_reshape_layer(ReshapeLayerNode &node) return std::move(func); } +/** Create a backend resize layer function + * + * @param[in] node Node to create the backend 
function for + * + * @return Backend resize layer function + */ +std::unique_ptr create_resize_layer(ResizeLayerNode &node) +{ + ARM_COMPUTE_LOG_GRAPH_VERBOSE( + "Creating CL Resize node with ID : " << node.id() << " and Name: " << node.name() << std::endl); + ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1); + ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1); + + // Extract IO and info + ICLTensor *input = get_backing_tensor(node.input(0)); + ICLTensor *output = get_backing_tensor(node.output(0)); + ARM_COMPUTE_ERROR_ON(input == nullptr); + ARM_COMPUTE_ERROR_ON(output == nullptr); + const InterpolationPolicy policy = node.policy(); + + // Create and configure function + auto func = support::cpp14::make_unique(); + func->configure(input, output, policy, BorderMode::CONSTANT); + + // Log info + ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated CLScale" + << " Data Type: " << input->info()->data_type() + << " Input shape: " << input->info()->tensor_shape() + << " Output shape: " << output->info()->tensor_shape() + << " Interpolation: " << policy + << std::endl); + + return std::move(func); +} + /** Create a backend softmax layer function * * @param[in] node Node to create the backend function for @@ -579,8 +686,12 @@ std::unique_ptr CLFunctionFactory::create(INode *node, GraphContext & return create_activation_layer(*polymorphic_downcast(node)); case NodeType::BatchNormalizationLayer: return create_batch_normalization_layer(*polymorphic_downcast(node)); + case NodeType::ChannelShuffleLayer: + return create_channel_shuffle_layer(*polymorphic_downcast(node)); case NodeType::ConvolutionLayer: return create_convolution_layer(*polymorphic_downcast(node), ctx); + case NodeType::DeconvolutionLayer: + return create_deconvolution_layer(*polymorphic_downcast(node), ctx); case NodeType::DepthConcatenateLayer: return create_depth_concatenate_layer(*polymorphic_downcast(node)); case NodeType::DepthwiseConvolutionLayer: @@ -597,6 +708,8 @@ std::unique_ptr CLFunctionFactory::create(INode *node, GraphContext & return create_pooling_layer(*polymorphic_downcast(node)); case NodeType::ReshapeLayer: return create_reshape_layer(*polymorphic_downcast(node)); + case NodeType::ResizeLayer: + return create_resize_layer(*polymorphic_downcast(node)); case NodeType::SoftmaxLayer: return create_softmax_layer(*polymorphic_downcast(node), ctx); default: diff --git a/src/graph/backends/CL/CLNodeValidator.cpp b/src/graph/backends/CL/CLNodeValidator.cpp index c16b2e67df..3e63617478 100644 --- a/src/graph/backends/CL/CLNodeValidator.cpp +++ b/src/graph/backends/CL/CLNodeValidator.cpp @@ -47,6 +47,8 @@ Status CLNodeValidator::validate(INode *node) NodeType type = node->type(); switch(type) { + case NodeType::ChannelShuffleLayer: + return detail::validate_channel_shuffle_layer(*polymorphic_downcast(node)); case NodeType::ConvolutionLayer: return detail::validate_convolution_layer create_convolution_layer(ConvolutionLayerNode &node, return func; } +/** Create a backend deconvolution layer function + * + * @param[in] node Node to create the backend function for + * + * @return Backend deconvolution layer function + */ +std::unique_ptr create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx) +{ + ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON DeconvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl); + ARM_COMPUTE_ERROR_ON(node.num_inputs() != 3); + ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1); + + // Extract IO and info + ITensor *input = get_backing_tensor(node.input(0)); + ITensor *weights 
= get_backing_tensor(node.input(1)); + ITensor *biases = get_backing_tensor(node.input(2)); + ITensor *output = get_backing_tensor(node.output(0)); + + const PadStrideInfo deconv_info = node.deconvolution_info(); + const Size2D inner_border = node.inner_border(); + + // Create and configure function (we assume that functions have been validated before creation) + std::shared_ptr mm = get_memory_manager(ctx, Target::CL); + std::unique_ptr func; + std::string func_name; + + std::tie(func, func_name) = create_named_memory_managed_function(std::string("NEDeconvolutionLayer"), mm, + input, weights, biases, output, + deconv_info, inner_border.x(), inner_border.y()); + + // Log info + ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name + << " Data Type: " << input->info()->data_type() + << " Input shape: " << input->info()->tensor_shape() + << " Weights shape: " << weights->info()->tensor_shape() + << " Output shape: " << output->info()->tensor_shape() + << std::endl); + return func; +} + /** Create a backend layer depth concatenate function * * @param[in] node Node to create the backend function for @@ -503,6 +543,41 @@ std::unique_ptr create_reshape_layer(ReshapeLayerNode &node) return std::move(func); } +/** Create a backend resize layer function + * + * @param[in] node Node to create the backend function for + * + * @return Backend resize layer function + */ +std::unique_ptr create_resize_layer(ResizeLayerNode &node) +{ + ARM_COMPUTE_LOG_GRAPH_VERBOSE( + "Creating NEON Resize node with ID : " << node.id() << " and Name: " << node.name() << std::endl); + ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1); + ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1); + + // Extract IO and info + ITensor *input = get_backing_tensor(node.input(0)); + ITensor *output = get_backing_tensor(node.output(0)); + ARM_COMPUTE_ERROR_ON(input == nullptr); + ARM_COMPUTE_ERROR_ON(output == nullptr); + const InterpolationPolicy policy = node.policy(); + + // Create and configure function + auto func = support::cpp14::make_unique(); + func->configure(input, output, policy, BorderMode::CONSTANT); + + // Log info + ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated NEScale" + << " Data Type: " << input->info()->data_type() + << " Input shape: " << input->info()->tensor_shape() + << " Output shape: " << output->info()->tensor_shape() + << " Interpolation: " << policy + << std::endl); + + return std::move(func); +} + /** Create a backend softmax layer function * * @param[in] node Node to create the backend function for @@ -553,6 +628,8 @@ std::unique_ptr NEFunctionFactory::create(INode *node, GraphContext & return create_batch_normalization_layer(*polymorphic_downcast(node)); case NodeType::ConvolutionLayer: return create_convolution_layer(*polymorphic_downcast(node), ctx); + case NodeType::DeconvolutionLayer: + return create_deconvolution_layer(*polymorphic_downcast(node), ctx); case NodeType::DepthConcatenateLayer: return create_depth_concatenate_layer(*polymorphic_downcast(node)); case NodeType::DepthwiseConvolutionLayer: @@ -569,6 +646,8 @@ std::unique_ptr NEFunctionFactory::create(INode *node, GraphContext & return create_pooling_layer(*polymorphic_downcast(node)); case NodeType::ReshapeLayer: return create_reshape_layer(*polymorphic_downcast(node)); + case NodeType::ResizeLayer: + return create_resize_layer(*polymorphic_downcast(node)); case NodeType::SoftmaxLayer: return create_softmax_layer(*polymorphic_downcast(node), ctx); default: diff --git a/src/graph/backends/NEON/NENodeValidator.cpp 
b/src/graph/backends/NEON/NENodeValidator.cpp index e438e79c76..1c17f92fa1 100644 --- a/src/graph/backends/NEON/NENodeValidator.cpp +++ b/src/graph/backends/NEON/NENodeValidator.cpp @@ -47,6 +47,8 @@ Status NENodeValidator::validate(INode *node) NodeType type = node->type(); switch(type) { + case NodeType::ChannelShuffleLayer: + return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR, "Channel Shuffle is unsupported for NEON"); case NodeType::ConvolutionLayer: return detail::validate_convolution_layer(*polymorphic_downcast(node)); - default: return Status{}; } diff --git a/src/graph/nodes/ChannelShuffleLayerNode.cpp b/src/graph/nodes/ChannelShuffleLayerNode.cpp new file mode 100644 index 0000000000..08fcce1192 --- /dev/null +++ b/src/graph/nodes/ChannelShuffleLayerNode.cpp @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "arm_compute/graph/nodes/ChannelShuffleLayerNode.h" + +#include "arm_compute/graph/Graph.h" +#include "arm_compute/graph/INodeVisitor.h" + +namespace arm_compute +{ +namespace graph +{ +ChannelShuffleLayerNode::ChannelShuffleLayerNode(unsigned int num_groups) + : _num_groups(num_groups) +{ + _input_edges.resize(1, EmptyEdgeID); + _outputs.resize(1, NullTensorID); +} + +unsigned int ChannelShuffleLayerNode::num_groups() const +{ + return _num_groups; +} + +bool ChannelShuffleLayerNode::forward_descriptors() +{ + if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID)) + { + Tensor *dst = output(0); + ARM_COMPUTE_ERROR_ON(dst == nullptr); + dst->desc() = configure_output(0); + return true; + } + return false; +} + +TensorDescriptor ChannelShuffleLayerNode::configure_output(size_t idx) const +{ + ARM_COMPUTE_UNUSED(idx); + ARM_COMPUTE_ERROR_ON(idx >= _outputs.size()); + + const Tensor *src = input(0); + ARM_COMPUTE_ERROR_ON(src == nullptr); + + return src->desc(); +} + +NodeType ChannelShuffleLayerNode::type() const +{ + return NodeType::ChannelShuffleLayer; +} + +void ChannelShuffleLayerNode::accept(INodeVisitor &v) +{ + v.visit(*this); +} +} // namespace graph +} // namespace arm_compute \ No newline at end of file diff --git a/src/graph/nodes/DeconvolutionLayerNode.cpp b/src/graph/nodes/DeconvolutionLayerNode.cpp new file mode 100644 index 0000000000..9329ae3c23 --- /dev/null +++ b/src/graph/nodes/DeconvolutionLayerNode.cpp @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2018 ARM Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "arm_compute/graph/nodes/DeconvolutionLayerNode.h" + +#include "arm_compute/core/Utils.h" +#include "arm_compute/graph/Graph.h" +#include "arm_compute/graph/INodeVisitor.h" +#include "arm_compute/graph/Utils.h" + +namespace arm_compute +{ +namespace graph +{ +DeconvolutionLayerNode::DeconvolutionLayerNode(PadStrideInfo info, Size2D inner_border) + : _info(std::move(info)), _inner_border(inner_border) +{ + _input_edges.resize(3, EmptyEdgeID); + _outputs.resize(1, NullTensorID); +} + +PadStrideInfo DeconvolutionLayerNode::deconvolution_info() const +{ + return _info; +} + +Size2D DeconvolutionLayerNode::inner_border() const +{ + return _inner_border; +} + +TensorDescriptor DeconvolutionLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor, + const TensorDescriptor &weights_descriptor, + const PadStrideInfo &info, + const Size2D &inner_border) +{ + unsigned int output_width = 0; + unsigned int output_height = 0; + + const unsigned int input_width = get_dimension_size(input_descriptor, DataLayoutDimension::WIDTH); + const unsigned int input_height = get_dimension_size(input_descriptor, DataLayoutDimension::HEIGHT); + const unsigned int kernel_width = get_dimension_size(weights_descriptor, DataLayoutDimension::WIDTH); + const unsigned int kernel_height = get_dimension_size(weights_descriptor, DataLayoutDimension::HEIGHT); + + std::tie(output_width, output_height) = deconvolution_output_dimensions(input_width, input_height, + kernel_width, kernel_height, + info.pad().first, info.pad().second, + inner_border.x(), inner_border.y(), + info.stride().first, info.stride().second); + + TensorDescriptor output_descriptor = input_descriptor; + output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::WIDTH), output_width); + output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::HEIGHT), output_height); + output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::CHANNEL), weights_descriptor.shape[3]); + + return output_descriptor; +} + +bool DeconvolutionLayerNode::forward_descriptors() +{ + if((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID)) + { + Tensor *dst = output(0); + ARM_COMPUTE_ERROR_ON(dst == nullptr); + dst->desc() = configure_output(0); + return true; + } + return false; +} + +TensorDescriptor 
DeconvolutionLayerNode::configure_output(size_t idx) const +{ + ARM_COMPUTE_UNUSED(idx); + const Tensor *src = input(0); + const Tensor *weights = input(1); + + ARM_COMPUTE_ERROR_ON(src == nullptr || weights == nullptr); + + TensorDescriptor output_info = compute_output_descriptor(src->desc(), weights->desc(), _info, _inner_border); + return output_info; +} + +NodeType DeconvolutionLayerNode::type() const +{ + return NodeType::DeconvolutionLayer; +} + +void DeconvolutionLayerNode::accept(INodeVisitor &v) +{ + v.visit(*this); +} +} // namespace graph +} // namespace arm_compute \ No newline at end of file diff --git a/src/graph/nodes/DummyNode.cpp b/src/graph/nodes/DummyNode.cpp new file mode 100644 index 0000000000..e6411810de --- /dev/null +++ b/src/graph/nodes/DummyNode.cpp @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "arm_compute/graph/nodes/DummyNode.h" + +#include "arm_compute/core/Error.h" +#include "arm_compute/graph/Graph.h" +#include "arm_compute/graph/INodeVisitor.h" +#include "arm_compute/graph/Tensor.h" + +namespace arm_compute +{ +namespace graph +{ +DummyNode::DummyNode(TensorShape shape) + : _shape(shape) +{ + _input_edges.resize(1, EmptyEdgeID); + _outputs.resize(1, NullTensorID); +} + +bool DummyNode::forward_descriptors() +{ + if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID)) + { + Tensor *dst = output(0); + ARM_COMPUTE_ERROR_ON(dst == nullptr); + dst->desc() = configure_output(0); + return true; + } + return false; +} + +TensorDescriptor DummyNode::configure_output(size_t idx) const +{ + ARM_COMPUTE_UNUSED(idx); + ARM_COMPUTE_ERROR_ON(idx >= _outputs.size()); + + const Tensor *src = input(0); + ARM_COMPUTE_ERROR_ON(src == nullptr); + + TensorDescriptor output_desc = src->desc(); + output_desc.shape = _shape; + + return output_desc; +} + +NodeType DummyNode::type() const +{ + return NodeType::Dummy; +} + +void DummyNode::accept(INodeVisitor &v) +{ + v.visit(*this); +} +} // namespace graph +} // namespace arm_compute \ No newline at end of file diff --git a/src/graph/nodes/ResizeLayerNode.cpp b/src/graph/nodes/ResizeLayerNode.cpp new file mode 100644 index 0000000000..a6aa7bfe5c --- /dev/null +++ b/src/graph/nodes/ResizeLayerNode.cpp @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2018 ARM Limited. 
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/ResizeLayerNode.h"
+
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
+#include "arm_compute/graph/Utils.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+ResizeLayerNode::ResizeLayerNode(InterpolationPolicy policy, float scale_width, float scale_height)
+    : _policy(policy), _scale_width(scale_width), _scale_height(scale_height)
+{
+    _input_edges.resize(1, EmptyEdgeID);
+    _outputs.resize(1, NullTensorID);
+}
+
+InterpolationPolicy ResizeLayerNode::policy() const
+{
+    return _policy;
+}
+
+std::pair<float, float> ResizeLayerNode::scaling_factor() const
+{
+    return std::make_pair(_scale_width, _scale_height);
+}
+
+bool ResizeLayerNode::forward_descriptors()
+{
+    if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+    {
+        Tensor *dst = output(0);
+        ARM_COMPUTE_ERROR_ON(dst == nullptr);
+        dst->desc() = configure_output(0);
+        return true;
+    }
+    return false;
+}
+
+TensorDescriptor ResizeLayerNode::configure_output(size_t idx) const
+{
+    ARM_COMPUTE_UNUSED(idx);
+    ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+    const Tensor *src = input(0);
+    ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+    TensorDescriptor output_desc = src->desc();
+    size_t width_idx  = get_dimension_idx(output_desc, DataLayoutDimension::WIDTH);
+    size_t height_idx = get_dimension_idx(output_desc, DataLayoutDimension::HEIGHT);
+    output_desc.shape.set(width_idx, static_cast<int>(output_desc.shape[width_idx] * _scale_width));
+    output_desc.shape.set(height_idx, static_cast<int>(output_desc.shape[height_idx] * _scale_height));
+
+    return output_desc;
+}
+
+NodeType ResizeLayerNode::type() const
+{
+    return NodeType::ResizeLayer;
+}
+
+void ResizeLayerNode::accept(INodeVisitor &v)
+{
+    v.visit(*this);
+}
+} // namespace graph
+} // namespace arm_compute
\ No newline at end of file
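
Illustrative usage sketch (not part of the patch): the snippet below shows how the GraphBuilder entry points added in this change could be chained together. It relies only on the signatures visible in the diff above; the header paths, the aggregate NodeParams initialisation, the concrete parameter values and the null tensor accessors are assumptions made for illustration, not code from the library.

#include "arm_compute/core/Types.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/GraphBuilder.h"
#include "arm_compute/graph/Types.h"

using namespace arm_compute;
using namespace arm_compute::graph;

// Hypothetical helper: wires the four new node types into an existing graph.
void add_new_nodes_example(Graph &g, NodeIdxPair input)
{
    NodeParams params{ "example", Target::CL }; // assumed aggregate form { name, target }

    // Channel shuffle with 4 groups
    NodeID shuffle_nid = GraphBuilder::add_channel_shuffle_node(g, params, input, 4);

    // Upscale by 2x in both spatial dimensions with nearest-neighbour interpolation
    NodeID resize_nid = GraphBuilder::add_resize_node(g, params, { shuffle_nid, 0 },
                                                      InterpolationPolicy::NEAREST_NEIGHBOR, 2.f, 2.f);

    // 3x3 deconvolution producing 16 feature maps, stride 1, pad 1, no inner border;
    // null accessors leave the weights unpopulated and omit the bias input
    NodeID deconv_nid = GraphBuilder::add_deconvolution_node(g, params, { resize_nid, 0 },
                                                             Size2D(3, 3), 16, PadStrideInfo(1, 1, 1, 1),
                                                             Size2D(0, 0), nullptr, nullptr);

    // Dummy node forcing an arbitrary output shape (performance analysis / debugging aid)
    GraphBuilder::add_dummy_node(g, params, { deconv_nid, 0 }, TensorShape(56U, 56U, 16U));
}

In each case the builder creates the constant weight/bias nodes where needed, adds the layer node and connects its input edges, mirroring the flow of the existing add_convolution_node path shown earlier in the diff.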