path: root/src/graph/nodes
author    Georgios Pinitas <georgios.pinitas@arm.com>    2018-04-03 13:44:29 +0100
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:49:16 +0000
commit    d9eb27597eabe5b7c17520f4f9b3f8a282d72573 (patch)
tree      9b2b7d74b0ef83623b18d6d4279a564e5b63d641 /src/graph/nodes
parent    a8ca2b0cfe052c9a28b691317a674f28f495c139 (diff)
download  ComputeLibrary-d9eb27597eabe5b7c17520f4f9b3f8a282d72573.tar.gz
COMPMID-797: Switch to new graph.
- Cleaned up build system

Change-Id: If2faa27ee5b31fa8b972836960ab3ef671059c8d
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/126435
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
Diffstat (limited to 'src/graph/nodes')
-rw-r--r--  src/graph/nodes/ActivationLayer.cpp  56
-rw-r--r--  src/graph/nodes/ActivationLayerNode.cpp  83
-rw-r--r--  src/graph/nodes/BatchNormalizationLayer.cpp  105
-rw-r--r--  src/graph/nodes/BatchNormalizationLayerNode.cpp  94
-rw-r--r--  src/graph/nodes/BranchLayer.cpp  130
-rw-r--r--  src/graph/nodes/ConstNode.cpp (renamed from src/graph/nodes/QuantizationLayer.cpp)  60
-rw-r--r--  src/graph/nodes/ConvolutionLayer.cpp  363
-rw-r--r--  src/graph/nodes/ConvolutionLayerNode.cpp  111
-rw-r--r--  src/graph/nodes/DeQuantizationLayer.cpp  68
-rw-r--r--  src/graph/nodes/DepthConcatenateLayerNode.cpp  133
-rw-r--r--  src/graph/nodes/DepthConvertLayer.cpp  58
-rw-r--r--  src/graph/nodes/DepthwiseConvolutionLayer.cpp  91
-rw-r--r--  src/graph/nodes/DepthwiseConvolutionLayerNode.cpp  110
-rw-r--r--  src/graph/nodes/EltwiseLayerNode.cpp  83
-rw-r--r--  src/graph/nodes/FlattenLayer.cpp  54
-rw-r--r--  src/graph/nodes/FlattenLayerNode.cpp  80
-rw-r--r--  src/graph/nodes/FullyConnectedLayer.cpp  125
-rw-r--r--  src/graph/nodes/InputNode.cpp (renamed from src/graph/nodes/FloorLayer.cpp)  61
-rw-r--r--  src/graph/nodes/L2NormalizeLayer.cpp  56
-rw-r--r--  src/graph/nodes/NormalizationLayer.cpp  55
-rw-r--r--  src/graph/nodes/NormalizationLayerNode.cpp  84
-rw-r--r--  src/graph/nodes/OutputNode.cpp (renamed from src/graph/nodes/SoftmaxLayer.cpp)  55
-rw-r--r--  src/graph/nodes/PoolingLayer.cpp  55
-rw-r--r--  src/graph/nodes/PoolingLayerNode.cpp  103
-rw-r--r--  src/graph/nodes/ReshapeLayer.cpp  70
-rw-r--r--  src/graph/nodes/ResidualLayer.cpp  199
-rw-r--r--  src/graph/nodes/SoftmaxLayerNode.cpp  84
-rw-r--r--  src/graph/nodes/SplitLayerNode.cpp  117
28 files changed, 1312 insertions, 1431 deletions
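
Every *Node.cpp file added by this patch implements the same small contract: the constructor reserves the node's input edges and output tensor slots, forward_descriptors() copies the descriptor computed by configure_output() to the output once the tensor IDs are connected, and validate()/type()/accept() hook the node into the new graph's visitor machinery. The stand-alone sketch below distills that pattern with simplified stand-in types (TensorDescriptor, Tensor and the node class are illustrative stand-ins, not the real arm_compute::graph types):

#include <cstddef>
#include <iostream>
#include <vector>

// Simplified stand-ins for the arm_compute::graph types (illustrative only)
struct TensorDescriptor
{
    std::vector<std::size_t> shape;
};

struct Tensor
{
    TensorDescriptor desc;
};

// Mirrors the contract each new *Node.cpp in this patch implements
class PassthroughNodeSketch
{
public:
    PassthroughNodeSketch(Tensor *input, Tensor *output)
        : _input(input), _output(output)
    {
    }

    // Propagate the computed descriptor to the output once both ends are connected
    bool forward_descriptors()
    {
        if(_input != nullptr && _output != nullptr)
        {
            _output->desc = configure_output(0);
            return true;
        }
        return false;
    }

    // Element-wise nodes (activation, batch normalization) keep the input shape unchanged
    TensorDescriptor configure_output(std::size_t /*idx*/) const
    {
        return _input->desc;
    }

private:
    Tensor *_input;
    Tensor *_output;
};

int main()
{
    Tensor in{ { { 224, 224, 3 } } };
    Tensor out{};
    PassthroughNodeSketch node(&in, &out);
    node.forward_descriptors();
    std::cout << out.desc.shape[0] << "x" << out.desc.shape[1] << "x" << out.desc.shape[2] << std::endl; // 224x224x3
    return 0;
}

Shape-preserving nodes such as ActivationLayerNode and BatchNormalizationLayerNode return the input descriptor unchanged, while ConvolutionLayerNode and DepthConcatenateLayerNode recompute the output shape, as shown in their diffs below.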
diff --git a/src/graph/nodes/ActivationLayer.cpp b/src/graph/nodes/ActivationLayer.cpp
deleted file mode 100644
index 546c42a1e5..0000000000
--- a/src/graph/nodes/ActivationLayer.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/ActivationLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-ActivationLayer::ActivationLayer(const ActivationLayerInfo activation_info)
- : _activation_info(activation_info)
-{
- set_supports_in_place(true);
-}
-
-std::unique_ptr<arm_compute::IFunction> ActivationLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
- _target_hint = ctx.hints().target_hint();
-
- // Create node context
- NodeContext node_ctx(OperationType::ActivationLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_output(out);
- node_ctx.add_parameter<ActivationLayerInfo>("ActivationLayerInfo", _activation_info);
-
- // Get function
- return OperationRegistry::get().find_operation(OperationType::ActivationLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph/nodes/ActivationLayerNode.cpp b/src/graph/nodes/ActivationLayerNode.cpp
new file mode 100644
index 0000000000..9996d2ce3f
--- /dev/null
+++ b/src/graph/nodes/ActivationLayerNode.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/ActivationLayerNode.h"
+
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+ActivationLayerNode::ActivationLayerNode(ActivationLayerInfo info)
+ : _info(info)
+{
+ _input_edges.resize(1, EmptyEdgeID);
+ _outputs.resize(1, NullTensorID);
+}
+
+ActivationLayerInfo ActivationLayerNode::activation_info() const
+{
+ return _info;
+}
+
+bool ActivationLayerNode::forward_descriptors()
+{
+ if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ {
+ Tensor *dst = output(0);
+ ARM_COMPUTE_ERROR_ON(dst == nullptr);
+ dst->desc() = configure_output(0);
+ return true;
+ }
+ return false;
+}
+
+TensorDescriptor ActivationLayerNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+ const Tensor *src = input(0);
+ ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+ return src->desc();
+}
+
+Status ActivationLayerNode::validate()
+{
+ return Status{};
+}
+
+NodeType ActivationLayerNode::type() const
+{
+ return NodeType::ActivationLayer;
+}
+
+void ActivationLayerNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
+}
+} // namespace graph
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph/nodes/BatchNormalizationLayer.cpp b/src/graph/nodes/BatchNormalizationLayer.cpp
deleted file mode 100644
index 24287ac61a..0000000000
--- a/src/graph/nodes/BatchNormalizationLayer.cpp
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/BatchNormalizationLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-std::unique_ptr<arm_compute::IFunction> BatchNormalizationLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
- _target_hint = ctx.hints().target_hint();
-
- unsigned int batch_norm_size = in->info()->dimension(2);
- if(_mean.tensor() == nullptr)
- {
- _mean.set_info(TensorInfo(TensorShape(batch_norm_size), in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position()));
- }
- if(_var.tensor() == nullptr)
- {
- _var.set_info(TensorInfo(TensorShape(batch_norm_size), in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position()));
- }
- if(_beta.tensor() == nullptr)
- {
- _beta.set_info(TensorInfo(TensorShape(batch_norm_size), in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position()));
- }
- if(_gamma.tensor() == nullptr)
- {
- _gamma.set_info(TensorInfo(TensorShape(batch_norm_size), in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position()));
- }
-
- bool mean_is_loaded = _mean.tensor() != nullptr;
- bool var_is_loaded = _var.tensor() != nullptr;
- bool gamma_is_loaded = _gamma.tensor() != nullptr;
- bool beta_is_loaded = _beta.tensor() != nullptr;
-
- // Set mean, var, gamma and beta target
- _mean.set_target(_target_hint);
- _var.set_target(_target_hint);
- _gamma.set_target(_target_hint);
- _beta.set_target(_target_hint);
-
- // Create node context
- NodeContext node_ctx(OperationType::BatchNormalizationLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_input(_mean.tensor());
- node_ctx.add_input(_var.tensor());
- node_ctx.add_input(_beta.tensor());
- node_ctx.add_input(_gamma.tensor());
- node_ctx.add_output(out);
- node_ctx.add_parameter<float>("epsilon", _epsilon);
- node_ctx.add_parameter<ActivationLayerInfo>("act_info", _act_info);
-
- // Configure operation
- auto func = OperationRegistry::get().find_operation(OperationType::BatchNormalizationLayer, _target_hint)->configure(node_ctx);
-
- // Fill tensors
- if(!mean_is_loaded)
- {
- _mean.allocate_and_fill_if_needed();
- }
- if(!var_is_loaded)
- {
- _var.allocate_and_fill_if_needed();
- }
- if(!gamma_is_loaded)
- {
- _gamma.allocate_and_fill_if_needed();
- }
- if(!beta_is_loaded)
- {
- _beta.allocate_and_fill_if_needed();
- }
-
- // Get function
- return func;
-}
\ No newline at end of file
diff --git a/src/graph/nodes/BatchNormalizationLayerNode.cpp b/src/graph/nodes/BatchNormalizationLayerNode.cpp
new file mode 100644
index 0000000000..f7b041c828
--- /dev/null
+++ b/src/graph/nodes/BatchNormalizationLayerNode.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/BatchNormalizationLayerNode.h"
+
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+BatchNormalizationLayerNode::BatchNormalizationLayerNode(float epsilon, ActivationLayerInfo fused_activation)
+ : _epsilon(epsilon), _fused_activation(fused_activation)
+{
+ _input_edges.resize(5, EmptyEdgeID);
+ _outputs.resize(1, NullTensorID);
+}
+
+float BatchNormalizationLayerNode::epsilon() const
+{
+ return _epsilon;
+}
+
+ActivationLayerInfo BatchNormalizationLayerNode::fused_activation() const
+{
+ return _fused_activation;
+}
+
+void BatchNormalizationLayerNode::set_fused_activation(ActivationLayerInfo fused_activation)
+{
+ _fused_activation = fused_activation;
+}
+
+bool BatchNormalizationLayerNode::forward_descriptors()
+{
+ if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ {
+ Tensor *dst = output(0);
+ ARM_COMPUTE_ERROR_ON(dst == nullptr);
+ dst->desc() = configure_output(0);
+ return true;
+ }
+ return false;
+}
+
+TensorDescriptor BatchNormalizationLayerNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+ const Tensor *src = input(0);
+ ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+ return src->desc();
+}
+
+Status BatchNormalizationLayerNode::validate()
+{
+ return Status{};
+}
+
+NodeType BatchNormalizationLayerNode::type() const
+{
+ return NodeType::BatchNormalizationLayer;
+}
+
+void BatchNormalizationLayerNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
+}
+} // namespace graph
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph/nodes/BranchLayer.cpp b/src/graph/nodes/BranchLayer.cpp
deleted file mode 100644
index 7a20a565b8..0000000000
--- a/src/graph/nodes/BranchLayer.cpp
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/BranchLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/SubGraph.h"
-#include "arm_compute/graph/Tensor.h"
-#include "arm_compute/runtime/IFunction.h"
-#include "support/ToolchainSupport.h"
-#include "utils/TypePrinter.h"
-
-#include <memory>
-#include <tuple>
-#include <vector>
-
-using namespace arm_compute::graph;
-
-/** Branch function */
-class BranchFunction final : public arm_compute::IFunction
-{
-public:
- /** Default Constructor */
- BranchFunction()
- : _graphs()
- {
- }
- /** Registers graph to be executed by the branch function
- *
- * @param[in] graph Graph to register
- */
- void register_graph(std::unique_ptr<Graph> graph)
- {
- _graphs.push_back(std::move(graph));
- }
- // Inherited methods overriden:
- void run() override
- {
- for(auto &g : _graphs)
- {
- ARM_COMPUTE_ERROR_ON(g.get() == nullptr);
- g->run();
- }
- }
-
-private:
- std::vector<std::unique_ptr<Graph>> _graphs;
-};
-
-std::unique_ptr<arm_compute::IFunction> BranchLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON(_branch_merge_method != BranchMergeMethod::DEPTH_CONCATENATE);
- ARM_COMPUTE_UNUSED(_branch_merge_method);
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- // Create branch function
- auto func = arm_compute::support::cpp14::make_unique<BranchFunction>();
-
- // Track output depth
- int depth = 0;
-
- // Constuct all sub-graphs given the input/output
- for(auto &sg : _sub_graphs)
- {
- ARM_COMPUTE_ERROR_ON(sg.get() == nullptr);
-
- // IO buffers
- std::unique_ptr<ITensorObject> in;
- std::unique_ptr<ITensorObject> out;
- SubTensor *out_sub_tensor = nullptr;
-
- // Create input sub-tensor
- if(!sg->has_input())
- {
- ARM_COMPUTE_ERROR_ON(dynamic_cast<Tensor *>(input) == nullptr);
- in = arm_compute::support::cpp14::make_unique<SubTensor>(*dynamic_cast<Tensor *>(input),
- input->tensor()->info()->tensor_shape(),
- Coordinates());
- }
-
- // Create output sub-tensor
- if(!sg->has_output())
- {
- ARM_COMPUTE_ERROR_ON((dynamic_cast<Tensor *>(output) == nullptr) && (dynamic_cast<SubTensor *>(output) == nullptr));
-
- out = arm_compute::support::cpp14::make_unique<SubTensor>(output->tensor(),
- TensorShape(),
- Coordinates(0, 0, depth),
- output->target(),
- true);
- out_sub_tensor = dynamic_cast<SubTensor *>(out.get());
- }
-
- // Construct sub_graph
- auto g = sg->construct(ctx, std::move(in), std::move(out));
-
- // Register graph to function
- func->register_graph(std::move(g));
-
- // Update and track depth
- if(out_sub_tensor != nullptr)
- {
- ARM_COMPUTE_ERROR_ON(out_sub_tensor->tensor() == nullptr);
- depth += out_sub_tensor->tensor()->info()->tensor_shape()[2];
- }
- }
-
- return std::move(func);
-}
\ No newline at end of file
diff --git a/src/graph/nodes/QuantizationLayer.cpp b/src/graph/nodes/ConstNode.cpp
index c102f47633..631971c98f 100644
--- a/src/graph/nodes/QuantizationLayer.cpp
+++ b/src/graph/nodes/ConstNode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,28 +21,52 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph/nodes/QuantizationLayer.h"
+#include "arm_compute/graph/nodes/ConstNode.h"
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
-using namespace arm_compute::graph;
+namespace arm_compute
+{
+namespace graph
+{
+ConstNode::ConstNode(TensorDescriptor desc)
+ : _desc(desc)
+{
+ _outputs.resize(1, NullTensorID);
+}
-std::unique_ptr<arm_compute::IFunction> QuantizationLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
+bool ConstNode::forward_descriptors()
{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
+ if(output_id(0) != NullTensorID)
+ {
+ Tensor *t = output(0);
+ ARM_COMPUTE_ERROR_ON(t == nullptr);
+ t->desc() = configure_output(0);
+ return true;
+ }
+ return false;
+}
- _target_hint = ctx.hints().target_hint();
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
+TensorDescriptor ConstNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ return _desc;
+}
- // Create node context
- NodeContext node_ctx(OperationType::QuantizationLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_output(out);
+Status ConstNode::validate()
+{
+ return Status{};
+}
+
+NodeType ConstNode::type() const
+{
+ return NodeType::Const;
+}
- // Get function
- return OperationRegistry::get().find_operation(OperationType::QuantizationLayer, _target_hint)->configure(node_ctx);
+void ConstNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
}
+} // namespace graph
+} // namespace arm_compute
diff --git a/src/graph/nodes/ConvolutionLayer.cpp b/src/graph/nodes/ConvolutionLayer.cpp
deleted file mode 100644
index 5b3a84a4ad..0000000000
--- a/src/graph/nodes/ConvolutionLayer.cpp
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/ConvolutionLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h"
-#include "arm_compute/runtime/IFunction.h"
-#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEWinogradLayer.h"
-#include "support/ToolchainSupport.h"
-#include "utils/GraphTypePrinter.h"
-#include "utils/TypePrinter.h"
-
-#include <tuple>
-#include <vector>
-
-using namespace arm_compute::graph;
-
-namespace
-{
-/** Calculates the output shaped of the convolution layer
- *
- * @param[in] input_shape Input tensor shape
- * @param[in] weights_shape Weights shape
- * @param[in] conv_info Convolution information (padding, stride, etc.)
- *
- * @return The expected output tensor shape
- */
-TensorShape calculate_convolution_layer_output_shape(const TensorShape &input_shape, const TensorShape &weights_shape, const PadStrideInfo &conv_info)
-{
- unsigned int output_width = 0;
- unsigned int output_height = 0;
-
- // Get output width and height
- std::tie(output_width, output_height) = arm_compute::scaled_dimensions(input_shape.x(), input_shape.y(), weights_shape.x(), weights_shape.y(), conv_info);
-
- // Create output shape
- TensorShape output_shape = input_shape;
- output_shape.set(0, output_width);
- output_shape.set(1, output_height);
- output_shape.set(2, weights_shape[3]);
-
- return output_shape;
-}
-
-// Instantiate GEMM based convolution layer
-template <typename ConvolutionType, typename TensorType, TargetHint target_hint>
-std::unique_ptr<arm_compute::IFunction> instantiate_function(arm_compute::ITensor *input, arm_compute::ITensor *weights, arm_compute::ITensor *biases, arm_compute::ITensor *output,
- const PadStrideInfo &conv_info, const WeightsInfo &weights_info)
-{
- auto conv = arm_compute::support::cpp14::make_unique<ConvolutionType>();
- conv->configure(
- dynamic_cast<TensorType *>(input),
- dynamic_cast<TensorType *>(weights),
- dynamic_cast<TensorType *>(biases),
- dynamic_cast<TensorType *>(output),
- conv_info, weights_info);
- return std::move(conv);
-}
-
-// Instantiate direct convolution layer
-template <typename ConvolutionType, typename TensorType, TargetHint target_hint>
-std::unique_ptr<arm_compute::IFunction> instantiate_direct_function(arm_compute::ITensor *input, arm_compute::ITensor *weights, arm_compute::ITensor *biases, arm_compute::ITensor *output,
- const PadStrideInfo &conv_info)
-{
- auto conv = arm_compute::support::cpp14::make_unique<ConvolutionType>();
- conv->configure(
- dynamic_cast<TensorType *>(input),
- dynamic_cast<TensorType *>(weights),
- dynamic_cast<TensorType *>(biases),
- dynamic_cast<TensorType *>(output),
- conv_info);
- return std::move(conv);
-}
-
-template <TargetHint target_hint>
-std::unique_ptr<arm_compute::IFunction> instantiate(arm_compute::ITensor *input, arm_compute::ITensor *weights, arm_compute::ITensor *biases, arm_compute::ITensor *output,
- const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
- ConvolutionMethodHint conv_method);
-
-template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(arm_compute::ITensor *input, arm_compute::ITensor *weights, arm_compute::ITensor *biases, arm_compute::ITensor *output,
- const PadStrideInfo &conv_info,
- const WeightsInfo &weights_info,
- ConvolutionMethodHint conv_method)
-{
- if((conv_method == ConvolutionMethodHint::WINOGRAD)
- && arm_compute::CLWinogradConvolutionLayer::validate(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), conv_info)) // NOLINT
- {
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLWinogradConvolutionLayer");
- return instantiate_direct_function<arm_compute::CLWinogradConvolutionLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, weights, biases, output, conv_info);
- }
- else if((conv_method == ConvolutionMethodHint::DIRECT)
- && arm_compute::CLDirectConvolutionLayer::validate(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), conv_info)) // NOLINT
- {
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLDirectConvolutionLayer");
- return instantiate_direct_function<arm_compute::CLDirectConvolutionLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, weights, biases, output, conv_info);
- }
- else
- {
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLConvolutionLayer");
- return instantiate_function<arm_compute::CLConvolutionLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, weights, biases, output, conv_info, weights_info);
- }
-}
-
-template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(arm_compute::ITensor *input, arm_compute::ITensor *weights, arm_compute::ITensor *biases, arm_compute::ITensor *output,
- const PadStrideInfo &conv_info,
- const WeightsInfo &weights_info,
- ConvolutionMethodHint conv_method)
-{
- const unsigned int kernel_size_x = weights->info()->tensor_shape().x();
- const unsigned int kernel_size_y = weights->info()->tensor_shape().y();
- const unsigned int conv_stride_x = conv_info.stride().first;
- const unsigned int conv_stride_y = conv_info.stride().second;
-
- bool is_square_kernel = (kernel_size_x == kernel_size_y);
- bool has_same_stride = (conv_stride_x == conv_stride_y);
-
- // TODO (COMPID-765) : Winograd should have a validate function
- if(conv_method == ConvolutionMethodHint::WINOGRAD && is_square_kernel && ((kernel_size_x == 3) || (kernel_size_x == 5)) && has_same_stride && (conv_info.stride().first == 1))
- {
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEWinogradConvolutionLayer");
- return instantiate_direct_function<arm_compute::NEWinogradLayer, arm_compute::ITensor, TargetHint::NEON>(input, weights, biases, output, conv_info);
- }
- else if((conv_method == ConvolutionMethodHint::DIRECT)
- && arm_compute::NEDirectConvolutionLayer::validate(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), conv_info)) // NOLINT
- {
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEDirectConvolutionLayer");
- return instantiate_direct_function<arm_compute::NEDirectConvolutionLayer, arm_compute::ITensor, TargetHint::NEON>(input, weights, biases, output, conv_info);
- }
- else
- {
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEConvolutionLayer");
- return instantiate_function<arm_compute::NEConvolutionLayer, arm_compute::ITensor, TargetHint::NEON>(input, weights, biases, output, conv_info, weights_info);
- }
-}
-} // namespace
-
-/** Grouped Convolution function */
-class GroupedConvolutionFunction final : public arm_compute::IFunction
-{
-public:
- /** Default Constructor */
- GroupedConvolutionFunction() = default;
- /** Default Destructor */
- ~GroupedConvolutionFunction() final = default;
- /** Prevent instances from being copy constructed */
- GroupedConvolutionFunction(const GroupedConvolutionFunction &) = delete;
- /** Prevent instances from being copy assigned */
- GroupedConvolutionFunction &operator=(const GroupedConvolutionFunction &) = delete;
- /** Allow instances to be move constructed */
- GroupedConvolutionFunction(GroupedConvolutionFunction &&) noexcept = default;
- /** Allow instances to be move assigned */
- GroupedConvolutionFunction &operator=(GroupedConvolutionFunction &&) noexcept = default;
- /** Adds a convolution
- *
- * @param convolution Convolution function to add
- */
- void add_convolution_function(std::unique_ptr<IFunction> convolution) // NOLINT
- {
- _convolutions.emplace_back(std::move(convolution));
- }
-
- // Inherited methods overridden:
- void run() override
- {
- for(auto &c : _convolutions)
- {
- c->run();
- }
- }
-
-private:
- std::vector<std::unique_ptr<IFunction>> _convolutions{};
-};
-
-std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
-
- // Set weights and biases info
- if(_weights.tensor() == nullptr)
- {
- TensorInfo info = TensorInfo(TensorShape(_conv_width, _conv_height, in->info()->dimension(2) / _num_groups, _ofm),
- in->info()->num_channels(),
- in->info()->data_type(),
- in->info()->fixed_point_position());
- info.set_quantization_info(_weights_quant_info);
- _weights.set_info(std::move(info));
- }
- if(_biases.has_accessor() && _biases.tensor() == nullptr)
- {
- DataType dt = in->info()->data_type();
- _biases.set_info(TensorInfo(TensorShape(_ofm), in->info()->num_channels(), is_data_type_quantized_asymmetric(dt) ? DataType::S32 : dt, in->info()->fixed_point_position()));
- }
-
- std::unique_ptr<arm_compute::IFunction> func;
- _target_hint = ctx.hints().target_hint();
- const ConvolutionMethodHint conv_method_hint = ctx.hints().convolution_method_hint();
-
- // Check if the weights and biases are loaded
- bool weights_are_loaded = _weights.tensor() != nullptr;
- bool biases_are_loaded = _biases.has_accessor() ? _biases.tensor() != nullptr : true;
-
- // Set bias and weights target
- _weights.set_target(_target_hint);
- if(_biases.has_accessor())
- {
- _biases.set_target(_target_hint);
- }
-
- // Calculate output shape
- TensorShape output_shape = calculate_convolution_layer_output_shape(in->info()->tensor_shape(), _weights.info().tensor_shape(), _conv_info);
-
- // Output auto inizialitation if not yet initialized
- arm_compute::auto_init_if_empty(*out->info(), output_shape, 1, in->info()->data_type(), in->info()->fixed_point_position(),
- (_out_quant_info.empty()) ? in->info()->quantization_info() : _out_quant_info);
-
- // Create appropriate convolution function
- if(_num_groups == 1)
- {
- func = instantiate_convolution(in, out, conv_method_hint);
- }
- else
- {
- func = instantiate_grouped_convolution(in, out, conv_method_hint);
- }
-
- // Fill weights
- if(!weights_are_loaded)
- {
- _weights.allocate_and_fill_if_needed();
- }
- // Fill biases
- if(!biases_are_loaded)
- {
- _biases.allocate_and_fill_if_needed();
- }
-
- ARM_COMPUTE_LOG_GRAPH_INFO(" Data Type: " << in->info()->data_type()
- << " Input Shape: " << in->info()->tensor_shape()
- << " Weights shape: " << _weights.info().tensor_shape()
- << " Biases Shape: " << _biases.info().tensor_shape()
- << " Output Shape: " << out->info()->tensor_shape()
- << " PadStrideInfo: " << _conv_info
- << " Groups: " << _num_groups
- << " WeightsInfo: " << _weights_info
- << std::endl);
-
- return func;
-}
-
-std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_convolution(ITensor *input, ITensor *output, ConvolutionMethodHint conv_method_hint)
-{
- std::unique_ptr<arm_compute::IFunction> func;
- if(_target_hint == TargetHint::OPENCL)
- {
- func = instantiate<TargetHint::OPENCL>(input, _weights.tensor(), _biases.tensor(), output, _conv_info, _weights_info, conv_method_hint);
- }
- else
- {
- func = instantiate<TargetHint::NEON>(input, _weights.tensor(), _biases.tensor(), output, _conv_info, _weights_info, conv_method_hint);
- }
- return func;
-}
-
-std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_grouped_convolution(ITensor *input, ITensor *output, ConvolutionMethodHint conv_method_hint)
-{
- // Get tensor shapes
- TensorShape input_shape = input->info()->tensor_shape();
- TensorShape output_shape = output->info()->tensor_shape();
- TensorShape weights_shape = _weights.info().tensor_shape();
- TensorShape biases_shape = _biases.info().tensor_shape();
-
- ARM_COMPUTE_ERROR_ON_MSG((input_shape.z() % _num_groups) != 0, "Input depth not multiple of the number of groups!");
- ARM_COMPUTE_ERROR_ON_MSG((output_shape.z() % _num_groups) != 0, "Output depth not multiple of the number of groups!");
- ARM_COMPUTE_ERROR_ON_MSG((weights_shape[3] % _num_groups) != 0, "Number of kernels not multiple of the number of groups!");
- ARM_COMPUTE_ERROR_ON_MSG((biases_shape.x() % _num_groups) != 0, "Biases not multiple of the number of groups!");
-
- // Create a grouped convolution function
- auto grouped_conv = arm_compute::support::cpp14::make_unique<GroupedConvolutionFunction>();
-
- // Create sub-tensors vectors
- _is = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);
- _os = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);
- _ws = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);
- _bs = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);
-
- // Calculate sub-tensor splits
- const int input_split = input_shape.z() / _num_groups;
- const int output_split = output_shape.z() / _num_groups;
- const int weights_split = weights_shape[3] / _num_groups;
- const int biases_split = biases_shape.x() / _num_groups;
-
- // Calculate sub-tensor shapes
- input_shape.set(2, input_split);
- output_shape.set(2, output_split);
- weights_shape.set(3, weights_split);
- biases_shape.set(0, biases_split);
-
- // Configure sub-tensors
- for(int i = 0; i < static_cast<int>(_num_groups); ++i)
- {
- // Create convolution function
- std::unique_ptr<arm_compute::IFunction> func;
-
- // Calculate sub-tensors starting coordinates
- Coordinates input_coord(0, 0, input_split * i);
- Coordinates output_coord(0, 0, output_split * i);
- Coordinates weights_coord(0, 0, 0, weights_split * i);
- Coordinates biases_coord(biases_split * i);
-
- // Create sub-tensors for input, output, weights and bias
- auto hint_to_use = (_target_hint == TargetHint::OPENCL) ? TargetHint::OPENCL : TargetHint::NEON;
- _is[i] = SubTensor(input, input_shape, input_coord, hint_to_use);
- _os[i] = SubTensor(output, output_shape, output_coord, hint_to_use);
- _ws[i] = SubTensor(_weights.tensor(), weights_shape, weights_coord, hint_to_use);
- _bs[i] = SubTensor(_biases.tensor(), biases_shape, biases_coord, hint_to_use);
-
- // Instantiate convolution function
- if(_target_hint == TargetHint::OPENCL)
- {
- func = instantiate<TargetHint::OPENCL>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
- }
- else
- {
- func = instantiate<TargetHint::NEON>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
- }
-
- // Add convolution function to the list of convolutions for the grouped convolution
- grouped_conv->add_convolution_function(std::move(func));
- }
-
- return std::move(grouped_conv);
-}
diff --git a/src/graph/nodes/ConvolutionLayerNode.cpp b/src/graph/nodes/ConvolutionLayerNode.cpp
new file mode 100644
index 0000000000..461728487f
--- /dev/null
+++ b/src/graph/nodes/ConvolutionLayerNode.cpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/ConvolutionLayerNode.h"
+
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+ConvolutionLayerNode::ConvolutionLayerNode(PadStrideInfo info, ConvolutionMethod method)
+ : _info(std::move(info)), _method(method)
+{
+ _input_edges.resize(3, EmptyEdgeID);
+ _outputs.resize(1, NullTensorID);
+}
+
+void ConvolutionLayerNode::set_convolution_method(ConvolutionMethod method)
+{
+ _method = method;
+}
+
+ConvolutionMethod ConvolutionLayerNode::convolution_method() const
+{
+ return _method;
+}
+
+PadStrideInfo ConvolutionLayerNode::convolution_info() const
+{
+ return _info;
+}
+
+TensorShape ConvolutionLayerNode::compute_output_shape(TensorShape input_shape, TensorShape weights_shape, PadStrideInfo info)
+{
+ unsigned int output_width = 0;
+ unsigned int output_height = 0;
+ std::tie(output_width, output_height) = scaled_dimensions(input_shape.x(), input_shape.y(), weights_shape.x(), weights_shape.y(), info);
+
+ TensorShape output_shape{ input_shape };
+ output_shape.set(0, output_width);
+ output_shape.set(1, output_height);
+ output_shape.set(2, weights_shape[3]);
+
+ return output_shape;
+}
+
+bool ConvolutionLayerNode::forward_descriptors()
+{
+ if((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID))
+ {
+ Tensor *dst = output(0);
+ ARM_COMPUTE_ERROR_ON(dst == nullptr);
+ dst->desc() = configure_output(0);
+ return true;
+ }
+ return false;
+}
+
+TensorDescriptor ConvolutionLayerNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ const Tensor *src = input(0);
+ const Tensor *weights = input(1);
+
+ ARM_COMPUTE_ERROR_ON(src == nullptr || weights == nullptr);
+
+ TensorDescriptor output_info = src->desc();
+ TensorShape output_shape = compute_output_shape(src->desc().shape, weights->desc().shape, _info);
+ output_info.shape = output_shape;
+ return output_info;
+}
+
+Status ConvolutionLayerNode::validate()
+{
+ return Status{};
+}
+
+NodeType ConvolutionLayerNode::type() const
+{
+ return NodeType::ConvolutionLayer;
+}
+
+void ConvolutionLayerNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
+}
+} // namespace graph
+} // namespace arm_compute
\ No newline at end of file
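
ConvolutionLayerNode::compute_output_shape above derives the output width and height from scaled_dimensions() and the output depth from weights_shape[3]. As a quick sanity check, here is a stand-alone sketch of the same arithmetic, assuming floor rounding and symmetric padding (scaled_dim and the parameters are illustrative, not the real arm_compute API):

#include <cstdio>

// Assumed: floor rounding and symmetric padding, matching the common case of scaled_dimensions()
static unsigned scaled_dim(unsigned in, unsigned kernel, unsigned stride, unsigned pad)
{
    return (in + 2 * pad - kernel) / stride + 1; // integer (floor) division
}

int main()
{
    const unsigned in_w = 224, in_h = 224; // input width/height
    const unsigned k_w = 3, k_h = 3;       // kernel width/height
    const unsigned ofm = 64;               // weights_shape[3]: number of kernels -> output depth
    const unsigned stride = 1, pad = 1;

    // Matches compute_output_shape: W/H from the scaled dimensions, depth from weights_shape[3]
    std::printf("%ux%ux%u\n",
                scaled_dim(in_w, k_w, stride, pad),
                scaled_dim(in_h, k_h, stride, pad),
                ofm); // prints 224x224x64
    return 0;
}

DepthwiseConvolutionLayerNode (further down) uses the same width/height arithmetic but leaves the depth untouched, since each input channel is convolved with its own kernel.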
diff --git a/src/graph/nodes/DeQuantizationLayer.cpp b/src/graph/nodes/DeQuantizationLayer.cpp
deleted file mode 100644
index af9ecee157..0000000000
--- a/src/graph/nodes/DeQuantizationLayer.cpp
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/DequantizationLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-
-using namespace arm_compute::graph;
-
-std::unique_ptr<arm_compute::IFunction> DequantizationLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- _target_hint = ctx.hints().target_hint();
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
-
- if(_min_max.tensor() == nullptr)
- {
- TensorShape shape = in->info()->tensor_shape();
- shape.set(Window::DimX, 2);
- shape.remove_dimension(1);
- shape.remove_dimension(1);
-
- _min_max.set_info(TensorInfo(shape, in->info()->num_channels(), DataType::F32));
- _min_max.set_target(_target_hint);
- }
-
- bool minmax_is_loaded = _min_max.tensor() != nullptr;
-
- // Create node context
- NodeContext node_ctx(OperationType::DequantizationLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_output(_min_max.tensor());
- node_ctx.add_output(out);
-
- // Fill min max
- if(!minmax_is_loaded)
- {
- _min_max.allocate_and_fill_if_needed();
- }
-
- // Get function
- return OperationRegistry::get().find_operation(OperationType::DequantizationLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph/nodes/DepthConcatenateLayerNode.cpp b/src/graph/nodes/DepthConcatenateLayerNode.cpp
new file mode 100644
index 0000000000..1c0539744f
--- /dev/null
+++ b/src/graph/nodes/DepthConcatenateLayerNode.cpp
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/DepthConcatenateLayerNode.h"
+
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+DepthConcatenateLayerNode::DepthConcatenateLayerNode(unsigned int total_nodes)
+ : _total_nodes(total_nodes), _is_enabled(true)
+{
+ _input_edges.resize(total_nodes, EmptyEdgeID);
+ _outputs.resize(1, NullTensorID);
+}
+
+void DepthConcatenateLayerNode::set_enabled(bool is_enabled)
+{
+ _is_enabled = is_enabled;
+}
+
+bool DepthConcatenateLayerNode::is_enabled() const
+{
+ return _is_enabled;
+}
+
+TensorShape DepthConcatenateLayerNode::compute_output_shape(const std::vector<TensorShape> &input_shapes)
+{
+ ARM_COMPUTE_ERROR_ON(input_shapes.size() == 0);
+
+ TensorShape output_shape = input_shapes[0];
+
+ size_t max_x = 0;
+ size_t max_y = 0;
+ size_t depth = 0;
+
+ for(const auto &shape : input_shapes)
+ {
+ max_x = std::max(shape.x(), max_x);
+ max_y = std::max(shape.y(), max_y);
+ depth += shape.z();
+ }
+
+ output_shape.set(0, max_x);
+ output_shape.set(1, max_y);
+ output_shape.set(2, depth);
+
+ return output_shape;
+}
+
+bool DepthConcatenateLayerNode::forward_descriptors()
+{
+ if(_outputs[0] != NullTensorID)
+ {
+ Tensor *dst = output(0);
+ ARM_COMPUTE_ERROR_ON(dst == nullptr);
+ dst->desc() = configure_output(0);
+ return true;
+ }
+ return false;
+}
+
+TensorDescriptor DepthConcatenateLayerNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+ // Check if all input tensors are set
+ bool are_all_inputs_set = std::all_of(std::begin(_input_edges), std::end(_input_edges), [](const EdgeID & eid)
+ {
+ return eid != EmptyEdgeID;
+ });
+
+ TensorDescriptor output_info = {};
+
+ if(are_all_inputs_set)
+ {
+ std::vector<TensorShape> inputs_shapes;
+ for(unsigned int i = 0; i < _input_edges.size(); ++i)
+ {
+ const Tensor *t = _graph->tensor(input_id(i));
+ ARM_COMPUTE_ERROR_ON(t == nullptr);
+ inputs_shapes.push_back(t->desc().shape);
+ }
+ output_info = input(0)->desc();
+ TensorShape output_shape = compute_output_shape(inputs_shapes);
+ output_info.shape = output_shape;
+ }
+
+ return output_info;
+}
+
+Status DepthConcatenateLayerNode::validate()
+{
+ ARM_COMPUTE_UNUSED(_total_nodes);
+ return Status{};
+}
+
+NodeType DepthConcatenateLayerNode::type() const
+{
+ return NodeType::DepthConcatenateLayer;
+}
+
+void DepthConcatenateLayerNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
+}
+} // namespace graph
+} // namespace arm_compute
\ No newline at end of file
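
DepthConcatenateLayerNode::compute_output_shape above takes the maximum width and height across all inputs and sums their depths. A small illustrative check of that rule (plain C++, not the real arm_compute types):

#include <algorithm>
#include <cstdio>
#include <vector>

int main()
{
    struct Shape { unsigned x, y, z; }; // width, height, depth
    const std::vector<Shape> input_shapes = { { 56, 56, 64 }, { 56, 56, 96 }, { 56, 56, 32 } };

    unsigned max_x = 0, max_y = 0, depth = 0;
    for(const auto &s : input_shapes)
    {
        max_x = std::max(s.x, max_x);
        max_y = std::max(s.y, max_y);
        depth += s.z; // depths accumulate along the concatenation axis
    }

    std::printf("%ux%ux%u\n", max_x, max_y, depth); // prints 56x56x192
    return 0;
}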
diff --git a/src/graph/nodes/DepthConvertLayer.cpp b/src/graph/nodes/DepthConvertLayer.cpp
deleted file mode 100644
index 9b328e7b3e..0000000000
--- a/src/graph/nodes/DepthConvertLayer.cpp
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/DepthConvertLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-
-using namespace arm_compute::graph;
-
-DepthConvertLayer::DepthConvertLayer(const ConvertPolicy policy, uint32_t shift, DataType output_datatype)
- : _policy(policy), _shift(shift), _output_datatype(output_datatype)
-{
-}
-
-std::unique_ptr<arm_compute::IFunction> DepthConvertLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- _target_hint = ctx.hints().target_hint();
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
-
- // Auto configure output
- arm_compute::auto_init_if_empty(*out->info(), in->info()->tensor_shape(), 1, _output_datatype, in->info()->fixed_point_position());
-
- // Create node context
- NodeContext node_ctx(OperationType::DepthConvertLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_output(out);
- node_ctx.add_parameter<ConvertPolicy>("ConvertPolicy", _policy);
- node_ctx.add_parameter<uint32_t>("shift", _shift);
-
- // Get function
- return OperationRegistry::get().find_operation(OperationType::DepthConvertLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph/nodes/DepthwiseConvolutionLayer.cpp b/src/graph/nodes/DepthwiseConvolutionLayer.cpp
deleted file mode 100644
index e5101cc33c..0000000000
--- a/src/graph/nodes/DepthwiseConvolutionLayer.cpp
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/DepthwiseConvolutionLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-std::unique_ptr<arm_compute::IFunction> DepthwiseConvolutionLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
- _target_hint = ctx.hints().target_hint();
-
- if(_weights.tensor() == nullptr)
- {
- TensorShape weights_shape(_conv_width, _conv_height, input->tensor()->info()->tensor_shape().z());
- TensorInfo info = TensorInfo(TensorShape(weights_shape), in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position());
- info.set_quantization_info(_quant_info);
- _weights.set_info(std::move(info));
- }
- if(_biases.has_accessor() && _biases.tensor() == nullptr)
- {
- DataType dt = in->info()->data_type();
- _biases.set_info(TensorInfo(TensorShape(in->info()->dimension(2)), in->info()->num_channels(), is_data_type_quantized_asymmetric(dt) ? DataType::S32 : dt, in->info()->fixed_point_position()));
- }
-
- bool weights_is_loaded = _weights.tensor() != nullptr;
- bool biases_is_loaded = _biases.has_accessor() ? _biases.tensor() != nullptr : true;
-
- _weights.set_target(_target_hint);
- if(_biases.has_accessor())
- {
- _biases.set_target(_target_hint);
- }
-
- // Create node context
- NodeContext node_ctx(OperationType::DepthwiseConvolutionLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_input(_weights.tensor());
- if(_biases.has_accessor())
- {
- node_ctx.add_input(_biases.tensor());
- }
- node_ctx.add_output(out);
- node_ctx.add_parameter<PadStrideInfo>("ConvolutionInfo", _conv_info);
- node_ctx.add_parameter<bool>("Optimized3x3", _opt3x3);
-
- // Configure operation
- auto func = OperationRegistry::get().find_operation(OperationType::DepthwiseConvolutionLayer, _target_hint)->configure(node_ctx);
-
- // Fill tensors
- if(!weights_is_loaded)
- {
- _weights.allocate_and_fill_if_needed();
- }
- if(!biases_is_loaded)
- {
- _biases.allocate_and_fill_if_needed();
- }
-
- // Get function
- return func;
-}
diff --git a/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp b/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
new file mode 100644
index 0000000000..67a39029e6
--- /dev/null
+++ b/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h"
+
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+DepthwiseConvolutionLayerNode::DepthwiseConvolutionLayerNode(PadStrideInfo info, DepthwiseConvolutionMethod method)
+ : _info(std::move(info)), _method(method)
+{
+ _input_edges.resize(3, EmptyEdgeID);
+ _outputs.resize(1, NullTensorID);
+}
+
+void DepthwiseConvolutionLayerNode::set_depthwise_convolution_method(DepthwiseConvolutionMethod method)
+{
+ _method = method;
+}
+
+DepthwiseConvolutionMethod DepthwiseConvolutionLayerNode::depthwise_convolution_method() const
+{
+ return _method;
+}
+
+PadStrideInfo DepthwiseConvolutionLayerNode::convolution_info() const
+{
+ return _info;
+}
+
+TensorShape DepthwiseConvolutionLayerNode::compute_output_shape(TensorShape input_shape, TensorShape weights_shape, PadStrideInfo info)
+{
+ unsigned int output_width = 0;
+ unsigned int output_height = 0;
+ std::tie(output_width, output_height) = scaled_dimensions(input_shape.x(), input_shape.y(), weights_shape.x(), weights_shape.y(), info);
+
+ TensorShape output_shape{ input_shape };
+ output_shape.set(0, output_width);
+ output_shape.set(1, output_height);
+
+ return output_shape;
+}
+
+bool DepthwiseConvolutionLayerNode::forward_descriptors()
+{
+ if((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID))
+ {
+ Tensor *dst = output(0);
+ ARM_COMPUTE_ERROR_ON(dst == nullptr);
+ dst->desc() = configure_output(0);
+ return true;
+ }
+ return false;
+}
+
+TensorDescriptor DepthwiseConvolutionLayerNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ const Tensor *src = input(0);
+ const Tensor *weights = input(1);
+
+ ARM_COMPUTE_ERROR_ON(src == nullptr || weights == nullptr);
+
+ TensorDescriptor output_info = src->desc();
+ TensorShape output_shape = compute_output_shape(src->desc().shape, weights->desc().shape, _info);
+ output_info.shape = output_shape;
+ return output_info;
+}
+
+Status DepthwiseConvolutionLayerNode::validate()
+{
+ return Status{};
+}
+
+NodeType DepthwiseConvolutionLayerNode::type() const
+{
+ return NodeType::DepthwiseConvolutionLayer;
+}
+
+void DepthwiseConvolutionLayerNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
+}
+} // namespace graph
+} // namespace arm_compute
\ No newline at end of file
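For context, the new depthwise node derives its output spatial size through scaled_dimensions(), which applies the standard convolution output-size arithmetic to the input and kernel extents together with the padding and stride carried by PadStrideInfo. A minimal sketch of that arithmetic, assuming the default floor rounding (illustrative names, not part of the patch):

    // Floor-rounded output-size formula evaluated per spatial dimension.
    unsigned int out_dim(unsigned int in, unsigned int kernel, unsigned int pad, unsigned int stride)
    {
        return (in + 2 * pad - kernel) / stride + 1; // e.g. (224 + 2 * 1 - 3) / 1 + 1 = 224
    }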
diff --git a/src/graph/nodes/EltwiseLayerNode.cpp b/src/graph/nodes/EltwiseLayerNode.cpp
new file mode 100644
index 0000000000..b794043f2f
--- /dev/null
+++ b/src/graph/nodes/EltwiseLayerNode.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/EltwiseLayerNode.h"
+
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+EltwiseLayerNode::EltwiseLayerNode(EltwiseOperation op)
+ : _op(op)
+{
+ _input_edges.resize(2, EmptyEdgeID);
+ _outputs.resize(1, NullTensorID);
+}
+
+EltwiseOperation EltwiseLayerNode::eltwise_operation() const
+{
+ return _op;
+}
+
+bool EltwiseLayerNode::forward_descriptors()
+{
+ if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ {
+ Tensor *dst = output(0);
+ ARM_COMPUTE_ERROR_ON(dst == nullptr);
+ dst->desc() = configure_output(0);
+ return true;
+ }
+ return false;
+}
+
+TensorDescriptor EltwiseLayerNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ ARM_COMPUTE_UNUSED(_op);
+
+ const Tensor *src = input(0);
+ ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+ return src->desc();
+}
+
+Status EltwiseLayerNode::validate()
+{
+ return Status{};
+}
+
+NodeType EltwiseLayerNode::type() const
+{
+ return NodeType::EltwiseLayer;
+}
+
+void EltwiseLayerNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
+}
+} // namespace graph
+} // namespace arm_compute
diff --git a/src/graph/nodes/FlattenLayer.cpp b/src/graph/nodes/FlattenLayer.cpp
deleted file mode 100644
index ea08296ba3..0000000000
--- a/src/graph/nodes/FlattenLayer.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/FlattenLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-std::unique_ptr<arm_compute::IFunction> FlattenLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- _target_hint = ctx.hints().target_hint();
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
-
- // Auto configure output
- TensorShape tensor_shape = in->info()->tensor_shape();
- tensor_shape.collapse(in->info()->num_dimensions());
- arm_compute::auto_init_if_empty(*out->info(), tensor_shape, 1, in->info()->data_type(), in->info()->fixed_point_position());
-
- // Create node context
- NodeContext node_ctx(OperationType::FlattenLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_output(out);
-
- // Get function
- return OperationRegistry::get().find_operation(OperationType::FlattenLayer, _target_hint)->configure(node_ctx);
-}
\ No newline at end of file
diff --git a/src/graph/nodes/FlattenLayerNode.cpp b/src/graph/nodes/FlattenLayerNode.cpp
new file mode 100644
index 0000000000..8b847c7056
--- /dev/null
+++ b/src/graph/nodes/FlattenLayerNode.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/FlattenLayerNode.h"
+
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+FlattenLayerNode::FlattenLayerNode()
+{
+ _input_edges.resize(1, EmptyEdgeID);
+ _outputs.resize(1, NullTensorID);
+}
+
+bool FlattenLayerNode::forward_descriptors()
+{
+ if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ {
+ Tensor *dst = output(0);
+ ARM_COMPUTE_ERROR_ON(dst == nullptr);
+ dst->desc() = configure_output(0);
+ return true;
+ }
+ return false;
+}
+
+TensorDescriptor FlattenLayerNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+ const Tensor *src = input(0);
+ ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+ TensorDescriptor output_desc = src->desc();
+ output_desc.shape.collapse(src->desc().shape.num_dimensions());
+
+ return output_desc;
+}
+
+Status FlattenLayerNode::validate()
+{
+ return Status{};
+}
+
+NodeType FlattenLayerNode::type() const
+{
+ return NodeType::FlattenLayer;
+}
+
+void FlattenLayerNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
+}
+} // namespace graph
+} // namespace arm_compute
\ No newline at end of file
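The flatten node keeps the element count of the input descriptor but collapses every dimension into one. A small sketch, not part of the patch, assuming TensorShape is used as in the code above:

    // Collapsing all dimensions folds a (7, 7, 512) shape into a single
    // dimension of 7 * 7 * 512 = 25088 elements.
    TensorShape shape(7U, 7U, 512U);
    shape.collapse(shape.num_dimensions()); // shape is now (25088)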
diff --git a/src/graph/nodes/FullyConnectedLayer.cpp b/src/graph/nodes/FullyConnectedLayer.cpp
index 3742150d37..cbf2b35ddd 100644
--- a/src/graph/nodes/FullyConnectedLayer.cpp
+++ b/src/graph/nodes/FullyConnectedLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,18 +21,40 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph/nodes/FullyConnectedLayer.h"
+#include "arm_compute/graph/nodes/FullyConnectedLayerNode.h"
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
-using namespace arm_compute::graph;
+namespace arm_compute
+{
+namespace graph
+{
+FullyConnectedLayerNode::FullyConnectedLayerNode(unsigned int num_outputs)
+ : _num_outputs(num_outputs)
+{
+ _input_edges.resize(3, EmptyEdgeID);
+ _outputs.resize(1, NullTensorID);
+}
-namespace
+TensorShape FullyConnectedLayerNode::compute_weights_shape(TensorShape input_shape, unsigned int num_outputs)
{
-TensorShape calculate_fullyconnected_layer_output_shape(const TensorShape &input_shape, unsigned int output_neurons)
+ unsigned int num_weights = 1;
+ unsigned int num_dimensions = input_shape.num_dimensions();
+ // Ignore the batch dimension if there is one:
+ if(num_dimensions == 2 || num_dimensions == 4)
+ {
+ num_dimensions--;
+ }
+ for(unsigned int i = 0; i < num_dimensions; i++)
+ {
+ num_weights *= input_shape[i];
+ }
+ return TensorShape(num_weights, num_outputs);
+}
+
+TensorShape FullyConnectedLayerNode::compute_output_shape(TensorShape input_shape, unsigned int num_outputs)
{
// Note: Only 1D batch space is supported at the moment
unsigned int batches = input_shape[1];
@@ -40,67 +62,46 @@ TensorShape calculate_fullyconnected_layer_output_shape(const TensorShape &input
{
batches = input_shape[3];
}
- return TensorShape(output_neurons, batches);
+ return TensorShape(num_outputs, batches);
}
-} // namespace
-std::unique_ptr<arm_compute::IFunction> FullyConnectedLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
+bool FullyConnectedLayerNode::forward_descriptors()
{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
- _target_hint = ctx.hints().target_hint();
-
- if(_weights.tensor() == nullptr)
- {
- unsigned int num_weights = 1;
- unsigned int num_dimensions = in->info()->num_dimensions();
- // Ignore the batch dimension if there is one:
- if(num_dimensions == 2 || num_dimensions == 4)
- {
- num_dimensions--;
- }
- for(unsigned int i = 0; i < num_dimensions; i++)
- {
- num_weights *= in->info()->dimension(i);
- }
- _weights.set_info(TensorInfo(TensorShape(num_weights, _num_neurons), in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position()));
- }
- if(_biases.tensor() == nullptr)
+ if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
- _biases.set_info(TensorInfo(TensorShape(_num_neurons), in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position()));
+ Tensor *dst = output(0);
+ ARM_COMPUTE_ERROR_ON(dst == nullptr);
+ dst->desc() = configure_output(0);
+ return true;
}
+ return false;
+}
- // Auto configure output
- arm_compute::auto_init_if_empty(*out->info(),
- calculate_fullyconnected_layer_output_shape(in->info()->tensor_shape(), _num_neurons),
- in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position());
-
- bool weights_are_loaded = _weights.tensor() != nullptr;
- bool biases_are_loaded = _biases.tensor() != nullptr;
+TensorDescriptor FullyConnectedLayerNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ const Tensor *src = input(0);
+ ARM_COMPUTE_ERROR_ON(src == nullptr);
- // Create node context
- NodeContext node_ctx(OperationType::FullyConnectedLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_input(_weights.set_target(_target_hint));
- node_ctx.add_input(_biases.set_target(_target_hint));
- node_ctx.add_output(out);
+ TensorDescriptor output_info = src->desc();
+ TensorShape output_shape = compute_output_shape(src->desc().shape, _num_outputs);
+ output_info.shape = output_shape;
+ return output_info;
+}
- // Configure operation
- auto func = OperationRegistry::get().find_operation(OperationType::FullyConnectedLayer, _target_hint)->configure(node_ctx);
+Status FullyConnectedLayerNode::validate()
+{
+ return Status{};
+}
- // Fill biases
- if(!weights_are_loaded)
- {
- _weights.allocate_and_fill_if_needed();
- }
- if(!biases_are_loaded)
- {
- _biases.allocate_and_fill_if_needed();
- }
+NodeType FullyConnectedLayerNode::type() const
+{
+ return NodeType::FullyConnectedLayer;
+}
- // Get function
- return func;
+void FullyConnectedLayerNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
}
+} // namespace graph
+} // namespace arm_compute
\ No newline at end of file
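A rough usage sketch of the two shape helpers introduced above, assuming an already-flattened 2D input laid out as (features, batches) and that the helpers are callable as static members (values are hypothetical, not part of the patch):

    // 2048 input features, batch of 8, 1000 output neurons.
    TensorShape in_shape(2048U, 8U);
    TensorShape weights = FullyConnectedLayerNode::compute_weights_shape(in_shape, 1000U); // (2048, 1000)
    TensorShape out     = FullyConnectedLayerNode::compute_output_shape(in_shape, 1000U);  // (1000, 8)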
diff --git a/src/graph/nodes/FloorLayer.cpp b/src/graph/nodes/InputNode.cpp
index 8750546ed9..e912633a66 100644
--- a/src/graph/nodes/FloorLayer.cpp
+++ b/src/graph/nodes/InputNode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,29 +21,52 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph/nodes/FloorLayer.h"
+#include "arm_compute/graph/nodes/InputNode.h"
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
-using namespace arm_compute::graph;
+namespace arm_compute
+{
+namespace graph
+{
+InputNode::InputNode(TensorDescriptor desc)
+ : _desc(desc)
+{
+ _outputs.resize(1, NullTensorID);
+}
-std::unique_ptr<arm_compute::IFunction> FloorLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
+bool InputNode::forward_descriptors()
{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
+ if(output_id(0) != NullTensorID)
+ {
+ Tensor *t = output(0);
+ ARM_COMPUTE_ERROR_ON(t == nullptr);
+ t->desc() = configure_output(0);
+ return true;
+ }
+ return false;
+}
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
- _target_hint = ctx.hints().target_hint();
+TensorDescriptor InputNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ return _desc;
+}
- // Create node context
- NodeContext node_ctx(OperationType::FloorLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_output(out);
+Status InputNode::validate()
+{
+ return Status{};
+}
+
+NodeType InputNode::type() const
+{
+ return NodeType::Input;
+}
- // Get function
- return OperationRegistry::get().find_operation(OperationType::FloorLayer, _target_hint)->configure(node_ctx);
+void InputNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
}
+} // namespace graph
+} // namespace arm_compute
diff --git a/src/graph/nodes/L2NormalizeLayer.cpp b/src/graph/nodes/L2NormalizeLayer.cpp
deleted file mode 100644
index 9813ba4450..0000000000
--- a/src/graph/nodes/L2NormalizeLayer.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/L2NormalizeLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-L2NormalizeLayer::L2NormalizeLayer(unsigned int axis, float epsilon)
- : _axis(axis), _epsilon(epsilon)
-{
-}
-
-std::unique_ptr<arm_compute::IFunction> L2NormalizeLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
- _target_hint = ctx.hints().target_hint();
-
- // Create node context
- NodeContext node_ctx(OperationType::L2NormalizeLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_output(out);
- node_ctx.add_parameter<unsigned int>("axis", _axis);
- node_ctx.add_parameter<float>("epsilon", _epsilon);
-
- // Get function
- return OperationRegistry::get().find_operation(OperationType::L2NormalizeLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph/nodes/NormalizationLayer.cpp b/src/graph/nodes/NormalizationLayer.cpp
deleted file mode 100644
index a489329243..0000000000
--- a/src/graph/nodes/NormalizationLayer.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/NormalizationLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-NormalizationLayer::NormalizationLayer(const NormalizationLayerInfo norm_info)
- : _norm_info(norm_info)
-{
-}
-
-std::unique_ptr<arm_compute::IFunction> NormalizationLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
- _target_hint = ctx.hints().target_hint();
-
- // Create node context
- NodeContext node_ctx(OperationType::NormalizationLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_output(out);
- node_ctx.add_parameter<NormalizationLayerInfo>("NormalizationLayerInfo", _norm_info);
-
- // Get function
- return OperationRegistry::get().find_operation(OperationType::NormalizationLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph/nodes/NormalizationLayerNode.cpp b/src/graph/nodes/NormalizationLayerNode.cpp
new file mode 100644
index 0000000000..a9f2fbd066
--- /dev/null
+++ b/src/graph/nodes/NormalizationLayerNode.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/NormalizationLayerNode.h"
+
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+NormalizationLayerNode::NormalizationLayerNode(NormalizationLayerInfo norm_info)
+ : _info(norm_info)
+{
+ _input_edges.resize(1, EmptyEdgeID);
+ _outputs.resize(1, NullTensorID);
+}
+
+NormalizationLayerInfo NormalizationLayerNode::normalization_info() const
+{
+ return _info;
+}
+
+bool NormalizationLayerNode::forward_descriptors()
+{
+ if(input_id(0) != NullTensorID && (output_id(0) != NullTensorID))
+ {
+ Tensor *dst = output(0);
+ ARM_COMPUTE_ERROR_ON(dst == nullptr);
+ dst->desc() = configure_output(0);
+ return true;
+ }
+ return false;
+}
+
+TensorDescriptor NormalizationLayerNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+ const Tensor *src = input(0);
+ ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+ return src->desc();
+}
+
+Status NormalizationLayerNode::validate()
+{
+ return Status{};
+}
+
+NodeType NormalizationLayerNode::type() const
+{
+ return NodeType::NormalizationLayer;
+}
+
+void NormalizationLayerNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
+}
+} // namespace graph
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph/nodes/SoftmaxLayer.cpp b/src/graph/nodes/OutputNode.cpp
index 7f2325b312..4c63bfa20c 100644
--- a/src/graph/nodes/SoftmaxLayer.cpp
+++ b/src/graph/nodes/OutputNode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,29 +21,46 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph/nodes/SoftmaxLayer.h"
+#include "arm_compute/graph/nodes/OutputNode.h"
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
+#include "arm_compute/graph/Tensor.h"
-using namespace arm_compute::graph;
+namespace arm_compute
+{
+namespace graph
+{
+OutputNode::OutputNode()
+{
+ _input_edges.resize(1, EmptyEdgeID);
+}
-std::unique_ptr<arm_compute::IFunction> SoftmaxLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
+bool OutputNode::forward_descriptors()
{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
+ return true;
+}
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
- _target_hint = ctx.hints().target_hint();
+TensorDescriptor OutputNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ return TensorDescriptor();
+}
- // Create node context
- NodeContext node_ctx(OperationType::SoftmaxLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_output(out);
+Status OutputNode::validate()
+{
+ return Status{};
+}
+
+NodeType OutputNode::type() const
+{
+ return NodeType::Output;
+}
- // Get function
- return OperationRegistry::get().find_operation(OperationType::SoftmaxLayer, _target_hint)->configure(node_ctx);
+void OutputNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
}
+} // namespace graph
+} // namespace arm_compute
diff --git a/src/graph/nodes/PoolingLayer.cpp b/src/graph/nodes/PoolingLayer.cpp
deleted file mode 100644
index 2c151194f3..0000000000
--- a/src/graph/nodes/PoolingLayer.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/PoolingLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-PoolingLayer::PoolingLayer(const PoolingLayerInfo pool_info)
- : _pool_info(pool_info)
-{
-}
-
-std::unique_ptr<arm_compute::IFunction> PoolingLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
- _target_hint = ctx.hints().target_hint();
-
- // Create node context
- NodeContext node_ctx(OperationType::PoolingLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_output(out);
- node_ctx.add_parameter<PoolingLayerInfo>("PoolingLayerInfo", _pool_info);
-
- // Get function
- return OperationRegistry::get().find_operation(OperationType::PoolingLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph/nodes/PoolingLayerNode.cpp b/src/graph/nodes/PoolingLayerNode.cpp
new file mode 100644
index 0000000000..a7b6b3679a
--- /dev/null
+++ b/src/graph/nodes/PoolingLayerNode.cpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/PoolingLayerNode.h"
+
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+PoolingLayerNode::PoolingLayerNode(PoolingLayerInfo pool_info)
+ : _info(std::move(pool_info))
+{
+ _input_edges.resize(1, EmptyEdgeID);
+ _outputs.resize(1, NullTensorID);
+}
+
+PoolingLayerInfo PoolingLayerNode::pooling_info() const
+{
+ return _info;
+}
+
+TensorShape PoolingLayerNode::compute_output_shape(TensorShape input_shape, PoolingLayerInfo info)
+{
+ const int pool_size_x = info.is_global_pooling() ? input_shape.x() : info.pool_size().width;
+ const int pool_size_y = info.is_global_pooling() ? input_shape.y() : info.pool_size().height;
+
+ unsigned int pooled_width = 0;
+ unsigned int pooled_height = 0;
+ std::tie(pooled_width, pooled_height) = scaled_dimensions(input_shape.x(), input_shape.y(), pool_size_x, pool_size_y, info.pad_stride_info());
+
+ TensorShape output_shape{ input_shape };
+ output_shape.set(0, pooled_width);
+ output_shape.set(1, pooled_height);
+
+ return output_shape;
+}
+
+bool PoolingLayerNode::forward_descriptors()
+{
+ if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ {
+ Tensor *dst = output(0);
+ ARM_COMPUTE_ERROR_ON(dst == nullptr);
+ dst->desc() = configure_output(0);
+ return true;
+ }
+ return false;
+}
+
+TensorDescriptor PoolingLayerNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+ const Tensor *src = input(0);
+ ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+ TensorDescriptor output_info = src->desc();
+ TensorShape output_shape = compute_output_shape(src->desc().shape, _info);
+ output_info.shape = output_shape;
+ return output_info;
+}
+
+Status PoolingLayerNode::validate()
+{
+ return Status{};
+}
+
+NodeType PoolingLayerNode::type() const
+{
+ return NodeType::PoolingLayer;
+}
+
+void PoolingLayerNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
+}
+} // namespace graph
+} // namespace arm_compute
\ No newline at end of file
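One detail worth noting: when the pooling info is flagged as global, the kernel size is taken from the input itself, so the spatial extent collapses to 1x1. A hedged sketch, assuming the global-pooling constructor of PoolingLayerInfo and a static shape helper (not part of the patch):

    // Global average pooling over a 7x7x2048 feature map.
    TensorShape in_shape(7U, 7U, 2048U);
    PoolingLayerInfo info(PoolingType::AVG); // global pooling: kernel spans the whole input
    TensorShape out = PoolingLayerNode::compute_output_shape(in_shape, info); // (1, 1, 2048)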
diff --git a/src/graph/nodes/ReshapeLayer.cpp b/src/graph/nodes/ReshapeLayer.cpp
index b0c117e418..2757f06bd3 100644
--- a/src/graph/nodes/ReshapeLayer.cpp
+++ b/src/graph/nodes/ReshapeLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,37 +21,61 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph/nodes/ReshapeLayer.h"
+#include "arm_compute/graph/nodes/ReshapeLayerNode.h"
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
-using namespace arm_compute::graph;
-
-ReshapeLayer::ReshapeLayer(TensorShape shape)
+namespace arm_compute
+{
+namespace graph
+{
+ReshapeLayerNode::ReshapeLayerNode(TensorShape shape)
: _shape(shape)
{
+ _input_edges.resize(1, EmptyEdgeID);
+ _outputs.resize(1, NullTensorID);
}
-std::unique_ptr<arm_compute::IFunction> ReshapeLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
+bool ReshapeLayerNode::forward_descriptors()
{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
+ if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ {
+ Tensor *dst = output(0);
+ ARM_COMPUTE_ERROR_ON(dst == nullptr);
+ dst->desc() = configure_output(0);
+ return true;
+ }
+ return false;
+}
+
+TensorDescriptor ReshapeLayerNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+ const Tensor *src = input(0);
+ ARM_COMPUTE_ERROR_ON(src == nullptr);
- _target_hint = ctx.hints().target_hint();
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
+ TensorDescriptor output_desc = src->desc();
+ output_desc.shape = _shape;
- // Auto configure output
- arm_compute::auto_init_if_empty(*out->info(), _shape, 1, in->info()->data_type(), in->info()->fixed_point_position(), in->info()->quantization_info());
+ return output_desc;
+}
- // Create node context
- NodeContext node_ctx(OperationType::ReshapeLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_output(out);
+Status ReshapeLayerNode::validate()
+{
+ return Status{};
+}
- // Get function
- return OperationRegistry::get().find_operation(OperationType::ReshapeLayer, _target_hint)->configure(node_ctx);
+NodeType ReshapeLayerNode::type() const
+{
+ return NodeType::ReshapeLayer;
+}
+
+void ReshapeLayerNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
}
+} // namespace graph
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph/nodes/ResidualLayer.cpp b/src/graph/nodes/ResidualLayer.cpp
deleted file mode 100644
index 87404f9e1f..0000000000
--- a/src/graph/nodes/ResidualLayer.cpp
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/ResidualLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "arm_compute/graph/SubGraph.h"
-#include "arm_compute/graph/Tensor.h"
-#include "arm_compute/runtime/IFunction.h"
-#include "support/ToolchainSupport.h"
-#include "utils/Utils.h"
-
-#include <memory>
-#include <tuple>
-#include <vector>
-
-using namespace arm_compute::graph;
-
-/** Residual function */
-class ResidualFunction final : public arm_compute::IFunction
-{
-public:
- /** Default Constructor */
- ResidualFunction(GraphContext &ctx, ITensorObject *output)
- : _ctx(ctx), _input(nullptr), _output(output), _func(nullptr), _graphs(), _graph_outputs()
- {
- }
-
- /** Prevent instances from being copy constructed */
- ResidualFunction(const ResidualFunction &) = delete;
- /** Prevent instances from being copy assigned */
- const ResidualFunction &operator=(const ResidualFunction &) = delete;
- /** Prevent instances from being move constructed */
- ResidualFunction(ResidualFunction &&) = delete;
- /** Prevent instances from being move assigned */
- ResidualFunction &operator=(ResidualFunction &&) = delete;
- /** Default destructor */
- ~ResidualFunction() override = default;
-
- /** Set the input (when using only one sub graph)
- *
- * @param[in] input Input to set
- */
- void set_input(std::unique_ptr<ITensorObject> input)
- {
- _input = std::move(input);
- }
-
- /** Registers graph to be executed by the residual function
- *
- * @param[in] graph Graph to register
- * @param[in] output Output to register
- */
- void register_graph(std::unique_ptr<Graph> graph, std::unique_ptr<ITensorObject> output)
- {
- _graphs.push_back(std::move(graph));
- _graph_outputs.push_back(std::move(output));
- }
-
- /** Configure the function */
- void configure()
- {
- ARM_COMPUTE_ERROR_ON(_graphs.size() < 1 || _graphs.size() > 2);
- TargetHint target_hint = _ctx.hints().target_hint();
-
- // Create node context
- NodeContext node_ctx(OperationType::ArithmeticAddition);
- node_ctx.set_target(target_hint);
-
- if(_graphs.size() == 1)
- {
- arm_compute::ITensor *in = _input->tensor();
- node_ctx.add_input(in);
- }
-
- for(auto &o : _graph_outputs)
- {
- arm_compute::ITensor *in = o->tensor();
- node_ctx.add_input(in);
- }
-
- arm_compute::ITensor *out = _output->tensor();
- auto_init_if_empty(*out->info(), *_graph_outputs[0]->tensor()->info());
- node_ctx.add_output(out);
-
- _func = OperationRegistry::get().find_operation(OperationType::ArithmeticAddition, target_hint)->configure(node_ctx);
-
- for(auto &o : _graph_outputs)
- {
- o->allocate();
- }
- }
-
- // Inherited methods overridden:
- void run() override
- {
- ARM_COMPUTE_ERROR_ON(_graphs.size() < 1 || _graphs.size() > 2);
-
- for(auto &g : _graphs)
- {
- ARM_COMPUTE_ERROR_ON(g.get() == nullptr);
- g->run();
- }
-
- _func->run();
- }
-
-private:
- GraphContext _ctx;
- std::unique_ptr<ITensorObject> _input;
- ITensorObject *_output;
- std::unique_ptr<arm_compute::IFunction> _func;
- std::vector<std::unique_ptr<Graph>> _graphs;
- std::vector<std::unique_ptr<ITensorObject>> _graph_outputs;
-};
-
-std::unique_ptr<arm_compute::IFunction> ResidualLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<Tensor *>(input) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<Tensor *>(output) == nullptr);
-
- // Create residual function
- auto func = arm_compute::support::cpp14::make_unique<ResidualFunction>(ctx, output);
-
- if(_sub_graphs.size() == 1)
- {
- std::unique_ptr<ITensorObject> original_in;
- original_in = arm_compute::support::cpp14::make_unique<SubTensor>(*dynamic_cast<Tensor *>(input),
- input->tensor()->info()->tensor_shape(),
- Coordinates());
- func->set_input(std::move(original_in));
- }
-
- // Construct all sub-graphs given the input/output
- for(auto &sg : _sub_graphs)
- {
- ARM_COMPUTE_ERROR_ON(sg.get() == nullptr);
-
- // IO buffers
- std::unique_ptr<ITensorObject> in;
- std::unique_ptr<ITensorObject> out;
- std::unique_ptr<ITensorObject> func_in;
-
- // Create input sub-tensor
- if(!sg->has_input())
- {
- in = arm_compute::support::cpp14::make_unique<SubTensor>(*dynamic_cast<Tensor *>(input),
- input->tensor()->info()->tensor_shape(),
- Coordinates());
- }
-
- // Create output sub-tensor
- if(!sg->has_output())
- {
- ITensorInfo *info = input->tensor()->info();
- func_in = arm_compute::support::cpp14::make_unique<Tensor>(TensorInfo(info->num_channels(), info->data_type(), info->fixed_point_position()));
- func_in->set_target(ctx.hints().target_hint());
- out = arm_compute::support::cpp14::make_unique<SubTensor>(func_in->tensor(),
- TensorShape(),
- Coordinates(0, 0, 0),
- func_in->target(),
- true);
- }
-
- // Construct sub_graph
- auto g = sg->construct(ctx, std::move(in), std::move(out));
-
- // Register graph to function
- func->register_graph(std::move(g), std::move(func_in));
- }
-
- func->configure();
-
- return std::move(func);
-}
diff --git a/src/graph/nodes/SoftmaxLayerNode.cpp b/src/graph/nodes/SoftmaxLayerNode.cpp
new file mode 100644
index 0000000000..4c21ac6ad0
--- /dev/null
+++ b/src/graph/nodes/SoftmaxLayerNode.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/SoftmaxLayerNode.h"
+
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+SoftmaxLayerNode::SoftmaxLayerNode(float beta)
+ : _beta(beta)
+{
+ _input_edges.resize(1, EmptyEdgeID);
+ _outputs.resize(1, NullTensorID);
+}
+
+float SoftmaxLayerNode::beta() const
+{
+ return _beta;
+}
+
+bool SoftmaxLayerNode::forward_descriptors()
+{
+ if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ {
+ Tensor *dst = output(0);
+ ARM_COMPUTE_ERROR_ON(dst == nullptr);
+ dst->desc() = configure_output(0);
+ return true;
+ }
+ return false;
+}
+
+TensorDescriptor SoftmaxLayerNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+ const Tensor *src = input(0);
+ ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+ return src->desc();
+}
+
+Status SoftmaxLayerNode::validate()
+{
+ return Status{};
+}
+
+NodeType SoftmaxLayerNode::type() const
+{
+ return NodeType::SoftmaxLayer;
+}
+
+void SoftmaxLayerNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
+}
+} // namespace graph
+} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph/nodes/SplitLayerNode.cpp b/src/graph/nodes/SplitLayerNode.cpp
new file mode 100644
index 0000000000..c8fb43c2a1
--- /dev/null
+++ b/src/graph/nodes/SplitLayerNode.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/SplitLayerNode.h"
+
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+SplitLayerNode::SplitLayerNode(unsigned int num_splits, unsigned int axis)
+ : _num_splits(num_splits), _axis(axis)
+{
+ _input_edges.resize(1, EmptyEdgeID);
+ _outputs.resize(num_splits, NullTensorID);
+}
+
+unsigned int SplitLayerNode::num_splits() const
+{
+ return _num_splits;
+}
+
+unsigned int SplitLayerNode::axis() const
+{
+ return _axis;
+}
+
+std::pair<TensorShape, Coordinates> SplitLayerNode::compute_output_shape(TensorShape input_shape, unsigned int num_splits, unsigned int axis, unsigned int idx)
+{
+ ARM_COMPUTE_ERROR_ON(axis >= input_shape.num_dimensions());
+ ARM_COMPUTE_ERROR_ON_MSG(input_shape[axis] % num_splits, "Split should be exact");
+
+ const unsigned int split_size = input_shape[axis] / num_splits;
+
+ TensorShape output_shape = input_shape;
+ output_shape.set(axis, split_size);
+
+ Coordinates coords;
+ coords.set(axis, idx * split_size);
+
+ return std::make_pair(output_shape, coords);
+}
+
+bool SplitLayerNode::forward_descriptors()
+{
+ if(input_id(0) != NullTensorID)
+ {
+ for(unsigned int i = 0; i < _outputs.size(); ++i)
+ {
+ if(output_id(i) != NullTensorID)
+ {
+ Tensor *dst_i = output(i);
+ ARM_COMPUTE_ERROR_ON(dst_i == nullptr);
+ dst_i->desc() = configure_output(i);
+ }
+ }
+ return true;
+ }
+ return false;
+}
+
+TensorDescriptor SplitLayerNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+ const Tensor *src = input(0);
+ ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+ TensorShape output_shape;
+
+ TensorDescriptor output_info = src->desc();
+ std::tie(output_shape, std::ignore) = compute_output_shape(src->desc().shape, _num_splits, _axis, idx);
+ output_info.shape = output_shape;
+
+ return output_info;
+}
+
+Status SplitLayerNode::validate()
+{
+ return Status{};
+}
+
+NodeType SplitLayerNode::type() const
+{
+ return NodeType::SplitLayer;
+}
+
+void SplitLayerNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
+}
+} // namespace graph
+} // namespace arm_compute
\ No newline at end of file
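The split helper returns both the per-output shape and the starting coordinates of each slice along the chosen axis. A rough usage sketch with hypothetical values (not part of the patch; requires the arm_compute graph headers and <tuple>):

    // Split an 8-channel tensor into 4 equal parts along axis 2.
    TensorShape in_shape(64U, 32U, 8U);
    TensorShape sub_shape;
    Coordinates start;
    std::tie(sub_shape, start) = SplitLayerNode::compute_output_shape(in_shape, 4, 2, 3);
    // sub_shape == (64, 32, 2), start == (0, 0, 6) for the fourth output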