author    Alex Gilday <alexander.gilday@arm.com>      2018-02-15 11:07:18 +0000
committer Anthony Barbier <anthony.barbier@arm.com>   2018-11-02 16:47:40 +0000
commit    8913d8d7bc83fdcb6c5dc9baca6bb369418de48b (patch)
tree      f9556fdf33af663ad9cfa7619093af334ef0af71 /src/graph
parent    15997879873b374ea297197fc4aafb15e38b938b (diff)
COMPMID-915: Create ResNet50 example
ResidualLayer node (COMPMID-916) also created as required for the ResNet architecture.

Change-Id: I4fb4d2e08a8d3ce206f96f7946f5afc3e244676a
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/121185
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
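Note on the residual connection this patch implements: each sub-graph registered with the new ResidualLayer node produces a branch output, and ResidualFunction sums the branches (or a single branch plus the layer input) with an element-wise ArithmeticAddition using ConvertPolicy::SATURATE, dispatched through the operations registered below. The following is a minimal standalone sketch of that final addition step only, assuming the NEON runtime Tensor/NEArithmeticAddition API of this library version; the tensor shape and variable names are illustrative, not taken from the patch.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Branch output, identity path and destination, all of identical shape
    // (illustrative 56x56x64 F32 feature maps).
    Tensor branch{}, identity{}, sum{};
    const TensorInfo info(TensorShape(56U, 56U, 64U), 1, DataType::F32);
    branch.allocator()->init(info);
    identity.allocator()->init(info);
    sum.allocator()->init(info);

    // Same configuration the new ArithmeticAddition operation performs:
    // sum = branch + identity, saturating on overflow.
    NEArithmeticAddition add{};
    add.configure(&branch, &identity, &sum, ConvertPolicy::SATURATE);

    // Allocate backing memory, fill the inputs, then execute.
    branch.allocator()->allocate();
    identity.allocator()->allocate();
    sum.allocator()->allocate();
    add.run();

    return 0;
}

The OPENCL registration added in CLSimpleOperations.cpp follows the same pattern with CLArithmeticAddition on ICLTensor inputs.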
Diffstat (limited to 'src/graph')
-rw-r--r--   src/graph/Graph.cpp                             1
-rw-r--r--   src/graph/nodes/ResidualLayer.cpp             199
-rw-r--r--   src/graph/operations/CLSimpleOperations.cpp    30
-rw-r--r--   src/graph/operations/NESimpleOperations.cpp    30
4 files changed, 258 insertions, 2 deletions
diff --git a/src/graph/Graph.cpp b/src/graph/Graph.cpp
index 98d95904dc..b6c6822c36 100644
--- a/src/graph/Graph.cpp
+++ b/src/graph/Graph.cpp
@@ -77,6 +77,7 @@ Graph::~Graph() //NOLINT
Graph::Graph()
: _pimpl{ new Private() }
{
+ graph_init();
}
void Graph::graph_init(const bool use_cl_tuner)
diff --git a/src/graph/nodes/ResidualLayer.cpp b/src/graph/nodes/ResidualLayer.cpp
new file mode 100644
index 0000000000..87404f9e1f
--- /dev/null
+++ b/src/graph/nodes/ResidualLayer.cpp
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/ResidualLayer.h"
+
+#include "arm_compute/graph/Error.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/NodeContext.h"
+#include "arm_compute/graph/OperationRegistry.h"
+#include "arm_compute/graph/SubGraph.h"
+#include "arm_compute/graph/Tensor.h"
+#include "arm_compute/runtime/IFunction.h"
+#include "support/ToolchainSupport.h"
+#include "utils/Utils.h"
+
+#include <memory>
+#include <tuple>
+#include <vector>
+
+using namespace arm_compute::graph;
+
+/** Residual function */
+class ResidualFunction final : public arm_compute::IFunction
+{
+public:
+ /** Default Constructor */
+ ResidualFunction(GraphContext &ctx, ITensorObject *output)
+ : _ctx(ctx), _input(nullptr), _output(output), _func(nullptr), _graphs(), _graph_outputs()
+ {
+ }
+
+ /** Prevent instances from being copy constructed */
+ ResidualFunction(const ResidualFunction &) = delete;
+ /** Prevent instances from being copy assigned */
+ const ResidualFunction &operator=(const ResidualFunction &) = delete;
+ /** Prevent instances from being move constructed */
+ ResidualFunction(ResidualFunction &&) = delete;
+ /** Prevent instances from being move assigned */
+ ResidualFunction &operator=(ResidualFunction &&) = delete;
+ /** Default destructor */
+ ~ResidualFunction() override = default;
+
+ /** Set the input (when using only one sub-graph)
+ *
+ * @param[in] input Input to set
+ */
+ void set_input(std::unique_ptr<ITensorObject> input)
+ {
+ _input = std::move(input);
+ }
+
+ /** Registers graph to be executed by the residual function
+ *
+ * @param[in] graph Graph to register
+ * @param[in] output Output to register
+ */
+ void register_graph(std::unique_ptr<Graph> graph, std::unique_ptr<ITensorObject> output)
+ {
+ _graphs.push_back(std::move(graph));
+ _graph_outputs.push_back(std::move(output));
+ }
+
+ /** Configure the function */
+ void configure()
+ {
+ ARM_COMPUTE_ERROR_ON(_graphs.size() < 1 || _graphs.size() > 2);
+ TargetHint target_hint = _ctx.hints().target_hint();
+
+ // Create node context
+ NodeContext node_ctx(OperationType::ArithmeticAddition);
+ node_ctx.set_target(target_hint);
+
+ if(_graphs.size() == 1)
+ {
+ arm_compute::ITensor *in = _input->tensor();
+ node_ctx.add_input(in);
+ }
+
+ for(auto &o : _graph_outputs)
+ {
+ arm_compute::ITensor *in = o->tensor();
+ node_ctx.add_input(in);
+ }
+
+ arm_compute::ITensor *out = _output->tensor();
+ auto_init_if_empty(*out->info(), *_graph_outputs[0]->tensor()->info());
+ node_ctx.add_output(out);
+
+ _func = OperationRegistry::get().find_operation(OperationType::ArithmeticAddition, target_hint)->configure(node_ctx);
+
+ for(auto &o : _graph_outputs)
+ {
+ o->allocate();
+ }
+ }
+
+ // Inherited methods overridden:
+ void run() override
+ {
+ ARM_COMPUTE_ERROR_ON(_graphs.size() < 1 || _graphs.size() > 2);
+
+ for(auto &g : _graphs)
+ {
+ ARM_COMPUTE_ERROR_ON(g.get() == nullptr);
+ g->run();
+ }
+
+ _func->run();
+ }
+
+private:
+ GraphContext _ctx;
+ std::unique_ptr<ITensorObject> _input;
+ ITensorObject *_output;
+ std::unique_ptr<arm_compute::IFunction> _func;
+ std::vector<std::unique_ptr<Graph>> _graphs;
+ std::vector<std::unique_ptr<ITensorObject>> _graph_outputs;
+};
+
+std::unique_ptr<arm_compute::IFunction> ResidualLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
+{
+ ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<Tensor *>(input) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<Tensor *>(output) == nullptr);
+
+ // Create residual function
+ auto func = arm_compute::support::cpp14::make_unique<ResidualFunction>(ctx, output);
+
+ if(_sub_graphs.size() == 1)
+ {
+ std::unique_ptr<ITensorObject> original_in;
+ original_in = arm_compute::support::cpp14::make_unique<SubTensor>(*dynamic_cast<Tensor *>(input),
+ input->tensor()->info()->tensor_shape(),
+ Coordinates());
+ func->set_input(std::move(original_in));
+ }
+
+ // Construct all sub-graphs given the input/output
+ for(auto &sg : _sub_graphs)
+ {
+ ARM_COMPUTE_ERROR_ON(sg.get() == nullptr);
+
+ // IO buffers
+ std::unique_ptr<ITensorObject> in;
+ std::unique_ptr<ITensorObject> out;
+ std::unique_ptr<ITensorObject> func_in;
+
+ // Create input sub-tensor
+ if(!sg->has_input())
+ {
+ in = arm_compute::support::cpp14::make_unique<SubTensor>(*dynamic_cast<Tensor *>(input),
+ input->tensor()->info()->tensor_shape(),
+ Coordinates());
+ }
+
+ // Create output sub-tensor
+ if(!sg->has_output())
+ {
+ ITensorInfo *info = input->tensor()->info();
+ func_in = arm_compute::support::cpp14::make_unique<Tensor>(TensorInfo(info->num_channels(), info->data_type(), info->fixed_point_position()));
+ func_in->set_target(ctx.hints().target_hint());
+ out = arm_compute::support::cpp14::make_unique<SubTensor>(func_in->tensor(),
+ TensorShape(),
+ Coordinates(0, 0, 0),
+ func_in->target(),
+ true);
+ }
+
+ // Construct sub_graph
+ auto g = sg->construct(ctx, std::move(in), std::move(out));
+
+ // Register graph to function
+ func->register_graph(std::move(g), std::move(func_in));
+ }
+
+ func->configure();
+
+ return std::move(func);
+}
diff --git a/src/graph/operations/CLSimpleOperations.cpp b/src/graph/operations/CLSimpleOperations.cpp
index 94e3fe15f7..fe56122009 100644
--- a/src/graph/operations/CLSimpleOperations.cpp
+++ b/src/graph/operations/CLSimpleOperations.cpp
@@ -66,6 +66,34 @@ REGISTER_SIMPLE_OPERATION(CLActivationLayerOperation, OPENCL, OperationType::Act
return std::move(activation);
}
+/* Arithmetic addition */
+REGISTER_SIMPLE_OPERATION(CLArithmeticAdditionOperation, OPENCL, OperationType::ArithmeticAddition)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 2);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info
+ auto *in1 = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
+ auto *in2 = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1));
+ auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
+
+ auto addition = arm_compute::support::cpp14::make_unique<arm_compute::CLArithmeticAddition>();
+ addition->configure(in1, in2, out, ConvertPolicy::SATURATE);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLArithmeticAddition"
+ << " Data Type: " << in1->info()->data_type()
+ << " Input 1 shape: " << in1->info()->tensor_shape()
+ << " Input 2 shape: " << in2->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << std::endl);
+
+ return std::move(addition);
+}
+
/* Batch Normalization Layer */
REGISTER_SIMPLE_OPERATION(CLBatchNormalizationLayerOperation, OPENCL, OperationType::BatchNormalizationLayer)
{
@@ -464,4 +492,4 @@ REGISTER_SIMPLE_OPERATION(CLSoftmaxLayerOperation, OPENCL, OperationType::Softma
<< std::endl);
return std::move(smx);
-}
\ No newline at end of file
+}
diff --git a/src/graph/operations/NESimpleOperations.cpp b/src/graph/operations/NESimpleOperations.cpp
index 265bed6b7a..4154b9a59c 100644
--- a/src/graph/operations/NESimpleOperations.cpp
+++ b/src/graph/operations/NESimpleOperations.cpp
@@ -66,6 +66,34 @@ REGISTER_SIMPLE_OPERATION(NEActivationLayerOperation, NEON, OperationType::Activ
return std::move(activation);
}
+/* Arithmetic addition */
+REGISTER_SIMPLE_OPERATION(NEArithmeticAdditionOperation, NEON, OperationType::ArithmeticAddition)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 2);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(1)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info
+ auto *in1 = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
+ auto *in2 = dynamic_cast<arm_compute::ITensor *>(ctx.input(1));
+ auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
+
+ auto addition = arm_compute::support::cpp14::make_unique<arm_compute::NEArithmeticAddition>();
+ addition->configure(in1, in2, out, ConvertPolicy::SATURATE);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEArithmeticAddition"
+ << " Data Type: " << in1->info()->data_type()
+ << " Input 1 shape: " << in1->info()->tensor_shape()
+ << " Input 2 shape: " << in2->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << std::endl);
+
+ return std::move(addition);
+}
+
/* Batch Normalization Layer */
REGISTER_SIMPLE_OPERATION(NEBatchNormalizationLayerOperation, NEON, OperationType::BatchNormalizationLayer)
{
@@ -464,4 +492,4 @@ REGISTER_SIMPLE_OPERATION(NESoftmaxLayerOperation, NEON, OperationType::SoftmaxL
<< std::endl);
return std::move(smx);
-}
\ No newline at end of file
+}