aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorGeorgios Pinitas <georgios.pinitas@arm.com>2018-04-03 13:44:29 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:49:16 +0000
commitd9eb27597eabe5b7c17520f4f9b3f8a282d72573 (patch)
tree9b2b7d74b0ef83623b18d6d4279a564e5b63d641 /src
parenta8ca2b0cfe052c9a28b691317a674f28f495c139 (diff)
downloadComputeLibrary-d9eb27597eabe5b7c17520f4f9b3f8a282d72573.tar.gz
COMPMID-797: Switch to new graph.
- Cleaned up build system Change-Id: If2faa27ee5b31fa8b972836960ab3ef671059c8d Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/126435 Tested-by: Jenkins <bsgcomp@arm.com> Reviewed-by: Pablo Tello <pablo.tello@arm.com>
Diffstat (limited to 'src')
-rw-r--r--src/core/CL/OpenCL.cpp32
-rw-r--r--src/graph/CL/CLMap.cpp43
-rw-r--r--src/graph/CL/CLUnmap.cpp43
-rw-r--r--src/graph/Graph.cpp344
-rw-r--r--src/graph/GraphBuilder.cpp (renamed from src/graph2/GraphBuilder.cpp)14
-rw-r--r--src/graph/GraphContext.cpp56
-rw-r--r--src/graph/GraphManager.cpp (renamed from src/graph2/GraphManager.cpp)18
-rw-r--r--src/graph/INode.cpp176
-rw-r--r--src/graph/NodeContext.cpp75
-rw-r--r--src/graph/OperationRegistry.cpp61
-rw-r--r--src/graph/PassManager.cpp (renamed from src/graph2/PassManager.cpp)8
-rw-r--r--src/graph/SubGraph.cpp110
-rw-r--r--src/graph/SubTensor.cpp119
-rw-r--r--src/graph/Tensor.cpp141
-rw-r--r--src/graph/Utils.cpp (renamed from src/graph2/Utils.cpp)14
-rw-r--r--src/graph/Workload.cpp (renamed from src/graph2/Workload.cpp)10
-rw-r--r--src/graph/backends/BackendRegistry.cpp (renamed from src/graph2/backends/BackendRegistry.cpp)8
-rw-r--r--src/graph/backends/CL/CLDeviceBackend.cpp (renamed from src/graph2/backends/CL/CLDeviceBackend.cpp)33
-rw-r--r--src/graph/backends/CL/CLFunctionsFactory.cpp (renamed from src/graph2/backends/CL/CLFunctionsFactory.cpp)24
-rw-r--r--src/graph/backends/CL/CLNodeValidator.cpp (renamed from src/graph2/backends/CL/CLNodeValidator.cpp)10
-rw-r--r--src/graph/backends/CL/CLSubTensorHandle.cpp (renamed from src/graph2/backends/CL/CLSubTensorHandle.cpp)6
-rw-r--r--src/graph/backends/CL/CLTensorHandle.cpp (renamed from src/graph2/backends/CL/CLTensorHandle.cpp)6
-rw-r--r--src/graph/backends/GLES/GCDeviceBackend.cpp (renamed from src/graph2/backends/GLES/GCDeviceBackend.cpp)31
-rw-r--r--src/graph/backends/GLES/GCFunctionsFactory.cpp (renamed from src/graph2/backends/GLES/GCFunctionsFactory.cpp)24
-rw-r--r--src/graph/backends/GLES/GCNodeValidator.cpp (renamed from src/graph2/backends/GLES/GCNodeValidator.cpp)10
-rw-r--r--src/graph/backends/GLES/GCTensorHandle.cpp (renamed from src/graph2/backends/GLES/GCTensorHandle.cpp)6
-rw-r--r--src/graph/backends/NEON/NEDeviceBackend.cpp (renamed from src/graph2/backends/NEON/NEDeviceBackend.cpp)33
-rw-r--r--src/graph/backends/NEON/NEFunctionFactory.cpp (renamed from src/graph2/backends/NEON/NEFunctionFactory.cpp)20
-rw-r--r--src/graph/backends/NEON/NENodeValidator.cpp (renamed from src/graph2/backends/NEON/NENodeValidator.cpp)10
-rw-r--r--src/graph/backends/NEON/NESubTensorHandle.cpp (renamed from src/graph2/backends/NEON/NESubTensorHandle.cpp)6
-rw-r--r--src/graph/backends/NEON/NETensorHandle.cpp (renamed from src/graph2/backends/NEON/NETensorHandle.cpp)6
-rw-r--r--src/graph/detail/ExecutionHelpers.cpp (renamed from src/graph2/detail/ExecutionHelpers.cpp)16
-rw-r--r--src/graph/frontend/Stream.cpp (renamed from src/graph2/frontend/Stream.cpp)10
-rw-r--r--src/graph/frontend/SubStream.cpp (renamed from src/graph2/frontend/SubStream.cpp)10
-rw-r--r--src/graph/mutators/DepthConcatSubTensorMutator.cpp (renamed from src/graph2/mutators/DepthConcatSubTensorMutator.cpp)14
-rw-r--r--src/graph/mutators/InPlaceOperationMutator.cpp (renamed from src/graph2/mutators/InPlaceOperationMutator.cpp)10
-rw-r--r--src/graph/mutators/NodeFusionMutator.cpp (renamed from src/graph2/mutators/NodeFusionMutator.cpp)12
-rw-r--r--src/graph/mutators/SplitLayerSubTensorMutator.cpp (renamed from src/graph2/mutators/SplitLayerSubTensorMutator.cpp)14
-rw-r--r--src/graph/nodes/ActivationLayer.cpp56
-rw-r--r--src/graph/nodes/ActivationLayerNode.cpp (renamed from src/graph2/nodes/ActivationLayerNode.cpp)10
-rw-r--r--src/graph/nodes/BatchNormalizationLayer.cpp105
-rw-r--r--src/graph/nodes/BatchNormalizationLayerNode.cpp (renamed from src/graph2/nodes/BatchNormalizationLayerNode.cpp)10
-rw-r--r--src/graph/nodes/BranchLayer.cpp130
-rw-r--r--src/graph/nodes/ConstNode.cpp (renamed from src/graph2/nodes/ConstNode.cpp)10
-rw-r--r--src/graph/nodes/ConvolutionLayer.cpp363
-rw-r--r--src/graph/nodes/ConvolutionLayerNode.cpp (renamed from src/graph2/nodes/ConvolutionLayerNode.cpp)10
-rw-r--r--src/graph/nodes/DeQuantizationLayer.cpp68
-rw-r--r--src/graph/nodes/DepthConcatenateLayerNode.cpp (renamed from src/graph2/nodes/DepthConcatenateLayerNode.cpp)10
-rw-r--r--src/graph/nodes/DepthConvertLayer.cpp58
-rw-r--r--src/graph/nodes/DepthwiseConvolutionLayer.cpp91
-rw-r--r--src/graph/nodes/DepthwiseConvolutionLayerNode.cpp (renamed from src/graph2/nodes/DepthwiseConvolutionLayerNode.cpp)10
-rw-r--r--src/graph/nodes/EltwiseLayerNode.cpp (renamed from src/graph2/nodes/EltwiseLayerNode.cpp)10
-rw-r--r--src/graph/nodes/FlattenLayer.cpp54
-rw-r--r--src/graph/nodes/FlattenLayerNode.cpp (renamed from src/graph2/nodes/FlattenLayerNode.cpp)10
-rw-r--r--src/graph/nodes/FloorLayer.cpp49
-rw-r--r--src/graph/nodes/FullyConnectedLayer.cpp125
-rw-r--r--src/graph/nodes/InputNode.cpp (renamed from src/graph2/nodes/InputNode.cpp)10
-rw-r--r--src/graph/nodes/L2NormalizeLayer.cpp56
-rw-r--r--src/graph/nodes/NormalizationLayer.cpp55
-rw-r--r--src/graph/nodes/NormalizationLayerNode.cpp (renamed from src/graph2/nodes/NormalizationLayerNode.cpp)10
-rw-r--r--src/graph/nodes/OutputNode.cpp (renamed from src/graph2/nodes/OutputNode.cpp)12
-rw-r--r--src/graph/nodes/PoolingLayer.cpp55
-rw-r--r--src/graph/nodes/PoolingLayerNode.cpp (renamed from src/graph2/nodes/PoolingLayerNode.cpp)10
-rw-r--r--src/graph/nodes/QuantizationLayer.cpp48
-rw-r--r--src/graph/nodes/ReshapeLayer.cpp70
-rw-r--r--src/graph/nodes/ResidualLayer.cpp199
-rw-r--r--src/graph/nodes/SoftmaxLayer.cpp49
-rw-r--r--src/graph/nodes/SoftmaxLayerNode.cpp (renamed from src/graph2/nodes/SoftmaxLayerNode.cpp)10
-rw-r--r--src/graph/nodes/SplitLayerNode.cpp (renamed from src/graph2/nodes/SplitLayerNode.cpp)10
-rw-r--r--src/graph/operations/CLSimpleOperations.cpp495
-rw-r--r--src/graph/operations/NESimpleOperations.cpp495
-rw-r--r--src/graph/printers/DotGraphPrinter.cpp (renamed from src/graph2/printers/DotGraphPrinter.cpp)14
-rw-r--r--src/graph2/Graph.cpp227
-rw-r--r--src/graph2/GraphContext.cpp74
-rw-r--r--src/graph2/INode.cpp193
-rw-r--r--src/graph2/Tensor.cpp111
-rw-r--r--src/graph2/nodes/FullyConnectedLayer.cpp107
-rw-r--r--src/graph2/nodes/ReshapeLayer.cpp81
-rw-r--r--src/runtime/NEON/functions/NEWinogradLayer.cpp2
79 files changed, 783 insertions, 4372 deletions
diff --git a/src/core/CL/OpenCL.cpp b/src/core/CL/OpenCL.cpp
index 0ef800f265..a8ed9733ef 100644
--- a/src/core/CL/OpenCL.cpp
+++ b/src/core/CL/OpenCL.cpp
@@ -115,6 +115,8 @@ bool CLSymbols::load(const std::string &library)
LOAD_FUNCTION_PTR(clSVMFree, handle);
LOAD_FUNCTION_PTR(clEnqueueSVMMap, handle);
LOAD_FUNCTION_PTR(clEnqueueSVMUnmap, handle);
+ LOAD_FUNCTION_PTR(clEnqueueMarker, handle);
+ LOAD_FUNCTION_PTR(clWaitForEvents, handle);
#undef LOAD_FUNCTION_PTR
@@ -133,6 +135,36 @@ bool opencl_is_available()
}
} // namespace arm_compute
+cl_int clEnqueueMarker(cl_command_queue command_queue,
+ cl_event *event)
+{
+ arm_compute::CLSymbols::get().load_default();
+ auto func = arm_compute::CLSymbols::get().clEnqueueMarker_ptr;
+ if(func != nullptr)
+ {
+ return func(command_queue, event);
+ }
+ else
+ {
+ return CL_OUT_OF_RESOURCES;
+ }
+}
+
+cl_int clWaitForEvents(cl_uint num_events,
+ const cl_event *event_list)
+{
+ arm_compute::CLSymbols::get().load_default();
+ auto func = arm_compute::CLSymbols::get().clWaitForEvents_ptr;
+ if(func != nullptr)
+ {
+ return func(num_events, event_list);
+ }
+ else
+ {
+ return CL_OUT_OF_RESOURCES;
+ }
+}
+
cl_int clEnqueueSVMMap(cl_command_queue command_queue, cl_bool blocking_map, cl_map_flags flags, void *svm_ptr,
size_t size, cl_uint num_events_in_wait_list, const cl_event *event_wait_list, cl_event *event)
{
diff --git a/src/graph/CL/CLMap.cpp b/src/graph/CL/CLMap.cpp
deleted file mode 100644
index 5289ea9a04..0000000000
--- a/src/graph/CL/CLMap.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/CL/CLMap.h"
-
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/graph/ITensorObject.h"
-#include "arm_compute/runtime/CL/CLScheduler.h"
-
-using namespace arm_compute::graph;
-
-CLMap::CLMap(ITensorObject *tensor, bool blocking)
- : _tensor(dynamic_cast<arm_compute::ICLTensor *>(tensor->tensor())), _blocking(blocking)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(_tensor);
-}
-
-void CLMap::run()
-{
- _tensor->map(arm_compute::CLScheduler::get().queue(), _blocking);
-}
diff --git a/src/graph/CL/CLUnmap.cpp b/src/graph/CL/CLUnmap.cpp
deleted file mode 100644
index 31f2f19e9c..0000000000
--- a/src/graph/CL/CLUnmap.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/CL/CLUnmap.h"
-
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/graph/ITensorObject.h"
-#include "arm_compute/runtime/CL/CLScheduler.h"
-
-using namespace arm_compute::graph;
-
-CLUnmap::CLUnmap(ITensorObject *tensor)
- : _tensor(dynamic_cast<arm_compute::ICLTensor *>(tensor->tensor()))
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(_tensor);
-}
-
-void CLUnmap::run()
-{
- _tensor->unmap(arm_compute::CLScheduler::get().queue());
-}
diff --git a/src/graph/Graph.cpp b/src/graph/Graph.cpp
index 47bd672114..e1ffeed668 100644
--- a/src/graph/Graph.cpp
+++ b/src/graph/Graph.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,293 +23,205 @@
*/
#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/CL/CLMap.h"
-#include "arm_compute/graph/CL/CLUnmap.h"
-#include "arm_compute/graph/INode.h"
-#include "arm_compute/graph/ITensorObject.h"
-#include "arm_compute/graph/Tensor.h"
-#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/CL/CLTuner.h"
-#include "arm_compute/runtime/Tensor.h"
-#include "support/ToolchainSupport.h"
-
-#include <sys/stat.h>
-
-using namespace arm_compute::graph;
-
-namespace
+namespace arm_compute
{
-bool file_exists(const std::string &filename)
+namespace graph
{
- std::ifstream file(filename);
- return file.good();
-}
-
-} // namespace
-struct Stage
+Graph::Graph(GraphID id, std::string name)
+ : _id(id), _name(std::move(name)), _nodes(), _edges(), _tensors(), _tagged_nodes(), _mtx()
{
- ITensorObject *_input;
- ITensorObject *_output;
- std::unique_ptr<arm_compute::IFunction> _function;
-};
+}
-struct Graph::Private
-{
-public:
- /** Finalizes the current node's configuration
- *
- * @param _next_hint Device execution hint
- */
- void configure(GraphHints _next_hints);
-
- GraphContext _ctx{};
- std::vector<Stage> _pipeline{};
- std::vector<std::unique_ptr<ITensorObject>> _tensors{};
- std::vector<std::unique_ptr<INode>> _nodes{};
- GraphHints _current_hints{};
- GraphHints _next_hints{};
- std::unique_ptr<ITensorObject> _graph_input{ nullptr };
- std::unique_ptr<ITensorObject> _graph_output{ nullptr };
- std::unique_ptr<INode> _current_node{ nullptr };
- ITensorObject *_current_output{ nullptr };
- bool _info_enabled{ false };
- CLTuner _tuner{};
-
-private:
- ITensorObject *_current_input{ nullptr };
- GraphHints _previous_hints{};
-};
-
-static const std::string tuner_data_filename = "acl_tuner.csv";
-Graph::~Graph() //NOLINT
+bool Graph::remove_node(NodeID nid)
{
- if(_pimpl->_tuner.tune_new_kernels() && !_pimpl->_tuner.lws_table().empty())
+ if(nid >= _nodes.size())
{
- _pimpl->_tuner.save_to_file(tuner_data_filename);
+ return false;
}
-}
-Graph::Graph()
- : _pimpl{ new Private() }
-{
- graph_init();
-}
+ std::unique_ptr<INode> &node = _nodes[nid];
-void Graph::graph_init(const bool use_cl_tuner)
-{
- // Check if OpenCL is available and initialize the scheduler
- if(opencl_is_available())
+ // Remove node connections
+ if(node)
{
- if(_pimpl->_tuner.lws_table().empty() && file_exists(tuner_data_filename))
+ for(auto &input_eid : node->_input_edges)
{
- _pimpl->_tuner.load_from_file(tuner_data_filename);
+ remove_connection(input_eid);
}
- _pimpl->_tuner.set_tune_new_kernels(use_cl_tuner);
- arm_compute::CLScheduler::get().default_init(&_pimpl->_tuner);
- }
-}
-void Graph::run()
-{
- while(true)
- {
- if(_pimpl->_graph_input->has_accessor() && !_pimpl->_graph_input->call_accessor())
+        for(auto &output_eid : node->_output_edges)
        {
-            return;
+            remove_connection(output_eid);
}
+ }
- for(auto &stage : _pimpl->_pipeline)
- {
- stage._function->run();
- }
+ node = nullptr;
- if((_pimpl->_graph_output->has_accessor() && !_pimpl->_graph_output->call_accessor())
- || (!_pimpl->_graph_output->has_accessor()))
- {
- return;
- }
- }
+ return true;
}
-//Finalize current node's configuration
-void Graph::Private::configure(GraphHints _next_hints)
+EdgeID Graph::add_connection(NodeID source, size_t source_idx, NodeID sink, size_t sink_idx)
{
- ARM_COMPUTE_ERROR_ON(_current_node == nullptr);
- ARM_COMPUTE_ERROR_ON(_graph_input == nullptr);
+ std::lock_guard<arm_compute::Mutex> lock(_mtx);
- // Is it the first node of the graph ?
- if(_current_input == nullptr)
- {
- _graph_input->set_target(_current_hints.target_hint());
- _current_input = _graph_input.get();
- _previous_hints = _current_hints; // For the first node just assume the previous node was of the same type as this one
- }
+ // Check if node index is valid, if node exists and finally if the connection index is valid
+ ARM_COMPUTE_ERROR_ON((source >= _nodes.size()) || (_nodes[source] == nullptr) || (source_idx >= _nodes[source]->num_outputs()));
+ ARM_COMPUTE_ERROR_ON((sink >= _nodes.size()) || (_nodes[sink] == nullptr) || (sink_idx >= _nodes[sink]->num_inputs()));
- if(_current_node->supports_in_place())
- {
- _current_output = _current_input;
- }
+ // Get nodes
+ std::unique_ptr<INode> &source_node = _nodes[source];
+ std::unique_ptr<INode> &sink_node = _nodes[sink];
- //Automatic output configuration ?
- if(_current_output == nullptr)
+ // Check for duplicate connections (Check only sink node)
+ Edge *sink_node_edge = sink_node->input_edge(sink_idx);
+ if((sink_node_edge != nullptr) && (sink_node_edge->producer_id() == source) && (sink_node_edge->producer_idx() == source_idx)
+ && (sink_node_edge->consumer_id() == sink) && (sink_node_edge->consumer_idx() == sink_idx))
{
- _tensors.push_back(arm_compute::support::cpp14::make_unique<Tensor>(TensorInfo()));
- _current_output = _tensors.back().get();
+ return sink_node_edge->id();
}
- // If either the writer or reader node needs OpenCL then use OpenCL memory:
- if((_next_hints.target_hint() == TargetHint::OPENCL || _current_hints.target_hint() == TargetHint::OPENCL))
+ // Check if there is already a tensor associated with output if not create one
+ TensorID tid = source_node->output_id(source_idx);
+ if(tid == NullTensorID)
{
- _current_output->set_target(TargetHint::OPENCL);
- }
- else
- {
- _current_output->set_target(TargetHint::NEON);
+ tid = create_tensor();
}
+ std::unique_ptr<Tensor> &tensor = _tensors[tid];
+
+ // Create connections
+ EdgeID eid = _edges.size();
+ auto connection = arm_compute::support::cpp14::make_unique<Edge>(eid, source_node.get(), source_idx, sink_node.get(), sink_idx, tensor.get());
+ _edges.push_back(std::move(connection));
+
+ // Add connections to source and sink nodes
+ source_node->_output_edges.insert(eid);
+ sink_node->_input_edges[sink_idx] = eid;
- // Instantiate Node
- _ctx.hints() = _current_hints;
- std::unique_ptr<arm_compute::IFunction> func = _current_node->instantiate_node(_ctx, _current_input, _current_output);
+ // Set tensor output node
+ source_node->_outputs[source_idx] = tid;
+
+ // Bind tensor to the edge
+ tensor->bind_edge(eid);
+
+ // Try and propagate shapes in sink node
+ sink_node->forward_descriptors();
+
+ return eid;
+}
- // If the operation is done in-place, do not allocate or it will prevent following layers from performing the configuration
- if(!_current_node->supports_in_place())
+bool Graph::remove_connection(EdgeID eid)
+{
+ if(eid >= _edges.size())
{
- // Allocate current input
- _current_input->allocate();
+ return false;
}
- // Map input if needed
- if(_current_input->target() == TargetHint::OPENCL)
+ std::unique_ptr<Edge> &edge = _edges[eid];
+
+ // Remove node connections
+ if(edge != nullptr)
{
- if(_previous_hints.target_hint() == TargetHint::NEON)
+ // Get tensor bound to the edge
+ if(edge->tensor() != nullptr)
+ {
+ edge->tensor()->unbind_edge(eid);
+ }
+
+ // Remove edges from source node
+ if(edge->producer() != nullptr)
{
- ARM_COMPUTE_ERROR_ON(_current_hints.target_hint() == TargetHint::NEON);
- _pipeline.push_back({ _current_input, _current_input, arm_compute::support::cpp14::make_unique<CLUnmap>(_current_input) });
+ edge->producer()->_output_edges.erase(eid);
}
- if(_current_hints.target_hint() == TargetHint::NEON)
+
+ // Remove edges from sink node
+ if((edge->consumer() != nullptr) && (edge->consumer_idx() < edge->consumer()->_input_edges.size()))
{
- ARM_COMPUTE_ERROR_ON(_previous_hints.target_hint() == TargetHint::NEON);
- _pipeline.push_back({ _current_input, _current_input, arm_compute::support::cpp14::make_unique<CLMap>(_current_input, true) });
+ edge->consumer()->_input_edges[edge->consumer_idx()] = EmptyEdgeID;
}
}
- _pipeline.push_back({ _current_input, _current_output, std::move(func) });
+ // Clear edge
+ edge = nullptr;
- _current_input = _current_output;
- _current_output = nullptr;
- std::swap(_previous_hints, _current_hints);
- std::swap(_current_hints, _next_hints);
+ return true;
}
-void Graph::add_node(std::unique_ptr<INode> node)
+TensorID Graph::create_tensor(TensorDescriptor desc)
{
- ARM_COMPUTE_ERROR_ON_MSG(_pimpl->_graph_input == nullptr, "The graph's input must be set before the first node is added");
- ARM_COMPUTE_ERROR_ON_MSG(_pimpl->_graph_output != nullptr, "Nothing can be added after the output tensor");
- //Trigger the creation of the current Node:
-
- GraphHints _next_hints = _pimpl->_next_hints;
- _next_hints.set_target_hint(node->override_target_hint(_pimpl->_next_hints.target_hint()));
- ARM_COMPUTE_ERROR_ON(_next_hints.target_hint() == TargetHint::DONT_CARE);
- if(_pimpl->_current_node)
- {
- //Finalize the previous Node:
- _pimpl->configure(_pimpl->_next_hints);
- }
- else
- {
- // If that's the first node then use the same TargetHint before and after the node.
- _pimpl->_current_hints = _next_hints;
- }
- if(_pimpl->_current_node)
- {
- _pimpl->_nodes.push_back(std::move(_pimpl->_current_node));
- }
- _pimpl->_current_node = std::move(node);
+ TensorID tid = _tensors.size();
+ auto tensor = support::cpp14::make_unique<Tensor>(tid, desc);
+ _tensors.push_back(std::move(tensor));
+
+ return tid;
}
-//Add a tensor with an Accessor (i.e either the input or output of the graph)
-void Graph::add_tensor_object(std::unique_ptr<ITensorObject> tensor)
+std::string Graph::name() const
{
- // If it's the first Tensor added then it will be the input of the Graph.
- if(_pimpl->_graph_input == nullptr)
- {
- ARM_COMPUTE_ERROR_ON(_pimpl->_graph_output != nullptr);
- ARM_COMPUTE_ERROR_ON(_pimpl->_current_node != nullptr);
- _pimpl->_graph_input = std::move(tensor);
- }
- else
- {
- // Else it will be the output of the Graph
- ARM_COMPUTE_ERROR_ON(_pimpl->_graph_output != nullptr);
- ARM_COMPUTE_ERROR_ON(_pimpl->_current_node == nullptr);
- _pimpl->_graph_output = std::move(tensor);
- _pimpl->_current_output = _pimpl->_graph_output.get();
-
- // Finalize the graph by configuring the last Node of the graph:
- _pimpl->configure(_pimpl->_current_hints); // Ignore _next_hint as this is the last node, and just use the same hint as before this node.
- _pimpl->_graph_output->allocate();
- }
+ return _name;
}
-bool Graph::opencl_is_available()
+GraphID Graph::id() const
{
- return arm_compute::opencl_is_available();
+ return _id;
}
-arm_compute::GPUTarget Graph::gpu_target()
+const std::vector<NodeID> &Graph::inputs()
{
- // Check if OpenCL is available before returning the GPU target
- if(opencl_is_available())
- {
- return arm_compute::CLScheduler::get().target();
- }
- else
- {
- return GPUTarget::MIDGARD;
- }
+ return _tagged_nodes[NodeType::Input];
+}
+
+std::vector<std::unique_ptr<INode>> &Graph::nodes()
+{
+ return _nodes;
}
-void Graph::set_temp(TensorInfo &&tmp)
+const std::vector<std::unique_ptr<INode>> &Graph::nodes() const
{
- ARM_COMPUTE_ERROR_ON(_pimpl->_graph_input == nullptr);
- ARM_COMPUTE_ERROR_ON(_pimpl->_graph_output != nullptr);
- ARM_COMPUTE_ERROR_ON_MSG(_pimpl->_current_output != nullptr, "TensorInfo for temporary tensor already set");
+ return _nodes;
+}
+
+const std::vector<std::unique_ptr<Edge>> &Graph::edges() const
+{
+ return _edges;
+}
- _pimpl->_tensors.push_back(arm_compute::support::cpp14::make_unique<Tensor>(std::move(tmp)));
- _pimpl->_current_output = _pimpl->_tensors.back().get();
+std::vector<std::unique_ptr<Tensor>> &Graph::tensors()
+{
+ return _tensors;
+}
+
+const std::vector<std::unique_ptr<Tensor>> &Graph::tensors() const
+{
+ return _tensors;
}
-GraphHints &Graph::hints()
+const INode *Graph::node(NodeID id) const
{
- return _pimpl->_next_hints;
+ return (id >= _nodes.size()) ? nullptr : _nodes[id].get();
}
-Graph &arm_compute::graph::operator<<(Graph &graph, TensorInfo &&info)
+INode *Graph::node(NodeID id)
{
- graph.set_temp(std::move(info));
- return graph;
+ return (id >= _nodes.size()) ? nullptr : _nodes[id].get();
}
-Graph &arm_compute::graph::operator<<(Graph &graph, Tensor &&tensor)
+const Edge *Graph::edge(EdgeID id) const
{
- graph.add_tensor_object(arm_compute::support::cpp14::make_unique<Tensor>(std::move(tensor)));
- return graph;
+ return (id >= _edges.size()) ? nullptr : _edges[id].get();
}
-Graph &arm_compute::graph::operator<<(Graph &graph, SubTensor &&sub_tensor)
+Edge *Graph::edge(EdgeID id)
{
- graph.add_tensor_object(arm_compute::support::cpp14::make_unique<SubTensor>(std::move(sub_tensor)));
- return graph;
+ return (id >= _edges.size()) ? nullptr : _edges[id].get();
}
-Graph &arm_compute::graph::operator<<(Graph &graph, TargetHint target_hint)
+const Tensor *Graph::tensor(TensorID id) const
{
- graph.hints().set_target_hint(target_hint);
- return graph;
+ return (id >= _tensors.size()) ? nullptr : _tensors[id].get();
}
-Graph &arm_compute::graph::operator<<(Graph &graph, ConvolutionMethodHint conv_method_hint)
+Tensor *Graph::tensor(TensorID id)
{
- graph.hints().set_convolution_method_hint(conv_method_hint);
- return graph;
+ return (id >= _tensors.size()) ? nullptr : _tensors[id].get();
}
+} // namespace graph
+} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/GraphBuilder.cpp b/src/graph/GraphBuilder.cpp
index e6fc2afe21..0d1bdc3596 100644
--- a/src/graph2/GraphBuilder.cpp
+++ b/src/graph/GraphBuilder.cpp
@@ -21,19 +21,19 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/GraphBuilder.h"
+#include "arm_compute/graph/GraphBuilder.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/Utils.h"
-#include "arm_compute/graph2/algorithms/BFS.h"
-#include "arm_compute/graph2/nodes/Nodes.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/Utils.h"
+#include "arm_compute/graph/algorithms/BFS.h"
+#include "arm_compute/graph/nodes/Nodes.h"
#define CHECK_NODEIDX_PAIR(pair, g) \
ARM_COMPUTE_ERROR_ON(((pair).node_id >= (g).nodes().size()) || ((g).node((pair).node_id) == nullptr) || ((pair).index >= (g).node((pair).node_id)->num_outputs()));
namespace arm_compute
{
-namespace graph2
+namespace graph
{
namespace
{
@@ -390,5 +390,5 @@ NodeID GraphBuilder::add_split_node(Graph &g, NodeParams params, NodeIdxPair inp
{
return create_simple_single_input_output_node<SplitLayerNode>(g, params, input, num_splits, axis);
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph/GraphContext.cpp b/src/graph/GraphContext.cpp
index bfc6fcdfca..6fc45c0aa7 100644
--- a/src/graph/GraphContext.cpp
+++ b/src/graph/GraphContext.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,45 +22,53 @@
* SOFTWARE.
*/
#include "arm_compute/graph/GraphContext.h"
+#include <arm_compute/graph.h>
-using namespace arm_compute::graph;
-
-GraphHints::GraphHints(TargetHint target_hint, ConvolutionMethodHint conv_method_hint)
- : _target_hint(target_hint), _convolution_method_hint(conv_method_hint)
+namespace arm_compute
{
-}
-
-void GraphHints::set_target_hint(TargetHint target_hint)
+namespace graph
{
- _target_hint = target_hint;
-}
-
-void GraphHints::set_convolution_method_hint(ConvolutionMethodHint convolution_method)
+GraphContext::GraphContext()
+ : _config(), _memory_managers()
{
- _convolution_method_hint = convolution_method;
}
-TargetHint GraphHints::target_hint() const
+const GraphConfig &GraphContext::config() const
{
- return _target_hint;
+ return _config;
}
-ConvolutionMethodHint GraphHints::convolution_method_hint() const
+void GraphContext::set_config(const GraphConfig &config)
{
- return _convolution_method_hint;
+ _config = config;
}
-GraphContext::GraphContext()
- : _hints()
+bool GraphContext::insert_memory_management_ctx(MemoryManagerContext &&memory_ctx)
{
+ Target target = memory_ctx.target;
+ if(target == Target::UNSPECIFIED || _memory_managers.find(target) != std::end(_memory_managers))
+ {
+ return false;
+ }
+
+ _memory_managers[target] = std::move(memory_ctx);
+ return true;
}
-GraphHints &GraphContext::hints()
+MemoryManagerContext *GraphContext::memory_management_ctx(Target target)
{
- return _hints;
+ return (_memory_managers.find(target) != std::end(_memory_managers)) ? &_memory_managers[target] : nullptr;
}
-const GraphHints &GraphContext::hints() const
+void GraphContext::finalize()
{
- return _hints;
-} \ No newline at end of file
+ for(auto &mm_obj : _memory_managers)
+ {
+ if(mm_obj.second.mm != nullptr)
+ {
+ mm_obj.second.mm->finalize();
+ }
+ }
+}
+} // namespace graph
+} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/GraphManager.cpp b/src/graph/GraphManager.cpp
index a51ba61104..759300e0c9 100644
--- a/src/graph2/GraphManager.cpp
+++ b/src/graph/GraphManager.cpp
@@ -21,18 +21,18 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/GraphManager.h"
+#include "arm_compute/graph/GraphManager.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/GraphContext.h"
-#include "arm_compute/graph2/Logger.h"
-#include "arm_compute/graph2/PassManager.h"
-#include "arm_compute/graph2/Utils.h"
-#include "arm_compute/graph2/detail/ExecutionHelpers.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/PassManager.h"
+#include "arm_compute/graph/Utils.h"
+#include "arm_compute/graph/detail/ExecutionHelpers.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
GraphManager::GraphManager()
: _workloads()
@@ -113,5 +113,5 @@ void GraphManager::invalidate_graph(Graph &graph)
_workloads.erase(it);
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph/INode.cpp b/src/graph/INode.cpp
index c753f66b43..c1c18e5853 100644
--- a/src/graph/INode.cpp
+++ b/src/graph/INode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,33 +23,171 @@
*/
#include "arm_compute/graph/INode.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/graph/Edge.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/Tensor.h"
-using namespace arm_compute::graph;
+namespace arm_compute
+{
+namespace graph
+{
+// *INDENT-OFF*
+// clang-format off
+INode::INode()
+ : _graph(nullptr), _id(EmptyNodeID), _common_params({ "", Target::UNSPECIFIED}),
+ _outputs(), _input_edges(), _output_edges(), _assigned_target(Target::UNSPECIFIED)
+{
+}
+// clang-format on
+// *INDENT-ON*
-TargetHint INode::override_target_hint(TargetHint target_hint) const
+void INode::set_graph(Graph *g)
{
- if(target_hint == TargetHint::OPENCL && !opencl_is_available())
+ ARM_COMPUTE_ERROR_ON(g == nullptr);
+ _graph = g;
+}
+
+void INode::set_id(NodeID id)
+{
+ _id = id;
+}
+
+void INode::set_common_node_parameters(NodeParams common_params)
+{
+ _common_params = std::move(common_params);
+}
+
+void INode::set_requested_target(Target target)
+{
+ _common_params.target = target;
+}
+
+void INode::set_assigned_target(Target target)
+{
+ _assigned_target = target;
+}
+
+void INode::set_output_tensor(TensorID tid, size_t idx)
+{
+ if(tid != NullTensorID && (idx < _outputs.size()) && (_graph->tensor(tid) != nullptr))
{
- target_hint = TargetHint::DONT_CARE;
+ ARM_COMPUTE_ERROR_ON(_graph == nullptr);
+ Tensor *updated_tensor = _graph->tensor(tid);
+ _outputs[idx] = tid;
+
+ // Set tensor to all output edges of the node
+ for(auto &output_edge_id : _output_edges)
+ {
+ auto output_edge = _graph->edge(output_edge_id);
+ if(output_edge != nullptr)
+ {
+ // Unbind edge from current tensor
+ auto current_output_tensor = output_edge->tensor();
+ current_output_tensor->unbind_edge(output_edge->id());
+
+ // Update tensor to edge and rebind tensor
+ output_edge->update_bound_tensor(updated_tensor);
+ updated_tensor->bind_edge(output_edge->id());
+ }
+ }
}
- GraphHints hints{ target_hint };
- target_hint = node_override_hints(hints).target_hint();
- ARM_COMPUTE_ERROR_ON(target_hint == TargetHint::OPENCL && !opencl_is_available());
- return target_hint;
}
-bool INode::supports_in_place() const
+
+NodeID INode::id() const
+{
+ return _id;
+}
+
+std::string INode::name() const
+{
+ return _common_params.name;
+}
+
+const Graph *INode::graph() const
+{
+ return _graph;
+}
+
+Graph *INode::graph()
+{
+ return _graph;
+}
+
+const std::vector<TensorID> &INode::outputs() const
{
- return _supports_in_place;
+ return _outputs;
}
-void INode::set_supports_in_place(bool value)
+
+const std::vector<EdgeID> &INode::input_edges() const
+{
+ return _input_edges;
+}
+
+const std::set<EdgeID> &INode::output_edges() const
+{
+ return _output_edges;
+}
+
+TensorID INode::input_id(size_t idx) const
+{
+ ARM_COMPUTE_ERROR_ON(idx >= _input_edges.size());
+ Edge *e = _graph->edge(_input_edges[idx]);
+ return (e != nullptr) ? e->tensor_id() : NullTensorID;
+}
+
+TensorID INode::output_id(size_t idx) const
+{
+ ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+ return _outputs[idx];
+}
+
+Tensor *INode::input(size_t idx) const
+{
+ ARM_COMPUTE_ERROR_ON(_graph == nullptr);
+ ARM_COMPUTE_ERROR_ON(idx >= _input_edges.size());
+ Edge *e = _graph->edge(_input_edges[idx]);
+ return (e != nullptr) ? e->tensor() : nullptr;
+}
+
+Tensor *INode::output(size_t idx) const
{
- _supports_in_place = value;
+ ARM_COMPUTE_ERROR_ON(_graph == nullptr);
+ ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+ return _graph->tensor(_outputs[idx]);
}
-GraphHints INode::node_override_hints(GraphHints hints) const
+
+EdgeID INode::input_edge_id(size_t idx) const
+{
+ ARM_COMPUTE_ERROR_ON(idx >= _input_edges.size());
+ return _input_edges[idx];
+}
+
+Edge *INode::input_edge(size_t idx) const
+{
+ ARM_COMPUTE_ERROR_ON(_graph == nullptr);
+ ARM_COMPUTE_ERROR_ON(idx >= _input_edges.size());
+ return _graph->edge(_input_edges[idx]);
+}
+
+size_t INode::num_inputs() const
+{
+ return _input_edges.size();
+}
+
+size_t INode::num_outputs() const
+{
+ return _outputs.size();
+}
+
+Target INode::requested_target() const
+{
+ return _common_params.target;
+}
+
+Target INode::assigned_target() const
{
- TargetHint target_hint = hints.target_hint();
- hints.set_target_hint((target_hint == TargetHint::DONT_CARE) ? TargetHint::NEON : target_hint);
- return hints;
+ return _assigned_target;
}
+} // namespace graph
+} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph/NodeContext.cpp b/src/graph/NodeContext.cpp
deleted file mode 100644
index 2aa5aa13e8..0000000000
--- a/src/graph/NodeContext.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/NodeContext.h"
-
-using namespace arm_compute::graph;
-
-void NodeContext::set_target(TargetHint target)
-{
- _target = target;
-}
-
-void NodeContext::add_input(arm_compute::ITensor *input)
-{
- ARM_COMPUTE_ERROR_ON(input == nullptr);
- _inputs.emplace_back(input);
-}
-
-void NodeContext::add_output(arm_compute::ITensor *output)
-{
- ARM_COMPUTE_ERROR_ON(output == nullptr);
- _outputs.emplace_back(output);
-}
-
-OperationType NodeContext::operation() const
-{
- return _operation;
-}
-
-TargetHint NodeContext::target() const
-{
- return _target;
-}
-
-arm_compute::ITensor *NodeContext::input(size_t idx) const
-{
- ARM_COMPUTE_ERROR_ON(idx >= _inputs.size());
- return _inputs[idx];
-}
-
-arm_compute::ITensor *NodeContext::output(size_t idx) const
-{
- ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
- return _outputs[idx];
-}
-
-size_t NodeContext::num_inputs() const
-{
- return _inputs.size();
-}
-
-size_t NodeContext::num_outputs() const
-{
- return _outputs.size();
-} \ No newline at end of file
diff --git a/src/graph/OperationRegistry.cpp b/src/graph/OperationRegistry.cpp
deleted file mode 100644
index 651653f19c..0000000000
--- a/src/graph/OperationRegistry.cpp
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/OperationRegistry.h"
-
-using namespace arm_compute::graph;
-
-OperationRegistry::OperationRegistry()
- : _registered_ops()
-{
-}
-
-OperationRegistry &OperationRegistry::get()
-{
- static OperationRegistry instance;
- return instance;
-}
-
-IOperation *OperationRegistry::find_operation(OperationType operation, TargetHint target)
-{
- ARM_COMPUTE_ERROR_ON(!contains(operation, target));
- auto it = std::find_if(_registered_ops[operation].begin(), _registered_ops[operation].end(), [&](const std::unique_ptr<IOperation> &op)
- {
- return (op->target() == target);
- });
- ARM_COMPUTE_ERROR_ON(it == _registered_ops[operation].end());
- return (*it).get();
-}
-
-bool OperationRegistry::contains(OperationType operation, TargetHint target) const
-{
- auto it = _registered_ops.find(operation);
- if(it != _registered_ops.end())
- {
- return std::any_of(it->second.begin(), it->second.end(), [&](const std::unique_ptr<IOperation> &op)
- {
- return (op->target() == target);
- });
- }
- return false;
-}
diff --git a/src/graph2/PassManager.cpp b/src/graph/PassManager.cpp
index 2fa937bd89..8ed68bd99b 100644
--- a/src/graph2/PassManager.cpp
+++ b/src/graph/PassManager.cpp
@@ -21,13 +21,13 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/PassManager.h"
+#include "arm_compute/graph/PassManager.h"
-#include "arm_compute/graph2/Logger.h"
+#include "arm_compute/graph/Logger.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
PassManager::PassManager()
: _passes()
@@ -84,5 +84,5 @@ void PassManager::run(Graph &g, size_t index)
pass->mutate(g);
}
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph/SubGraph.cpp b/src/graph/SubGraph.cpp
deleted file mode 100644
index b1cbb9cc95..0000000000
--- a/src/graph/SubGraph.cpp
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/SubGraph.h"
-
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/INode.h"
-#include "arm_compute/graph/Tensor.h"
-
-using namespace arm_compute::graph;
-
-SubGraph::SubGraph()
- : _nodes(), _input(nullptr), _output(nullptr)
-{
-}
-
-void SubGraph::add_node(std::unique_ptr<INode> node)
-{
- _nodes.push_back(std::move(node));
-}
-
-void SubGraph::add_tensor_object(std::unique_ptr<ITensorObject> tensor)
-{
- // If it's the first Tensor added then it will be the input of the Graph.
- if(_input == nullptr)
- {
- _input = std::move(tensor);
- }
- else
- {
- _output = std::move(tensor);
- }
-}
-
-std::unique_ptr<Graph> SubGraph::construct(const GraphContext &ctx, std::unique_ptr<ITensorObject> input, std::unique_ptr<ITensorObject> output)
-{
- auto graph = arm_compute::support::cpp14::make_unique<Graph>();
-
- // Set hint
- // TODO(geopin01): store hints of sub-graph
- graph->hints() = ctx.hints();
-
- // Configure input
- if(_input == nullptr)
- {
- _input = std::move(input);
- }
- graph->add_tensor_object(std::move(_input));
-
- // Make sure first and last nodes of the subgraph always do operations out-of-place
- _nodes.front()->set_supports_in_place(false);
- _nodes.back()->set_supports_in_place(false);
-
- // Construct nodes
- for(auto &node : _nodes)
- {
- graph->add_node(std::move(node));
- }
-
- // Configure output
- if(_output == nullptr)
- {
- _output = std::move(output);
- }
- graph->add_tensor_object(std::move(_output));
-
- return graph;
-}
-
-bool SubGraph::has_input() const
-{
- return _input != nullptr;
-}
-
-bool SubGraph::has_output() const
-{
- return _output != nullptr;
-}
-
-SubGraph &arm_compute::graph::operator<<(SubGraph &graph, Tensor &&tensor)
-{
- graph.add_tensor_object(arm_compute::support::cpp14::make_unique<Tensor>(std::move(tensor)));
- return graph;
-}
-
-SubGraph &arm_compute::graph::operator<<(SubGraph &graph, SubTensor &&sub_tensor)
-{
- graph.add_tensor_object(arm_compute::support::cpp14::make_unique<SubTensor>(std::move(sub_tensor)));
- return graph;
-}
diff --git a/src/graph/SubTensor.cpp b/src/graph/SubTensor.cpp
deleted file mode 100644
index 2e640dd93c..0000000000
--- a/src/graph/SubTensor.cpp
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/SubTensor.h"
-
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/runtime/CL/CLSubTensor.h"
-#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/SubTensor.h"
-#include "arm_compute/runtime/Tensor.h"
-#include "utils/TypePrinter.h"
-
-using namespace arm_compute::graph;
-
-namespace
-{
-template <typename SubTensorType, typename ParentTensorType>
-std::unique_ptr<arm_compute::ITensor> initialise_subtensor(arm_compute::ITensor *parent, TensorShape shape, Coordinates coords, bool extend_parent)
-{
- auto ptensor = dynamic_cast<ParentTensorType *>(parent);
- auto subtensor = arm_compute::support::cpp14::make_unique<SubTensorType>(ptensor, shape, coords, extend_parent);
- return std::move(subtensor);
-}
-} // namespace
-
-SubTensor::SubTensor()
- : _target(TargetHint::DONT_CARE), _tensor_shape(), _coords(), _parent(nullptr), _subtensor(nullptr), _extend_parent(false)
-{
-}
-
-SubTensor::SubTensor(Tensor &parent, TensorShape tensor_shape, Coordinates coords, bool extend_parent)
- : _target(TargetHint::DONT_CARE), _tensor_shape(tensor_shape), _coords(coords), _parent(nullptr), _subtensor(nullptr), _extend_parent(extend_parent)
-{
- ARM_COMPUTE_ERROR_ON(parent.tensor() == nullptr);
- _parent = parent.tensor();
- _target = parent.target();
-
- instantiate_subtensor();
-}
-
-SubTensor::SubTensor(arm_compute::ITensor *parent, TensorShape tensor_shape, Coordinates coords, TargetHint target, bool extend_parent)
- : _target(target), _tensor_shape(tensor_shape), _coords(coords), _parent(parent), _subtensor(nullptr), _extend_parent(extend_parent)
-{
- ARM_COMPUTE_ERROR_ON(parent == nullptr);
- instantiate_subtensor();
-}
-
-bool SubTensor::call_accessor()
-{
- return true;
-}
-
-bool SubTensor::has_accessor() const
-{
- return false;
-}
-
-arm_compute::ITensor *SubTensor::set_target(TargetHint target)
-{
- ARM_COMPUTE_ERROR_ON(target != _target);
- return (target == _target) ? _subtensor.get() : nullptr;
-}
-
-arm_compute::ITensor *SubTensor::tensor()
-{
- return _subtensor.get();
-}
-
-const arm_compute::ITensor *SubTensor::tensor() const
-{
- return _subtensor.get();
-}
-
-TargetHint SubTensor::target() const
-{
- return _target;
-}
-
-void SubTensor::allocate()
-{
- // NOP for sub-tensors
-}
-
-void SubTensor::instantiate_subtensor()
-{
- switch(_target)
- {
- case TargetHint::OPENCL:
- _subtensor = initialise_subtensor<arm_compute::CLSubTensor, arm_compute::ICLTensor>(_parent, _tensor_shape, _coords, _extend_parent);
- break;
- case TargetHint::NEON:
- _subtensor = initialise_subtensor<arm_compute::SubTensor, arm_compute::ITensor>(_parent, _tensor_shape, _coords, _extend_parent);
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid TargetHint");
- }
-}
diff --git a/src/graph/Tensor.cpp b/src/graph/Tensor.cpp
index 4db79e93ad..47fb5c65bc 100644
--- a/src/graph/Tensor.cpp
+++ b/src/graph/Tensor.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,138 +23,89 @@
*/
#include "arm_compute/graph/Tensor.h"
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/Tensor.h"
-#include "utils/TypePrinter.h"
-
-using namespace arm_compute::graph;
-
-namespace
+namespace arm_compute
{
-template <typename TensorType>
-std::unique_ptr<arm_compute::ITensor> initialise_tensor(TensorInfo &info)
+namespace graph
{
- auto tensor = arm_compute::support::cpp14::make_unique<TensorType>();
- tensor->allocator()->init(info);
- return std::move(tensor);
-}
-
-template <typename TensorType>
-void tensor_allocate(arm_compute::ITensor &tensor)
+Tensor::Tensor(TensorID id, TensorDescriptor desc)
+ : _id(id), _desc(desc), _handle(nullptr), _accessor(nullptr), _bound_edges()
{
- auto itensor = dynamic_cast<TensorType *>(&tensor);
- ARM_COMPUTE_ERROR_ON_NULLPTR(itensor);
- itensor->allocator()->allocate();
}
-} // namespace
-Tensor::Tensor(TensorInfo &&info)
- : _target(TargetHint::DONT_CARE), _info(info), _accessor(nullptr), _tensor(nullptr)
+TensorID Tensor::id() const
{
+ return _id;
}
-Tensor::Tensor(Tensor &&src) noexcept
- : _target(src._target),
- _info(std::move(src._info)),
- _accessor(std::move(src._accessor)),
- _tensor(std::move(src._tensor))
+TensorDescriptor &Tensor::desc()
{
+ return _desc;
}
-void Tensor::set_info(TensorInfo &&info)
+const TensorDescriptor &Tensor::desc() const
{
- _info = info;
-}
-
-bool Tensor::call_accessor()
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(_accessor.get());
- auto cl_tensor = dynamic_cast<arm_compute::CLTensor *>(_tensor.get());
- if(cl_tensor != nullptr && cl_tensor->buffer() == nullptr)
- {
- cl_tensor->map();
- }
- bool retval = _accessor->access_tensor(*_tensor);
- if(cl_tensor != nullptr)
- {
- cl_tensor->unmap();
- }
- return retval;
+ return _desc;
}
-bool Tensor::has_accessor() const
+void Tensor::set_handle(std::unique_ptr<ITensorHandle> backend_tensor)
{
- return (_accessor != nullptr);
+ _handle = std::move(backend_tensor);
}
-arm_compute::ITensor *Tensor::tensor()
+ITensorHandle *Tensor::handle()
{
- return _tensor.get();
+ return _handle.get();
}
-const arm_compute::ITensor *Tensor::tensor() const
+void Tensor::set_accessor(std::unique_ptr<ITensorAccessor> accessor)
{
- return _tensor.get();
+ _accessor = std::move(accessor);
}
-const TensorInfo &Tensor::info() const
+ITensorAccessor *Tensor::accessor()
{
- return _info;
+ return _accessor.get();
}
-arm_compute::ITensor *Tensor::set_target(TargetHint target)
+bool Tensor::call_accessor()
{
- if(_tensor != nullptr)
+ // Early exit guard
+ if(!_accessor || !_handle)
{
- ARM_COMPUTE_ERROR_ON(target != _target);
+ return false;
}
- else
+
+ // Map tensor
+ _handle->map(true);
+
+ // Return in case of null backend buffer
+ if(_handle->tensor().buffer() == nullptr)
{
- switch(target)
- {
- case TargetHint::OPENCL:
- _tensor = initialise_tensor<arm_compute::CLTensor>(_info);
- break;
- case TargetHint::NEON:
- _tensor = initialise_tensor<arm_compute::Tensor>(_info);
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid TargetHint");
- }
- _target = target;
+ return false;
}
- return _tensor.get();
+
+ // Call accessor
+ _accessor->access_tensor(_handle->tensor());
+
+ // Unmap tensor
+ _handle->unmap();
+
+ return true;
}
-void Tensor::allocate()
+void Tensor::bind_edge(EdgeID eid)
{
- ARM_COMPUTE_ERROR_ON_NULLPTR(_tensor.get());
- switch(_target)
- {
- case TargetHint::OPENCL:
- tensor_allocate<arm_compute::CLTensor>(*_tensor);
- break;
- case TargetHint::NEON:
- tensor_allocate<arm_compute::Tensor>(*_tensor);
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid TargetHint");
- }
+ _bound_edges.insert(eid);
}
-void Tensor::allocate_and_fill_if_needed()
+void Tensor::unbind_edge(EdgeID eid)
{
- allocate();
- if(_accessor != nullptr)
- {
- call_accessor();
- }
+ _bound_edges.erase(eid);
}
-TargetHint Tensor::target() const
+const std::set<EdgeID> Tensor::bound_edges() const
{
- return _target;
+ return _bound_edges;
}
+} // namespace graph
+} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/Utils.cpp b/src/graph/Utils.cpp
index a3e90f43bc..8537bbfb2a 100644
--- a/src/graph2/Utils.cpp
+++ b/src/graph/Utils.cpp
@@ -21,19 +21,19 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/Utils.h"
+#include "arm_compute/graph/Utils.h"
-#include "arm_compute/graph2/GraphContext.h"
-#include "arm_compute/graph2/backends/BackendRegistry.h"
-#include "arm_compute/graph2/mutators/GraphMutators.h"
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/backends/BackendRegistry.h"
+#include "arm_compute/graph/mutators/GraphMutators.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
bool is_target_supported(Target target)
{
- return backends::BackendRegistry::get().contains(target);
+ return backends::BackendRegistry::get().contains(target) && backends::BackendRegistry::get().find_backend(target)->is_backend_supported();
}
Target get_default_target()
@@ -100,5 +100,5 @@ void setup_default_graph_context(GraphContext &ctx)
backend.second->setup_backend_context(ctx);
}
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/Workload.cpp b/src/graph/Workload.cpp
index 3fd36fabc7..c53a8a42da 100644
--- a/src/graph2/Workload.cpp
+++ b/src/graph/Workload.cpp
@@ -21,14 +21,14 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/Workload.h"
+#include "arm_compute/graph/Workload.h"
-#include "arm_compute/graph2/INode.h"
-#include "arm_compute/graph2/ITensorHandle.h"
+#include "arm_compute/graph/INode.h"
+#include "arm_compute/graph/ITensorHandle.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
void ExecutionTask::operator()()
{
@@ -37,5 +37,5 @@ void ExecutionTask::operator()()
task->run();
}
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/backends/BackendRegistry.cpp b/src/graph/backends/BackendRegistry.cpp
index 5f1218f335..2803322e64 100644
--- a/src/graph2/backends/BackendRegistry.cpp
+++ b/src/graph/backends/BackendRegistry.cpp
@@ -21,13 +21,13 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/backends/BackendRegistry.h"
+#include "arm_compute/graph/backends/BackendRegistry.h"
-using namespace arm_compute::graph2::backends;
+using namespace arm_compute::graph::backends;
namespace arm_compute
{
-namespace graph2
+namespace graph
{
namespace backends
{
@@ -59,5 +59,5 @@ const std::map<Target, std::unique_ptr<IDeviceBackend>> &BackendRegistry::backen
return _registered_backends;
}
} // namespace backends
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute
diff --git a/src/graph2/backends/CL/CLDeviceBackend.cpp b/src/graph/backends/CL/CLDeviceBackend.cpp
index 71566d2f1f..f10eb33a98 100644
--- a/src/graph2/backends/CL/CLDeviceBackend.cpp
+++ b/src/graph/backends/CL/CLDeviceBackend.cpp
@@ -21,18 +21,18 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/backends/CL/CLDeviceBackend.h"
-
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/GraphContext.h"
-#include "arm_compute/graph2/INode.h"
-#include "arm_compute/graph2/Logger.h"
-#include "arm_compute/graph2/Tensor.h"
-#include "arm_compute/graph2/backends/BackendRegistrar.h"
-#include "arm_compute/graph2/backends/CL/CLFunctionFactory.h"
-#include "arm_compute/graph2/backends/CL/CLNodeValidator.h"
-#include "arm_compute/graph2/backends/CL/CLSubTensorHandle.h"
-#include "arm_compute/graph2/backends/CL/CLTensorHandle.h"
+#include "arm_compute/graph/backends/CL/CLDeviceBackend.h"
+
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/INode.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/Tensor.h"
+#include "arm_compute/graph/backends/BackendRegistrar.h"
+#include "arm_compute/graph/backends/CL/CLFunctionFactory.h"
+#include "arm_compute/graph/backends/CL/CLNodeValidator.h"
+#include "arm_compute/graph/backends/CL/CLSubTensorHandle.h"
+#include "arm_compute/graph/backends/CL/CLTensorHandle.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/BlobLifetimeManager.h"
@@ -45,7 +45,7 @@
namespace arm_compute
{
-namespace graph2
+namespace graph
{
namespace backends
{
@@ -114,6 +114,11 @@ void CLDeviceBackend::setup_backend_context(GraphContext &ctx)
}
}
+bool CLDeviceBackend::is_backend_supported()
+{
+ return arm_compute::opencl_is_available();
+}
+
std::unique_ptr<ITensorHandle> CLDeviceBackend::create_tensor(const Tensor &tensor)
{
// Get tensor descriptor
@@ -171,5 +176,5 @@ std::shared_ptr<arm_compute::IMemoryManager> CLDeviceBackend::create_memory_mana
return mm;
}
} // namespace backends
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/backends/CL/CLFunctionsFactory.cpp b/src/graph/backends/CL/CLFunctionsFactory.cpp
index 5a51b19e18..1b448fefd2 100644
--- a/src/graph2/backends/CL/CLFunctionsFactory.cpp
+++ b/src/graph/backends/CL/CLFunctionsFactory.cpp
@@ -21,16 +21,16 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/backends/CL/CLFunctionFactory.h"
+#include "arm_compute/graph/backends/CL/CLFunctionFactory.h"
#include "arm_compute/core/utils/misc/Cast.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/GraphContext.h"
-#include "arm_compute/graph2/Logger.h"
-#include "arm_compute/graph2/TypePrinter.h"
-#include "arm_compute/graph2/Types.h"
-#include "arm_compute/graph2/backends/Utils.h"
-#include "arm_compute/graph2/nodes/Nodes.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/TypePrinter.h"
+#include "arm_compute/graph/Types.h"
+#include "arm_compute/graph/backends/Utils.h"
+#include "arm_compute/graph/nodes/Nodes.h"
#include "arm_compute/runtime/CL/CLFunctions.h"
#include "support/ToolchainSupport.h"
@@ -39,7 +39,7 @@ using namespace arm_compute::utils::cast;
namespace arm_compute
{
-namespace graph2
+namespace graph
{
namespace backends
{
@@ -51,12 +51,12 @@ namespace
*
* @return Backing tensor if present else nullptr
*/
-arm_compute::ICLTensor *get_backing_tensor(arm_compute::graph2::Tensor *tensor)
+arm_compute::ICLTensor *get_backing_tensor(arm_compute::graph::Tensor *tensor)
{
arm_compute::ICLTensor *backing_tensor = nullptr;
if(tensor != nullptr)
{
- ARM_COMPUTE_ERROR_ON(tensor->desc().target != arm_compute::graph2::Target::CL);
+ ARM_COMPUTE_ERROR_ON(tensor->desc().target != arm_compute::graph::Target::CL);
// Get backing tensor handle
ITensorHandle *tensor_handle = tensor->handle();
// Get backing tensor
@@ -586,5 +586,5 @@ std::unique_ptr<IFunction> CLFunctionFactory::create(INode *node, GraphContext &
}
}
} // namespace backends
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/backends/CL/CLNodeValidator.cpp b/src/graph/backends/CL/CLNodeValidator.cpp
index 851285630e..c16b2e67df 100644
--- a/src/graph2/backends/CL/CLNodeValidator.cpp
+++ b/src/graph/backends/CL/CLNodeValidator.cpp
@@ -21,10 +21,10 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/backends/CL/CLNodeValidator.h"
+#include "arm_compute/graph/backends/CL/CLNodeValidator.h"
-#include "arm_compute/graph2/backends/ValidateHelpers.h"
-#include "arm_compute/graph2/nodes/Nodes.h"
+#include "arm_compute/graph/backends/ValidateHelpers.h"
+#include "arm_compute/graph/nodes/Nodes.h"
#include "arm_compute/core/utils/misc/Cast.h"
#include "arm_compute/runtime/CL/CLFunctions.h"
@@ -33,7 +33,7 @@ using namespace arm_compute::utils::cast;
namespace arm_compute
{
-namespace graph2
+namespace graph
{
namespace backends
{
@@ -60,5 +60,5 @@ Status CLNodeValidator::validate(INode *node)
}
}
} // namespace backends
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/backends/CL/CLSubTensorHandle.cpp b/src/graph/backends/CL/CLSubTensorHandle.cpp
index 65a1ba4d5f..a1bc8a1dd3 100644
--- a/src/graph2/backends/CL/CLSubTensorHandle.cpp
+++ b/src/graph/backends/CL/CLSubTensorHandle.cpp
@@ -21,13 +21,13 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/backends/CL/CLSubTensorHandle.h"
+#include "arm_compute/graph/backends/CL/CLSubTensorHandle.h"
#include "arm_compute/core/utils/misc/Cast.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
namespace backends
{
@@ -74,5 +74,5 @@ bool CLSubTensorHandle::is_subtensor() const
return true;
}
} // namespace backends
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/backends/CL/CLTensorHandle.cpp b/src/graph/backends/CL/CLTensorHandle.cpp
index 89678fb280..563c4d9ac6 100644
--- a/src/graph2/backends/CL/CLTensorHandle.cpp
+++ b/src/graph/backends/CL/CLTensorHandle.cpp
@@ -21,11 +21,11 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/backends/CL/CLTensorHandle.h"
+#include "arm_compute/graph/backends/CL/CLTensorHandle.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
namespace backends
{
@@ -74,5 +74,5 @@ bool CLTensorHandle::is_subtensor() const
return false;
}
} // namespace backends
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/backends/GLES/GCDeviceBackend.cpp b/src/graph/backends/GLES/GCDeviceBackend.cpp
index 7dab422a82..8cd9994744 100644
--- a/src/graph2/backends/GLES/GCDeviceBackend.cpp
+++ b/src/graph/backends/GLES/GCDeviceBackend.cpp
@@ -21,17 +21,17 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/backends/GLES/GCDeviceBackend.h"
-
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/GraphContext.h"
-#include "arm_compute/graph2/INode.h"
-#include "arm_compute/graph2/Logger.h"
-#include "arm_compute/graph2/Tensor.h"
-#include "arm_compute/graph2/backends/BackendRegistrar.h"
-#include "arm_compute/graph2/backends/GLES/GCFunctionFactory.h"
-#include "arm_compute/graph2/backends/GLES/GCNodeValidator.h"
-#include "arm_compute/graph2/backends/GLES/GCTensorHandle.h"
+#include "arm_compute/graph/backends/GLES/GCDeviceBackend.h"
+
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/INode.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/Tensor.h"
+#include "arm_compute/graph/backends/BackendRegistrar.h"
+#include "arm_compute/graph/backends/GLES/GCFunctionFactory.h"
+#include "arm_compute/graph/backends/GLES/GCNodeValidator.h"
+#include "arm_compute/graph/backends/GLES/GCTensorHandle.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/BlobLifetimeManager.h"
@@ -44,7 +44,7 @@
namespace arm_compute
{
-namespace graph2
+namespace graph
{
namespace backends
{
@@ -75,6 +75,11 @@ void GCDeviceBackend::setup_backend_context(GraphContext &ctx)
}
}
+bool GCDeviceBackend::is_backend_supported()
+{
+ return arm_compute::opengles31_is_available();
+}
+
std::unique_ptr<ITensorHandle> GCDeviceBackend::create_tensor(const Tensor &tensor)
{
// Get tensor descriptor
@@ -129,5 +134,5 @@ std::shared_ptr<arm_compute::IMemoryManager> GCDeviceBackend::create_memory_mana
return mm;
}
} // namespace backends
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/backends/GLES/GCFunctionsFactory.cpp b/src/graph/backends/GLES/GCFunctionsFactory.cpp
index 24ab2bce37..12e7c042d4 100644
--- a/src/graph2/backends/GLES/GCFunctionsFactory.cpp
+++ b/src/graph/backends/GLES/GCFunctionsFactory.cpp
@@ -21,16 +21,16 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/backends/GLES/GCFunctionFactory.h"
+#include "arm_compute/graph/backends/GLES/GCFunctionFactory.h"
#include "arm_compute/core/utils/misc/Cast.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/GraphContext.h"
-#include "arm_compute/graph2/Logger.h"
-#include "arm_compute/graph2/TypePrinter.h"
-#include "arm_compute/graph2/Types.h"
-#include "arm_compute/graph2/backends/Utils.h"
-#include "arm_compute/graph2/nodes/Nodes.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/TypePrinter.h"
+#include "arm_compute/graph/Types.h"
+#include "arm_compute/graph/backends/Utils.h"
+#include "arm_compute/graph/nodes/Nodes.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCFunctions.h"
#include "support/ToolchainSupport.h"
@@ -39,7 +39,7 @@ using namespace arm_compute::utils::cast;
namespace arm_compute
{
-namespace graph2
+namespace graph
{
namespace backends
{
@@ -51,12 +51,12 @@ namespace
*
* @return Backing tensor if present else nullptr
*/
-arm_compute::IGCTensor *get_backing_tensor(arm_compute::graph2::Tensor *tensor)
+arm_compute::IGCTensor *get_backing_tensor(arm_compute::graph::Tensor *tensor)
{
arm_compute::IGCTensor *backing_tensor = nullptr;
if(tensor != nullptr)
{
- ARM_COMPUTE_ERROR_ON(tensor->desc().target != arm_compute::graph2::Target::GC);
+ ARM_COMPUTE_ERROR_ON(tensor->desc().target != arm_compute::graph::Target::GC);
// Get backing tensor handle
ITensorHandle *tensor_handle = tensor->handle();
// Get backing tensor
@@ -503,5 +503,5 @@ std::unique_ptr<IFunction> GCFunctionFactory::create(INode *node, GraphContext &
}
}
} // namespace backends
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/backends/GLES/GCNodeValidator.cpp b/src/graph/backends/GLES/GCNodeValidator.cpp
index b8daae566d..1e89265382 100644
--- a/src/graph2/backends/GLES/GCNodeValidator.cpp
+++ b/src/graph/backends/GLES/GCNodeValidator.cpp
@@ -21,10 +21,10 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/backends/GLES/GCNodeValidator.h"
+#include "arm_compute/graph/backends/GLES/GCNodeValidator.h"
-#include "arm_compute/graph2/backends/ValidateHelpers.h"
-#include "arm_compute/graph2/nodes/Nodes.h"
+#include "arm_compute/graph/backends/ValidateHelpers.h"
+#include "arm_compute/graph/nodes/Nodes.h"
#include "arm_compute/core/utils/misc/Cast.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCFunctions.h"
@@ -33,7 +33,7 @@ using namespace arm_compute::utils::cast;
namespace arm_compute
{
-namespace graph2
+namespace graph
{
namespace backends
{
@@ -118,5 +118,5 @@ Status GCNodeValidator::validate(INode *node)
}
}
} // namespace backends
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/backends/GLES/GCTensorHandle.cpp b/src/graph/backends/GLES/GCTensorHandle.cpp
index 2165cd2de6..ae7c778130 100644
--- a/src/graph2/backends/GLES/GCTensorHandle.cpp
+++ b/src/graph/backends/GLES/GCTensorHandle.cpp
@@ -21,11 +21,11 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/backends/GLES/GCTensorHandle.h"
+#include "arm_compute/graph/backends/GLES/GCTensorHandle.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
namespace backends
{
@@ -74,5 +74,5 @@ bool GCTensorHandle::is_subtensor() const
return false;
}
} // namespace backends
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/backends/NEON/NEDeviceBackend.cpp b/src/graph/backends/NEON/NEDeviceBackend.cpp
index 6cb507b4f1..87f88dffdf 100644
--- a/src/graph2/backends/NEON/NEDeviceBackend.cpp
+++ b/src/graph/backends/NEON/NEDeviceBackend.cpp
@@ -21,18 +21,18 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/backends/NEON/NEDeviceBackend.h"
-
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/GraphContext.h"
-#include "arm_compute/graph2/INode.h"
-#include "arm_compute/graph2/Logger.h"
-#include "arm_compute/graph2/Tensor.h"
-#include "arm_compute/graph2/backends/BackendRegistrar.h"
-#include "arm_compute/graph2/backends/NEON/NEFunctionFactory.h"
-#include "arm_compute/graph2/backends/NEON/NENodeValidator.h"
-#include "arm_compute/graph2/backends/NEON/NESubTensorHandle.h"
-#include "arm_compute/graph2/backends/NEON/NETensorHandle.h"
+#include "arm_compute/graph/backends/NEON/NEDeviceBackend.h"
+
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/INode.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/Tensor.h"
+#include "arm_compute/graph/backends/BackendRegistrar.h"
+#include "arm_compute/graph/backends/NEON/NEFunctionFactory.h"
+#include "arm_compute/graph/backends/NEON/NENodeValidator.h"
+#include "arm_compute/graph/backends/NEON/NESubTensorHandle.h"
+#include "arm_compute/graph/backends/NEON/NETensorHandle.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/Allocator.h"
@@ -46,7 +46,7 @@
namespace arm_compute
{
-namespace graph2
+namespace graph
{
namespace backends
{
@@ -78,6 +78,11 @@ void NEDeviceBackend::setup_backend_context(GraphContext &ctx)
}
}
+bool NEDeviceBackend::is_backend_supported()
+{
+ return true;
+}
+
std::unique_ptr<ITensorHandle> NEDeviceBackend::create_tensor(const Tensor &tensor)
{
// Get tensor descriptor
@@ -137,5 +142,5 @@ std::shared_ptr<arm_compute::IMemoryManager> NEDeviceBackend::create_memory_mana
return mm;
}
} // namespace backends
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/backends/NEON/NEFunctionFactory.cpp b/src/graph/backends/NEON/NEFunctionFactory.cpp
index 933210377d..228af9ca6f 100644
--- a/src/graph2/backends/NEON/NEFunctionFactory.cpp
+++ b/src/graph/backends/NEON/NEFunctionFactory.cpp
@@ -21,15 +21,15 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/backends/NEON/NEFunctionFactory.h"
+#include "arm_compute/graph/backends/NEON/NEFunctionFactory.h"
#include "arm_compute/core/utils/misc/Cast.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/GraphContext.h"
-#include "arm_compute/graph2/Logger.h"
-#include "arm_compute/graph2/TypePrinter.h"
-#include "arm_compute/graph2/backends/Utils.h"
-#include "arm_compute/graph2/nodes/Nodes.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/TypePrinter.h"
+#include "arm_compute/graph/backends/Utils.h"
+#include "arm_compute/graph/nodes/Nodes.h"
#include "arm_compute/runtime/NEON/NEFunctions.h"
#include "support/ToolchainSupport.h"
@@ -37,7 +37,7 @@ using namespace arm_compute::utils::cast;
namespace arm_compute
{
-namespace graph2
+namespace graph
{
namespace backends
{
@@ -49,7 +49,7 @@ namespace
*
* @return Backing tensor if present else nullptr
*/
-arm_compute::ITensor *get_backing_tensor(arm_compute::graph2::Tensor *tensor)
+arm_compute::ITensor *get_backing_tensor(arm_compute::graph::Tensor *tensor)
{
return ((tensor == nullptr) || (tensor->handle() == nullptr)) ? nullptr : &tensor->handle()->tensor();
}
@@ -559,5 +559,5 @@ std::unique_ptr<IFunction> NEFunctionFactory::create(INode *node, GraphContext &
}
}
} // namespace backends
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/backends/NEON/NENodeValidator.cpp b/src/graph/backends/NEON/NENodeValidator.cpp
index 4620f4cd87..074f03580f 100644
--- a/src/graph2/backends/NEON/NENodeValidator.cpp
+++ b/src/graph/backends/NEON/NENodeValidator.cpp
@@ -21,10 +21,10 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/backends/NEON/NENodeValidator.h"
+#include "arm_compute/graph/backends/NEON/NENodeValidator.h"
-#include "arm_compute/graph2/backends/ValidateHelpers.h"
-#include "arm_compute/graph2/nodes/Nodes.h"
+#include "arm_compute/graph/backends/ValidateHelpers.h"
+#include "arm_compute/graph/nodes/Nodes.h"
#include "arm_compute/core/utils/misc/Cast.h"
#include "arm_compute/runtime/NEON/NEFunctions.h"
@@ -33,7 +33,7 @@ using namespace arm_compute::utils::cast;
namespace arm_compute
{
-namespace graph2
+namespace graph
{
namespace backends
{
@@ -61,5 +61,5 @@ Status NENodeValidator::validate(INode *node)
}
}
} // namespace backends
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/backends/NEON/NESubTensorHandle.cpp b/src/graph/backends/NEON/NESubTensorHandle.cpp
index 1cd15be29c..c48ba6b9d6 100644
--- a/src/graph2/backends/NEON/NESubTensorHandle.cpp
+++ b/src/graph/backends/NEON/NESubTensorHandle.cpp
@@ -21,11 +21,11 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/backends/NEON/NESubTensorHandle.h"
+#include "arm_compute/graph/backends/NEON/NESubTensorHandle.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
namespace backends
{
@@ -71,5 +71,5 @@ bool NESubTensorHandle::is_subtensor() const
return true;
}
} // namespace backends
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/backends/NEON/NETensorHandle.cpp b/src/graph/backends/NEON/NETensorHandle.cpp
index 0b901c3497..8508ac9511 100644
--- a/src/graph2/backends/NEON/NETensorHandle.cpp
+++ b/src/graph/backends/NEON/NETensorHandle.cpp
@@ -21,11 +21,11 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/backends/NEON/NETensorHandle.h"
+#include "arm_compute/graph/backends/NEON/NETensorHandle.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
namespace backends
{
@@ -73,5 +73,5 @@ bool NETensorHandle::is_subtensor() const
return false;
}
} // namespace backends
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/detail/ExecutionHelpers.cpp b/src/graph/detail/ExecutionHelpers.cpp
index 3688d0b0dc..5a50728164 100644
--- a/src/graph2/detail/ExecutionHelpers.cpp
+++ b/src/graph/detail/ExecutionHelpers.cpp
@@ -21,17 +21,17 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/detail/ExecutionHelpers.h"
+#include "arm_compute/graph/detail/ExecutionHelpers.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/GraphContext.h"
-#include "arm_compute/graph2/GraphManager.h"
-#include "arm_compute/graph2/Tensor.h"
-#include "arm_compute/graph2/backends/BackendRegistry.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/GraphManager.h"
+#include "arm_compute/graph/Tensor.h"
+#include "arm_compute/graph/backends/BackendRegistry.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
namespace detail
{
@@ -195,5 +195,5 @@ void call_all_output_node_accessors(ExecutionWorkload &workload)
}
}
} // namespace detail
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/frontend/Stream.cpp b/src/graph/frontend/Stream.cpp
index 4e794f28df..96a166c79c 100644
--- a/src/graph2/frontend/Stream.cpp
+++ b/src/graph/frontend/Stream.cpp
@@ -21,14 +21,14 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/frontend/Stream.h"
+#include "arm_compute/graph/frontend/Stream.h"
-#include "arm_compute/graph2/Utils.h"
-#include "arm_compute/graph2/frontend/ILayer.h"
+#include "arm_compute/graph/Utils.h"
+#include "arm_compute/graph/frontend/ILayer.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
namespace frontend
{
@@ -65,5 +65,5 @@ Graph &Stream::graph()
return _g;
}
} // namespace frontend
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/frontend/SubStream.cpp b/src/graph/frontend/SubStream.cpp
index e6fa605ad1..e8bd23a557 100644
--- a/src/graph2/frontend/SubStream.cpp
+++ b/src/graph/frontend/SubStream.cpp
@@ -21,14 +21,14 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/frontend/SubStream.h"
+#include "arm_compute/graph/frontend/SubStream.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/frontend/ILayer.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/frontend/ILayer.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
namespace frontend
{
@@ -55,5 +55,5 @@ Graph &SubStream::graph()
return _s.graph();
}
} // namespace frontend
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute
diff --git a/src/graph2/mutators/DepthConcatSubTensorMutator.cpp b/src/graph/mutators/DepthConcatSubTensorMutator.cpp
index ea3743bf21..c56f4c5106 100644
--- a/src/graph2/mutators/DepthConcatSubTensorMutator.cpp
+++ b/src/graph/mutators/DepthConcatSubTensorMutator.cpp
@@ -21,19 +21,19 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/mutators/DepthConcatSubTensorMutator.h"
+#include "arm_compute/graph/mutators/DepthConcatSubTensorMutator.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/Logger.h"
-#include "arm_compute/graph2/backends/BackendRegistry.h"
-#include "arm_compute/graph2/nodes/DepthConcatenateLayerNode.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/backends/BackendRegistry.h"
+#include "arm_compute/graph/nodes/DepthConcatenateLayerNode.h"
#include "arm_compute/core/utils/misc/Cast.h"
#include "arm_compute/core/utils/misc/Iterable.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
const char *DepthConcatSubTensorMutator::name()
{
@@ -82,5 +82,5 @@ void DepthConcatSubTensorMutator::mutate(Graph &g)
}
}
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute
diff --git a/src/graph2/mutators/InPlaceOperationMutator.cpp b/src/graph/mutators/InPlaceOperationMutator.cpp
index bb13e98999..bd3f098965 100644
--- a/src/graph2/mutators/InPlaceOperationMutator.cpp
+++ b/src/graph/mutators/InPlaceOperationMutator.cpp
@@ -21,14 +21,14 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/mutators/InPlaceOperationMutator.h"
+#include "arm_compute/graph/mutators/InPlaceOperationMutator.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/Logger.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/Logger.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
const char *InPlaceOperationMutator::name()
{
@@ -59,5 +59,5 @@ void InPlaceOperationMutator::mutate(Graph &g)
}
}
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute
diff --git a/src/graph2/mutators/NodeFusionMutator.cpp b/src/graph/mutators/NodeFusionMutator.cpp
index d0ab3e7e6b..2e893c2e07 100644
--- a/src/graph2/mutators/NodeFusionMutator.cpp
+++ b/src/graph/mutators/NodeFusionMutator.cpp
@@ -21,17 +21,17 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/mutators/NodeFusionMutator.h"
+#include "arm_compute/graph/mutators/NodeFusionMutator.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/Logger.h"
-#include "arm_compute/graph2/nodes/Nodes.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/nodes/Nodes.h"
#include "arm_compute/core/utils/misc/Cast.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
namespace detail
{
@@ -92,5 +92,5 @@ void NodeFusionMutator::mutate(Graph &g)
{
detail::fuse_batch_norm_with_activation(g);
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute
diff --git a/src/graph2/mutators/SplitLayerSubTensorMutator.cpp b/src/graph/mutators/SplitLayerSubTensorMutator.cpp
index 33494ba6bc..179a6c35fb 100644
--- a/src/graph2/mutators/SplitLayerSubTensorMutator.cpp
+++ b/src/graph/mutators/SplitLayerSubTensorMutator.cpp
@@ -21,19 +21,19 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/mutators/SplitLayerSubTensorMutator.h"
+#include "arm_compute/graph/mutators/SplitLayerSubTensorMutator.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/Logger.h"
-#include "arm_compute/graph2/backends/BackendRegistry.h"
-#include "arm_compute/graph2/nodes/SplitLayerNode.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/backends/BackendRegistry.h"
+#include "arm_compute/graph/nodes/SplitLayerNode.h"
#include "arm_compute/core/utils/misc/Cast.h"
#include "arm_compute/core/utils/misc/Iterable.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
const char *SplitLayerSubTensorMutator::name()
{
@@ -85,5 +85,5 @@ void SplitLayerSubTensorMutator::mutate(Graph &g)
}
}
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute
diff --git a/src/graph/nodes/ActivationLayer.cpp b/src/graph/nodes/ActivationLayer.cpp
deleted file mode 100644
index 546c42a1e5..0000000000
--- a/src/graph/nodes/ActivationLayer.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/ActivationLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-ActivationLayer::ActivationLayer(const ActivationLayerInfo activation_info)
- : _activation_info(activation_info)
-{
- set_supports_in_place(true);
-}
-
-std::unique_ptr<arm_compute::IFunction> ActivationLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
- _target_hint = ctx.hints().target_hint();
-
- // Create node context
- NodeContext node_ctx(OperationType::ActivationLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_output(out);
- node_ctx.add_parameter<ActivationLayerInfo>("ActivationLayerInfo", _activation_info);
-
- // Get function
- return OperationRegistry::get().find_operation(OperationType::ActivationLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph2/nodes/ActivationLayerNode.cpp b/src/graph/nodes/ActivationLayerNode.cpp
index c7c36e9bbd..9996d2ce3f 100644
--- a/src/graph2/nodes/ActivationLayerNode.cpp
+++ b/src/graph/nodes/ActivationLayerNode.cpp
@@ -21,14 +21,14 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/nodes/ActivationLayerNode.h"
+#include "arm_compute/graph/nodes/ActivationLayerNode.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
ActivationLayerNode::ActivationLayerNode(ActivationLayerInfo info)
: _info(info)
@@ -79,5 +79,5 @@ void ActivationLayerNode::accept(INodeVisitor &v)
{
v.visit(*this);
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph/nodes/BatchNormalizationLayer.cpp b/src/graph/nodes/BatchNormalizationLayer.cpp
deleted file mode 100644
index 24287ac61a..0000000000
--- a/src/graph/nodes/BatchNormalizationLayer.cpp
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/BatchNormalizationLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-std::unique_ptr<arm_compute::IFunction> BatchNormalizationLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
- _target_hint = ctx.hints().target_hint();
-
- unsigned int batch_norm_size = in->info()->dimension(2);
- if(_mean.tensor() == nullptr)
- {
- _mean.set_info(TensorInfo(TensorShape(batch_norm_size), in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position()));
- }
- if(_var.tensor() == nullptr)
- {
- _var.set_info(TensorInfo(TensorShape(batch_norm_size), in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position()));
- }
- if(_beta.tensor() == nullptr)
- {
- _beta.set_info(TensorInfo(TensorShape(batch_norm_size), in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position()));
- }
- if(_gamma.tensor() == nullptr)
- {
- _gamma.set_info(TensorInfo(TensorShape(batch_norm_size), in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position()));
- }
-
- bool mean_is_loaded = _mean.tensor() != nullptr;
- bool var_is_loaded = _var.tensor() != nullptr;
- bool gamma_is_loaded = _gamma.tensor() != nullptr;
- bool beta_is_loaded = _beta.tensor() != nullptr;
-
- // Set mean, var, gamma and beta target
- _mean.set_target(_target_hint);
- _var.set_target(_target_hint);
- _gamma.set_target(_target_hint);
- _beta.set_target(_target_hint);
-
- // Create node context
- NodeContext node_ctx(OperationType::BatchNormalizationLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_input(_mean.tensor());
- node_ctx.add_input(_var.tensor());
- node_ctx.add_input(_beta.tensor());
- node_ctx.add_input(_gamma.tensor());
- node_ctx.add_output(out);
- node_ctx.add_parameter<float>("epsilon", _epsilon);
- node_ctx.add_parameter<ActivationLayerInfo>("act_info", _act_info);
-
- // Configure operation
- auto func = OperationRegistry::get().find_operation(OperationType::BatchNormalizationLayer, _target_hint)->configure(node_ctx);
-
- // Fill tensors
- if(!mean_is_loaded)
- {
- _mean.allocate_and_fill_if_needed();
- }
- if(!var_is_loaded)
- {
- _var.allocate_and_fill_if_needed();
- }
- if(!gamma_is_loaded)
- {
- _gamma.allocate_and_fill_if_needed();
- }
- if(!beta_is_loaded)
- {
- _beta.allocate_and_fill_if_needed();
- }
-
- // Get function
- return func;
-} \ No newline at end of file
diff --git a/src/graph2/nodes/BatchNormalizationLayerNode.cpp b/src/graph/nodes/BatchNormalizationLayerNode.cpp
index b9f634210c..f7b041c828 100644
--- a/src/graph2/nodes/BatchNormalizationLayerNode.cpp
+++ b/src/graph/nodes/BatchNormalizationLayerNode.cpp
@@ -21,15 +21,15 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/nodes/BatchNormalizationLayerNode.h"
+#include "arm_compute/graph/nodes/BatchNormalizationLayerNode.h"
#include "arm_compute/core/Utils.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
BatchNormalizationLayerNode::BatchNormalizationLayerNode(float epsilon, ActivationLayerInfo fused_activation)
: _epsilon(epsilon), _fused_activation(fused_activation)
@@ -90,5 +90,5 @@ void BatchNormalizationLayerNode::accept(INodeVisitor &v)
{
v.visit(*this);
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph/nodes/BranchLayer.cpp b/src/graph/nodes/BranchLayer.cpp
deleted file mode 100644
index 7a20a565b8..0000000000
--- a/src/graph/nodes/BranchLayer.cpp
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/BranchLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/SubGraph.h"
-#include "arm_compute/graph/Tensor.h"
-#include "arm_compute/runtime/IFunction.h"
-#include "support/ToolchainSupport.h"
-#include "utils/TypePrinter.h"
-
-#include <memory>
-#include <tuple>
-#include <vector>
-
-using namespace arm_compute::graph;
-
-/** Branch function */
-class BranchFunction final : public arm_compute::IFunction
-{
-public:
- /** Default Constructor */
- BranchFunction()
- : _graphs()
- {
- }
- /** Registers graph to be executed by the branch function
- *
- * @param[in] graph Graph to register
- */
- void register_graph(std::unique_ptr<Graph> graph)
- {
- _graphs.push_back(std::move(graph));
- }
- // Inherited methods overriden:
- void run() override
- {
- for(auto &g : _graphs)
- {
- ARM_COMPUTE_ERROR_ON(g.get() == nullptr);
- g->run();
- }
- }
-
-private:
- std::vector<std::unique_ptr<Graph>> _graphs;
-};
-
-std::unique_ptr<arm_compute::IFunction> BranchLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON(_branch_merge_method != BranchMergeMethod::DEPTH_CONCATENATE);
- ARM_COMPUTE_UNUSED(_branch_merge_method);
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- // Create branch function
- auto func = arm_compute::support::cpp14::make_unique<BranchFunction>();
-
- // Track output depth
- int depth = 0;
-
- // Constuct all sub-graphs given the input/output
- for(auto &sg : _sub_graphs)
- {
- ARM_COMPUTE_ERROR_ON(sg.get() == nullptr);
-
- // IO buffers
- std::unique_ptr<ITensorObject> in;
- std::unique_ptr<ITensorObject> out;
- SubTensor *out_sub_tensor = nullptr;
-
- // Create input sub-tensor
- if(!sg->has_input())
- {
- ARM_COMPUTE_ERROR_ON(dynamic_cast<Tensor *>(input) == nullptr);
- in = arm_compute::support::cpp14::make_unique<SubTensor>(*dynamic_cast<Tensor *>(input),
- input->tensor()->info()->tensor_shape(),
- Coordinates());
- }
-
- // Create output sub-tensor
- if(!sg->has_output())
- {
- ARM_COMPUTE_ERROR_ON((dynamic_cast<Tensor *>(output) == nullptr) && (dynamic_cast<SubTensor *>(output) == nullptr));
-
- out = arm_compute::support::cpp14::make_unique<SubTensor>(output->tensor(),
- TensorShape(),
- Coordinates(0, 0, depth),
- output->target(),
- true);
- out_sub_tensor = dynamic_cast<SubTensor *>(out.get());
- }
-
- // Construct sub_graph
- auto g = sg->construct(ctx, std::move(in), std::move(out));
-
- // Register graph to function
- func->register_graph(std::move(g));
-
- // Update and track depth
- if(out_sub_tensor != nullptr)
- {
- ARM_COMPUTE_ERROR_ON(out_sub_tensor->tensor() == nullptr);
- depth += out_sub_tensor->tensor()->info()->tensor_shape()[2];
- }
- }
-
- return std::move(func);
-} \ No newline at end of file
diff --git a/src/graph2/nodes/ConstNode.cpp b/src/graph/nodes/ConstNode.cpp
index 5bd6a8180c..631971c98f 100644
--- a/src/graph2/nodes/ConstNode.cpp
+++ b/src/graph/nodes/ConstNode.cpp
@@ -21,14 +21,14 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/nodes/ConstNode.h"
+#include "arm_compute/graph/nodes/ConstNode.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
ConstNode::ConstNode(TensorDescriptor desc)
: _desc(desc)
@@ -68,5 +68,5 @@ void ConstNode::accept(INodeVisitor &v)
{
v.visit(*this);
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute
diff --git a/src/graph/nodes/ConvolutionLayer.cpp b/src/graph/nodes/ConvolutionLayer.cpp
deleted file mode 100644
index 5b3a84a4ad..0000000000
--- a/src/graph/nodes/ConvolutionLayer.cpp
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/ConvolutionLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h"
-#include "arm_compute/runtime/IFunction.h"
-#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEWinogradLayer.h"
-#include "support/ToolchainSupport.h"
-#include "utils/GraphTypePrinter.h"
-#include "utils/TypePrinter.h"
-
-#include <tuple>
-#include <vector>
-
-using namespace arm_compute::graph;
-
-namespace
-{
-/** Calculates the output shaped of the convolution layer
- *
- * @param[in] input_shape Input tensor shape
- * @param[in] weights_shape Weights shape
- * @param[in] conv_info Convolution information (padding, stride, etc.)
- *
- * @return The expected output tensor shape
- */
-TensorShape calculate_convolution_layer_output_shape(const TensorShape &input_shape, const TensorShape &weights_shape, const PadStrideInfo &conv_info)
-{
- unsigned int output_width = 0;
- unsigned int output_height = 0;
-
- // Get output width and height
- std::tie(output_width, output_height) = arm_compute::scaled_dimensions(input_shape.x(), input_shape.y(), weights_shape.x(), weights_shape.y(), conv_info);
-
- // Create output shape
- TensorShape output_shape = input_shape;
- output_shape.set(0, output_width);
- output_shape.set(1, output_height);
- output_shape.set(2, weights_shape[3]);
-
- return output_shape;
-}
-
-// Instantiate GEMM based convolution layer
-template <typename ConvolutionType, typename TensorType, TargetHint target_hint>
-std::unique_ptr<arm_compute::IFunction> instantiate_function(arm_compute::ITensor *input, arm_compute::ITensor *weights, arm_compute::ITensor *biases, arm_compute::ITensor *output,
- const PadStrideInfo &conv_info, const WeightsInfo &weights_info)
-{
- auto conv = arm_compute::support::cpp14::make_unique<ConvolutionType>();
- conv->configure(
- dynamic_cast<TensorType *>(input),
- dynamic_cast<TensorType *>(weights),
- dynamic_cast<TensorType *>(biases),
- dynamic_cast<TensorType *>(output),
- conv_info, weights_info);
- return std::move(conv);
-}
-
-// Instantiate direct convolution layer
-template <typename ConvolutionType, typename TensorType, TargetHint target_hint>
-std::unique_ptr<arm_compute::IFunction> instantiate_direct_function(arm_compute::ITensor *input, arm_compute::ITensor *weights, arm_compute::ITensor *biases, arm_compute::ITensor *output,
- const PadStrideInfo &conv_info)
-{
- auto conv = arm_compute::support::cpp14::make_unique<ConvolutionType>();
- conv->configure(
- dynamic_cast<TensorType *>(input),
- dynamic_cast<TensorType *>(weights),
- dynamic_cast<TensorType *>(biases),
- dynamic_cast<TensorType *>(output),
- conv_info);
- return std::move(conv);
-}
-
-template <TargetHint target_hint>
-std::unique_ptr<arm_compute::IFunction> instantiate(arm_compute::ITensor *input, arm_compute::ITensor *weights, arm_compute::ITensor *biases, arm_compute::ITensor *output,
- const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
- ConvolutionMethodHint conv_method);
-
-template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(arm_compute::ITensor *input, arm_compute::ITensor *weights, arm_compute::ITensor *biases, arm_compute::ITensor *output,
- const PadStrideInfo &conv_info,
- const WeightsInfo &weights_info,
- ConvolutionMethodHint conv_method)
-{
- if((conv_method == ConvolutionMethodHint::WINOGRAD)
- && arm_compute::CLWinogradConvolutionLayer::validate(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), conv_info)) // NOLINT
- {
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLWinogradConvolutionLayer");
- return instantiate_direct_function<arm_compute::CLWinogradConvolutionLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, weights, biases, output, conv_info);
- }
- else if((conv_method == ConvolutionMethodHint::DIRECT)
- && arm_compute::CLDirectConvolutionLayer::validate(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), conv_info)) // NOLINT
- {
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLDirectConvolutionLayer");
- return instantiate_direct_function<arm_compute::CLDirectConvolutionLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, weights, biases, output, conv_info);
- }
- else
- {
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLConvolutionLayer");
- return instantiate_function<arm_compute::CLConvolutionLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, weights, biases, output, conv_info, weights_info);
- }
-}
-
-template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(arm_compute::ITensor *input, arm_compute::ITensor *weights, arm_compute::ITensor *biases, arm_compute::ITensor *output,
- const PadStrideInfo &conv_info,
- const WeightsInfo &weights_info,
- ConvolutionMethodHint conv_method)
-{
- const unsigned int kernel_size_x = weights->info()->tensor_shape().x();
- const unsigned int kernel_size_y = weights->info()->tensor_shape().y();
- const unsigned int conv_stride_x = conv_info.stride().first;
- const unsigned int conv_stride_y = conv_info.stride().second;
-
- bool is_square_kernel = (kernel_size_x == kernel_size_y);
- bool has_same_stride = (conv_stride_x == conv_stride_y);
-
- // TODO (COMPID-765) : Winograd should have a validate function
- if(conv_method == ConvolutionMethodHint::WINOGRAD && is_square_kernel && ((kernel_size_x == 3) || (kernel_size_x == 5)) && has_same_stride && (conv_info.stride().first == 1))
- {
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEWinogradConvolutionLayer");
- return instantiate_direct_function<arm_compute::NEWinogradLayer, arm_compute::ITensor, TargetHint::NEON>(input, weights, biases, output, conv_info);
- }
- else if((conv_method == ConvolutionMethodHint::DIRECT)
- && arm_compute::NEDirectConvolutionLayer::validate(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), conv_info)) // NOLINT
- {
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEDirectConvolutionLayer");
- return instantiate_direct_function<arm_compute::NEDirectConvolutionLayer, arm_compute::ITensor, TargetHint::NEON>(input, weights, biases, output, conv_info);
- }
- else
- {
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEConvolutionLayer");
- return instantiate_function<arm_compute::NEConvolutionLayer, arm_compute::ITensor, TargetHint::NEON>(input, weights, biases, output, conv_info, weights_info);
- }
-}
-} // namespace
-
-/** Grouped Convolution function */
-class GroupedConvolutionFunction final : public arm_compute::IFunction
-{
-public:
- /** Default Constructor */
- GroupedConvolutionFunction() = default;
- /** Default Destructor */
- ~GroupedConvolutionFunction() final = default;
- /** Prevent instances from being copy constructed */
- GroupedConvolutionFunction(const GroupedConvolutionFunction &) = delete;
- /** Prevent instances from being copy assigned */
- GroupedConvolutionFunction &operator=(const GroupedConvolutionFunction &) = delete;
- /** Allow instances to be move constructed */
- GroupedConvolutionFunction(GroupedConvolutionFunction &&) noexcept = default;
- /** Allow instances to be move assigned */
- GroupedConvolutionFunction &operator=(GroupedConvolutionFunction &&) noexcept = default;
- /** Adds a convolution
- *
- * @param convolution Convolution function to add
- */
- void add_convolution_function(std::unique_ptr<IFunction> convolution) // NOLINT
- {
- _convolutions.emplace_back(std::move(convolution));
- }
-
- // Inherited methods overridden:
- void run() override
- {
- for(auto &c : _convolutions)
- {
- c->run();
- }
- }
-
-private:
- std::vector<std::unique_ptr<IFunction>> _convolutions{};
-};
-
-std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
-
- // Set weights and biases info
- if(_weights.tensor() == nullptr)
- {
- TensorInfo info = TensorInfo(TensorShape(_conv_width, _conv_height, in->info()->dimension(2) / _num_groups, _ofm),
- in->info()->num_channels(),
- in->info()->data_type(),
- in->info()->fixed_point_position());
- info.set_quantization_info(_weights_quant_info);
- _weights.set_info(std::move(info));
- }
- if(_biases.has_accessor() && _biases.tensor() == nullptr)
- {
- DataType dt = in->info()->data_type();
- _biases.set_info(TensorInfo(TensorShape(_ofm), in->info()->num_channels(), is_data_type_quantized_asymmetric(dt) ? DataType::S32 : dt, in->info()->fixed_point_position()));
- }
-
- std::unique_ptr<arm_compute::IFunction> func;
- _target_hint = ctx.hints().target_hint();
- const ConvolutionMethodHint conv_method_hint = ctx.hints().convolution_method_hint();
-
- // Check if the weights and biases are loaded
- bool weights_are_loaded = _weights.tensor() != nullptr;
- bool biases_are_loaded = _biases.has_accessor() ? _biases.tensor() != nullptr : true;
-
- // Set bias and weights target
- _weights.set_target(_target_hint);
- if(_biases.has_accessor())
- {
- _biases.set_target(_target_hint);
- }
-
- // Calculate output shape
- TensorShape output_shape = calculate_convolution_layer_output_shape(in->info()->tensor_shape(), _weights.info().tensor_shape(), _conv_info);
-
- // Output auto inizialitation if not yet initialized
- arm_compute::auto_init_if_empty(*out->info(), output_shape, 1, in->info()->data_type(), in->info()->fixed_point_position(),
- (_out_quant_info.empty()) ? in->info()->quantization_info() : _out_quant_info);
-
- // Create appropriate convolution function
- if(_num_groups == 1)
- {
- func = instantiate_convolution(in, out, conv_method_hint);
- }
- else
- {
- func = instantiate_grouped_convolution(in, out, conv_method_hint);
- }
-
- // Fill weights
- if(!weights_are_loaded)
- {
- _weights.allocate_and_fill_if_needed();
- }
- // Fill biases
- if(!biases_are_loaded)
- {
- _biases.allocate_and_fill_if_needed();
- }
-
- ARM_COMPUTE_LOG_GRAPH_INFO(" Data Type: " << in->info()->data_type()
- << " Input Shape: " << in->info()->tensor_shape()
- << " Weights shape: " << _weights.info().tensor_shape()
- << " Biases Shape: " << _biases.info().tensor_shape()
- << " Output Shape: " << out->info()->tensor_shape()
- << " PadStrideInfo: " << _conv_info
- << " Groups: " << _num_groups
- << " WeightsInfo: " << _weights_info
- << std::endl);
-
- return func;
-}
-
-std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_convolution(ITensor *input, ITensor *output, ConvolutionMethodHint conv_method_hint)
-{
- std::unique_ptr<arm_compute::IFunction> func;
- if(_target_hint == TargetHint::OPENCL)
- {
- func = instantiate<TargetHint::OPENCL>(input, _weights.tensor(), _biases.tensor(), output, _conv_info, _weights_info, conv_method_hint);
- }
- else
- {
- func = instantiate<TargetHint::NEON>(input, _weights.tensor(), _biases.tensor(), output, _conv_info, _weights_info, conv_method_hint);
- }
- return func;
-}
-
-std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_grouped_convolution(ITensor *input, ITensor *output, ConvolutionMethodHint conv_method_hint)
-{
- // Get tensor shapes
- TensorShape input_shape = input->info()->tensor_shape();
- TensorShape output_shape = output->info()->tensor_shape();
- TensorShape weights_shape = _weights.info().tensor_shape();
- TensorShape biases_shape = _biases.info().tensor_shape();
-
- ARM_COMPUTE_ERROR_ON_MSG((input_shape.z() % _num_groups) != 0, "Input depth not multiple of the number of groups!");
- ARM_COMPUTE_ERROR_ON_MSG((output_shape.z() % _num_groups) != 0, "Output depth not multiple of the number of groups!");
- ARM_COMPUTE_ERROR_ON_MSG((weights_shape[3] % _num_groups) != 0, "Number of kernels not multiple of the number of groups!");
- ARM_COMPUTE_ERROR_ON_MSG((biases_shape.x() % _num_groups) != 0, "Biases not multiple of the number of groups!");
-
- // Create a grouped convolution function
- auto grouped_conv = arm_compute::support::cpp14::make_unique<GroupedConvolutionFunction>();
-
- // Create sub-tensors vectors
- _is = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);
- _os = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);
- _ws = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);
- _bs = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);
-
- // Calculate sub-tensor splits
- const int input_split = input_shape.z() / _num_groups;
- const int output_split = output_shape.z() / _num_groups;
- const int weights_split = weights_shape[3] / _num_groups;
- const int biases_split = biases_shape.x() / _num_groups;
-
- // Calculate sub-tensor shapes
- input_shape.set(2, input_split);
- output_shape.set(2, output_split);
- weights_shape.set(3, weights_split);
- biases_shape.set(0, biases_split);
-
- // Configure sub-tensors
- for(int i = 0; i < static_cast<int>(_num_groups); ++i)
- {
- // Create convolution function
- std::unique_ptr<arm_compute::IFunction> func;
-
- // Calculate sub-tensors starting coordinates
- Coordinates input_coord(0, 0, input_split * i);
- Coordinates output_coord(0, 0, output_split * i);
- Coordinates weights_coord(0, 0, 0, weights_split * i);
- Coordinates biases_coord(biases_split * i);
-
- // Create sub-tensors for input, output, weights and bias
- auto hint_to_use = (_target_hint == TargetHint::OPENCL) ? TargetHint::OPENCL : TargetHint::NEON;
- _is[i] = SubTensor(input, input_shape, input_coord, hint_to_use);
- _os[i] = SubTensor(output, output_shape, output_coord, hint_to_use);
- _ws[i] = SubTensor(_weights.tensor(), weights_shape, weights_coord, hint_to_use);
- _bs[i] = SubTensor(_biases.tensor(), biases_shape, biases_coord, hint_to_use);
-
- // Instantiate convolution function
- if(_target_hint == TargetHint::OPENCL)
- {
- func = instantiate<TargetHint::OPENCL>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
- }
- else
- {
- func = instantiate<TargetHint::NEON>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
- }
-
- // Add convolution function to the list of convolutions for the grouped convolution
- grouped_conv->add_convolution_function(std::move(func));
- }
-
- return std::move(grouped_conv);
-}
diff --git a/src/graph2/nodes/ConvolutionLayerNode.cpp b/src/graph/nodes/ConvolutionLayerNode.cpp
index 499b3520b2..461728487f 100644
--- a/src/graph2/nodes/ConvolutionLayerNode.cpp
+++ b/src/graph/nodes/ConvolutionLayerNode.cpp
@@ -21,15 +21,15 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/nodes/ConvolutionLayerNode.h"
+#include "arm_compute/graph/nodes/ConvolutionLayerNode.h"
#include "arm_compute/core/Utils.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
ConvolutionLayerNode::ConvolutionLayerNode(PadStrideInfo info, ConvolutionMethod method)
: _info(std::move(info)), _method(method)
@@ -107,5 +107,5 @@ void ConvolutionLayerNode::accept(INodeVisitor &v)
{
v.visit(*this);
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph/nodes/DeQuantizationLayer.cpp b/src/graph/nodes/DeQuantizationLayer.cpp
deleted file mode 100644
index af9ecee157..0000000000
--- a/src/graph/nodes/DeQuantizationLayer.cpp
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/DequantizationLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-
-using namespace arm_compute::graph;
-
-std::unique_ptr<arm_compute::IFunction> DequantizationLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- _target_hint = ctx.hints().target_hint();
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
-
- if(_min_max.tensor() == nullptr)
- {
- TensorShape shape = in->info()->tensor_shape();
- shape.set(Window::DimX, 2);
- shape.remove_dimension(1);
- shape.remove_dimension(1);
-
- _min_max.set_info(TensorInfo(shape, in->info()->num_channels(), DataType::F32));
- _min_max.set_target(_target_hint);
- }
-
- bool minmax_is_loaded = _min_max.tensor() != nullptr;
-
- // Create node context
- NodeContext node_ctx(OperationType::DequantizationLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_output(_min_max.tensor());
- node_ctx.add_output(out);
-
- // Fill min max
- if(!minmax_is_loaded)
- {
- _min_max.allocate_and_fill_if_needed();
- }
-
- // Get function
- return OperationRegistry::get().find_operation(OperationType::DequantizationLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph2/nodes/DepthConcatenateLayerNode.cpp b/src/graph/nodes/DepthConcatenateLayerNode.cpp
index dcd66517b6..1c0539744f 100644
--- a/src/graph2/nodes/DepthConcatenateLayerNode.cpp
+++ b/src/graph/nodes/DepthConcatenateLayerNode.cpp
@@ -21,15 +21,15 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/nodes/DepthConcatenateLayerNode.h"
+#include "arm_compute/graph/nodes/DepthConcatenateLayerNode.h"
#include "arm_compute/core/Utils.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
DepthConcatenateLayerNode::DepthConcatenateLayerNode(unsigned int total_nodes)
: _total_nodes(total_nodes), _is_enabled(true)
@@ -129,5 +129,5 @@ void DepthConcatenateLayerNode::accept(INodeVisitor &v)
{
v.visit(*this);
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph/nodes/DepthConvertLayer.cpp b/src/graph/nodes/DepthConvertLayer.cpp
deleted file mode 100644
index 9b328e7b3e..0000000000
--- a/src/graph/nodes/DepthConvertLayer.cpp
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/DepthConvertLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-
-using namespace arm_compute::graph;
-
-DepthConvertLayer::DepthConvertLayer(const ConvertPolicy policy, uint32_t shift, DataType output_datatype)
- : _policy(policy), _shift(shift), _output_datatype(output_datatype)
-{
-}
-
-std::unique_ptr<arm_compute::IFunction> DepthConvertLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- _target_hint = ctx.hints().target_hint();
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
-
- // Auto configure output
- arm_compute::auto_init_if_empty(*out->info(), in->info()->tensor_shape(), 1, _output_datatype, in->info()->fixed_point_position());
-
- // Create node context
- NodeContext node_ctx(OperationType::DepthConvertLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_output(out);
- node_ctx.add_parameter<ConvertPolicy>("ConvertPolicy", _policy);
- node_ctx.add_parameter<uint32_t>("shift", _shift);
-
- // Get function
- return OperationRegistry::get().find_operation(OperationType::DepthConvertLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph/nodes/DepthwiseConvolutionLayer.cpp b/src/graph/nodes/DepthwiseConvolutionLayer.cpp
deleted file mode 100644
index e5101cc33c..0000000000
--- a/src/graph/nodes/DepthwiseConvolutionLayer.cpp
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/DepthwiseConvolutionLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-std::unique_ptr<arm_compute::IFunction> DepthwiseConvolutionLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
- _target_hint = ctx.hints().target_hint();
-
- if(_weights.tensor() == nullptr)
- {
- TensorShape weights_shape(_conv_width, _conv_height, input->tensor()->info()->tensor_shape().z());
- TensorInfo info = TensorInfo(TensorShape(weights_shape), in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position());
- info.set_quantization_info(_quant_info);
- _weights.set_info(std::move(info));
- }
- if(_biases.has_accessor() && _biases.tensor() == nullptr)
- {
- DataType dt = in->info()->data_type();
- _biases.set_info(TensorInfo(TensorShape(in->info()->dimension(2)), in->info()->num_channels(), is_data_type_quantized_asymmetric(dt) ? DataType::S32 : dt, in->info()->fixed_point_position()));
- }
-
- bool weights_is_loaded = _weights.tensor() != nullptr;
- bool biases_is_loaded = _biases.has_accessor() ? _biases.tensor() != nullptr : true;
-
- _weights.set_target(_target_hint);
- if(_biases.has_accessor())
- {
- _biases.set_target(_target_hint);
- }
-
- // Create node context
- NodeContext node_ctx(OperationType::DepthwiseConvolutionLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_input(_weights.tensor());
- if(_biases.has_accessor())
- {
- node_ctx.add_input(_biases.tensor());
- }
- node_ctx.add_output(out);
- node_ctx.add_parameter<PadStrideInfo>("ConvolutionInfo", _conv_info);
- node_ctx.add_parameter<bool>("Optimized3x3", _opt3x3);
-
- // Configure operation
- auto func = OperationRegistry::get().find_operation(OperationType::DepthwiseConvolutionLayer, _target_hint)->configure(node_ctx);
-
- // Fill tensors
- if(!weights_is_loaded)
- {
- _weights.allocate_and_fill_if_needed();
- }
- if(!biases_is_loaded)
- {
- _biases.allocate_and_fill_if_needed();
- }
-
- // Get function
- return func;
-}
diff --git a/src/graph2/nodes/DepthwiseConvolutionLayerNode.cpp b/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
index b030e8b7ca..67a39029e6 100644
--- a/src/graph2/nodes/DepthwiseConvolutionLayerNode.cpp
+++ b/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
@@ -21,15 +21,15 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/nodes/DepthwiseConvolutionLayerNode.h"
+#include "arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h"
#include "arm_compute/core/Utils.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
DepthwiseConvolutionLayerNode::DepthwiseConvolutionLayerNode(PadStrideInfo info, DepthwiseConvolutionMethod method)
: _info(std::move(info)), _method(method)
@@ -106,5 +106,5 @@ void DepthwiseConvolutionLayerNode::accept(INodeVisitor &v)
{
v.visit(*this);
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/nodes/EltwiseLayerNode.cpp b/src/graph/nodes/EltwiseLayerNode.cpp
index 149d926d29..b794043f2f 100644
--- a/src/graph2/nodes/EltwiseLayerNode.cpp
+++ b/src/graph/nodes/EltwiseLayerNode.cpp
@@ -21,14 +21,14 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/nodes/EltwiseLayerNode.h"
+#include "arm_compute/graph/nodes/EltwiseLayerNode.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
EltwiseLayerNode::EltwiseLayerNode(EltwiseOperation op)
: _op(op)
@@ -79,5 +79,5 @@ void EltwiseLayerNode::accept(INodeVisitor &v)
{
v.visit(*this);
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute
diff --git a/src/graph/nodes/FlattenLayer.cpp b/src/graph/nodes/FlattenLayer.cpp
deleted file mode 100644
index ea08296ba3..0000000000
--- a/src/graph/nodes/FlattenLayer.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/FlattenLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-std::unique_ptr<arm_compute::IFunction> FlattenLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- _target_hint = ctx.hints().target_hint();
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
-
- // Auto configure output
- TensorShape tensor_shape = in->info()->tensor_shape();
- tensor_shape.collapse(in->info()->num_dimensions());
- arm_compute::auto_init_if_empty(*out->info(), tensor_shape, 1, in->info()->data_type(), in->info()->fixed_point_position());
-
- // Create node context
- NodeContext node_ctx(OperationType::FlattenLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_output(out);
-
- // Get function
- return OperationRegistry::get().find_operation(OperationType::FlattenLayer, _target_hint)->configure(node_ctx);
-} \ No newline at end of file
diff --git a/src/graph2/nodes/FlattenLayerNode.cpp b/src/graph/nodes/FlattenLayerNode.cpp
index 7c4059f3ed..8b847c7056 100644
--- a/src/graph2/nodes/FlattenLayerNode.cpp
+++ b/src/graph/nodes/FlattenLayerNode.cpp
@@ -21,14 +21,14 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/nodes/FlattenLayerNode.h"
+#include "arm_compute/graph/nodes/FlattenLayerNode.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
FlattenLayerNode::FlattenLayerNode()
{
@@ -76,5 +76,5 @@ void FlattenLayerNode::accept(INodeVisitor &v)
{
v.visit(*this);
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph/nodes/FloorLayer.cpp b/src/graph/nodes/FloorLayer.cpp
deleted file mode 100644
index 8750546ed9..0000000000
--- a/src/graph/nodes/FloorLayer.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/FloorLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-std::unique_ptr<arm_compute::IFunction> FloorLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
- _target_hint = ctx.hints().target_hint();
-
- // Create node context
- NodeContext node_ctx(OperationType::FloorLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_output(out);
-
- // Get function
- return OperationRegistry::get().find_operation(OperationType::FloorLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph/nodes/FullyConnectedLayer.cpp b/src/graph/nodes/FullyConnectedLayer.cpp
index 3742150d37..cbf2b35ddd 100644
--- a/src/graph/nodes/FullyConnectedLayer.cpp
+++ b/src/graph/nodes/FullyConnectedLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,18 +21,40 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph/nodes/FullyConnectedLayer.h"
+#include "arm_compute/graph/nodes/FullyConnectedLayerNode.h"
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
-using namespace arm_compute::graph;
+namespace arm_compute
+{
+namespace graph
+{
+FullyConnectedLayerNode::FullyConnectedLayerNode(unsigned int num_outputs)
+ : _num_outputs(num_outputs)
+{
+ _input_edges.resize(3, EmptyEdgeID);
+ _outputs.resize(1, NullTensorID);
+}
-namespace
+TensorShape FullyConnectedLayerNode::compute_weights_shape(TensorShape input_shape, unsigned int num_outputs)
{
-TensorShape calculate_fullyconnected_layer_output_shape(const TensorShape &input_shape, unsigned int output_neurons)
+ unsigned int num_weights = 1;
+ unsigned int num_dimensions = input_shape.num_dimensions();
+ // Ignore the batch dimension if there is one:
+ if(num_dimensions == 2 || num_dimensions == 4)
+ {
+ num_dimensions--;
+ }
+ for(unsigned int i = 0; i < num_dimensions; i++)
+ {
+ num_weights *= input_shape[i];
+ }
+ return TensorShape(num_weights, num_outputs);
+}
+
+TensorShape FullyConnectedLayerNode::compute_output_shape(TensorShape input_shape, unsigned int num_outputs)
{
// Note: Only 1D batch space is supported at the moment
unsigned int batches = input_shape[1];
@@ -40,67 +62,46 @@ TensorShape calculate_fullyconnected_layer_output_shape(const TensorShape &input
{
batches = input_shape[3];
}
- return TensorShape(output_neurons, batches);
+ return TensorShape(num_outputs, batches);
}
-} // namespace
-std::unique_ptr<arm_compute::IFunction> FullyConnectedLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
+bool FullyConnectedLayerNode::forward_descriptors()
{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
- _target_hint = ctx.hints().target_hint();
-
- if(_weights.tensor() == nullptr)
- {
- unsigned int num_weights = 1;
- unsigned int num_dimensions = in->info()->num_dimensions();
- // Ignore the batch dimension if there is one:
- if(num_dimensions == 2 || num_dimensions == 4)
- {
- num_dimensions--;
- }
- for(unsigned int i = 0; i < num_dimensions; i++)
- {
- num_weights *= in->info()->dimension(i);
- }
- _weights.set_info(TensorInfo(TensorShape(num_weights, _num_neurons), in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position()));
- }
- if(_biases.tensor() == nullptr)
+ if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
- _biases.set_info(TensorInfo(TensorShape(_num_neurons), in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position()));
+ Tensor *dst = output(0);
+ ARM_COMPUTE_ERROR_ON(dst == nullptr);
+ dst->desc() = configure_output(0);
+ return true;
}
+ return false;
+}
- // Auto configure output
- arm_compute::auto_init_if_empty(*out->info(),
- calculate_fullyconnected_layer_output_shape(in->info()->tensor_shape(), _num_neurons),
- in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position());
-
- bool weights_are_loaded = _weights.tensor() != nullptr;
- bool biases_are_loaded = _biases.tensor() != nullptr;
+TensorDescriptor FullyConnectedLayerNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ const Tensor *src = input(0);
+ ARM_COMPUTE_ERROR_ON(src == nullptr);
- // Create node context
- NodeContext node_ctx(OperationType::FullyConnectedLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_input(_weights.set_target(_target_hint));
- node_ctx.add_input(_biases.set_target(_target_hint));
- node_ctx.add_output(out);
+ TensorDescriptor output_info = src->desc();
+ TensorShape output_shape = compute_output_shape(src->desc().shape, _num_outputs);
+ output_info.shape = output_shape;
+ return output_info;
+}
- // Configure operation
- auto func = OperationRegistry::get().find_operation(OperationType::FullyConnectedLayer, _target_hint)->configure(node_ctx);
+Status FullyConnectedLayerNode::validate()
+{
+ return Status{};
+}
- // Fill biases
- if(!weights_are_loaded)
- {
- _weights.allocate_and_fill_if_needed();
- }
- if(!biases_are_loaded)
- {
- _biases.allocate_and_fill_if_needed();
- }
+NodeType FullyConnectedLayerNode::type() const
+{
+ return NodeType::FullyConnectedLayer;
+}
- // Get function
- return func;
+void FullyConnectedLayerNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
}
+} // namespace graph
+} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/nodes/InputNode.cpp b/src/graph/nodes/InputNode.cpp
index 84cce2acdb..e912633a66 100644
--- a/src/graph2/nodes/InputNode.cpp
+++ b/src/graph/nodes/InputNode.cpp
@@ -21,14 +21,14 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/nodes/InputNode.h"
+#include "arm_compute/graph/nodes/InputNode.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
InputNode::InputNode(TensorDescriptor desc)
: _desc(desc)
@@ -68,5 +68,5 @@ void InputNode::accept(INodeVisitor &v)
{
v.visit(*this);
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute
diff --git a/src/graph/nodes/L2NormalizeLayer.cpp b/src/graph/nodes/L2NormalizeLayer.cpp
deleted file mode 100644
index 9813ba4450..0000000000
--- a/src/graph/nodes/L2NormalizeLayer.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/L2NormalizeLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-L2NormalizeLayer::L2NormalizeLayer(unsigned int axis, float epsilon)
- : _axis(axis), _epsilon(epsilon)
-{
-}
-
-std::unique_ptr<arm_compute::IFunction> L2NormalizeLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
- _target_hint = ctx.hints().target_hint();
-
- // Create node context
- NodeContext node_ctx(OperationType::L2NormalizeLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_output(out);
- node_ctx.add_parameter<unsigned int>("axis", _axis);
- node_ctx.add_parameter<float>("epsilon", _epsilon);
-
- // Get function
- return OperationRegistry::get().find_operation(OperationType::L2NormalizeLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph/nodes/NormalizationLayer.cpp b/src/graph/nodes/NormalizationLayer.cpp
deleted file mode 100644
index a489329243..0000000000
--- a/src/graph/nodes/NormalizationLayer.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/NormalizationLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-NormalizationLayer::NormalizationLayer(const NormalizationLayerInfo norm_info)
- : _norm_info(norm_info)
-{
-}
-
-std::unique_ptr<arm_compute::IFunction> NormalizationLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
- _target_hint = ctx.hints().target_hint();
-
- // Create node context
- NodeContext node_ctx(OperationType::NormalizationLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_output(out);
- node_ctx.add_parameter<NormalizationLayerInfo>("NormalizationLayerInfo", _norm_info);
-
- // Get function
- return OperationRegistry::get().find_operation(OperationType::NormalizationLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph2/nodes/NormalizationLayerNode.cpp b/src/graph/nodes/NormalizationLayerNode.cpp
index a394879a3e..a9f2fbd066 100644
--- a/src/graph2/nodes/NormalizationLayerNode.cpp
+++ b/src/graph/nodes/NormalizationLayerNode.cpp
@@ -21,15 +21,15 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/nodes/NormalizationLayerNode.h"
+#include "arm_compute/graph/nodes/NormalizationLayerNode.h"
#include "arm_compute/core/Utils.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
NormalizationLayerNode::NormalizationLayerNode(NormalizationLayerInfo norm_info)
: _info(norm_info)
@@ -80,5 +80,5 @@ void NormalizationLayerNode::accept(INodeVisitor &v)
{
v.visit(*this);
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/nodes/OutputNode.cpp b/src/graph/nodes/OutputNode.cpp
index 1daebb1cc8..4c63bfa20c 100644
--- a/src/graph2/nodes/OutputNode.cpp
+++ b/src/graph/nodes/OutputNode.cpp
@@ -21,16 +21,16 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/nodes/OutputNode.h"
+#include "arm_compute/graph/nodes/OutputNode.h"
#include "arm_compute/core/Error.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
-#include "arm_compute/graph2/Tensor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
+#include "arm_compute/graph/Tensor.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
OutputNode::OutputNode()
{
@@ -62,5 +62,5 @@ void OutputNode::accept(INodeVisitor &v)
{
v.visit(*this);
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute
diff --git a/src/graph/nodes/PoolingLayer.cpp b/src/graph/nodes/PoolingLayer.cpp
deleted file mode 100644
index 2c151194f3..0000000000
--- a/src/graph/nodes/PoolingLayer.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/PoolingLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-PoolingLayer::PoolingLayer(const PoolingLayerInfo pool_info)
- : _pool_info(pool_info)
-{
-}
-
-std::unique_ptr<arm_compute::IFunction> PoolingLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
- _target_hint = ctx.hints().target_hint();
-
- // Create node context
- NodeContext node_ctx(OperationType::PoolingLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_output(out);
- node_ctx.add_parameter<PoolingLayerInfo>("PoolingLayerInfo", _pool_info);
-
- // Get function
- return OperationRegistry::get().find_operation(OperationType::PoolingLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph2/nodes/PoolingLayerNode.cpp b/src/graph/nodes/PoolingLayerNode.cpp
index 2c2cf5387a..a7b6b3679a 100644
--- a/src/graph2/nodes/PoolingLayerNode.cpp
+++ b/src/graph/nodes/PoolingLayerNode.cpp
@@ -21,15 +21,15 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/nodes/PoolingLayerNode.h"
+#include "arm_compute/graph/nodes/PoolingLayerNode.h"
#include "arm_compute/core/Utils.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
PoolingLayerNode::PoolingLayerNode(PoolingLayerInfo pool_info)
: _info(std::move(pool_info))
@@ -99,5 +99,5 @@ void PoolingLayerNode::accept(INodeVisitor &v)
{
v.visit(*this);
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph/nodes/QuantizationLayer.cpp b/src/graph/nodes/QuantizationLayer.cpp
deleted file mode 100644
index c102f47633..0000000000
--- a/src/graph/nodes/QuantizationLayer.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/QuantizationLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-
-using namespace arm_compute::graph;
-
-std::unique_ptr<arm_compute::IFunction> QuantizationLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- _target_hint = ctx.hints().target_hint();
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
-
- // Create node context
- NodeContext node_ctx(OperationType::QuantizationLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_output(out);
-
- // Get function
- return OperationRegistry::get().find_operation(OperationType::QuantizationLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph/nodes/ReshapeLayer.cpp b/src/graph/nodes/ReshapeLayer.cpp
index b0c117e418..2757f06bd3 100644
--- a/src/graph/nodes/ReshapeLayer.cpp
+++ b/src/graph/nodes/ReshapeLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,37 +21,61 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph/nodes/ReshapeLayer.h"
+#include "arm_compute/graph/nodes/ReshapeLayerNode.h"
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
-using namespace arm_compute::graph;
-
-ReshapeLayer::ReshapeLayer(TensorShape shape)
+namespace arm_compute
+{
+namespace graph
+{
+ReshapeLayerNode::ReshapeLayerNode(TensorShape shape)
: _shape(shape)
{
+ _input_edges.resize(1, EmptyEdgeID);
+ _outputs.resize(1, NullTensorID);
}
-std::unique_ptr<arm_compute::IFunction> ReshapeLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
+bool ReshapeLayerNode::forward_descriptors()
{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
+ if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ {
+ Tensor *dst = output(0);
+ ARM_COMPUTE_ERROR_ON(dst == nullptr);
+ dst->desc() = configure_output(0);
+ return true;
+ }
+ return false;
+}
+
+TensorDescriptor ReshapeLayerNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+ const Tensor *src = input(0);
+ ARM_COMPUTE_ERROR_ON(src == nullptr);
- _target_hint = ctx.hints().target_hint();
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
+ TensorDescriptor output_desc = src->desc();
+ output_desc.shape = _shape;
- // Auto configure output
- arm_compute::auto_init_if_empty(*out->info(), _shape, 1, in->info()->data_type(), in->info()->fixed_point_position(), in->info()->quantization_info());
+ return output_desc;
+}
- // Create node context
- NodeContext node_ctx(OperationType::ReshapeLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_output(out);
+Status ReshapeLayerNode::validate()
+{
+ return Status{};
+}
- // Get function
- return OperationRegistry::get().find_operation(OperationType::ReshapeLayer, _target_hint)->configure(node_ctx);
+NodeType ReshapeLayerNode::type() const
+{
+ return NodeType::ReshapeLayer;
+}
+
+void ReshapeLayerNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
}
+} // namespace graph
+} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph/nodes/ResidualLayer.cpp b/src/graph/nodes/ResidualLayer.cpp
deleted file mode 100644
index 87404f9e1f..0000000000
--- a/src/graph/nodes/ResidualLayer.cpp
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/ResidualLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "arm_compute/graph/SubGraph.h"
-#include "arm_compute/graph/Tensor.h"
-#include "arm_compute/runtime/IFunction.h"
-#include "support/ToolchainSupport.h"
-#include "utils/Utils.h"
-
-#include <memory>
-#include <tuple>
-#include <vector>
-
-using namespace arm_compute::graph;
-
-/** Residual function */
-class ResidualFunction final : public arm_compute::IFunction
-{
-public:
- /** Default Constructor */
- ResidualFunction(GraphContext &ctx, ITensorObject *output)
- : _ctx(ctx), _input(nullptr), _output(output), _func(nullptr), _graphs(), _graph_outputs()
- {
- }
-
- /** Prevent instances from being copy constructed */
- ResidualFunction(const ResidualFunction &) = delete;
- /** Prevent instances from being copy assigned */
- const ResidualFunction &operator=(const ResidualFunction &) = delete;
- /** Prevent instances from being move constructed */
- ResidualFunction(ResidualFunction &&) = delete;
- /** Prevent instances from being move assigned */
- ResidualFunction &operator=(ResidualFunction &&) = delete;
- /** Default destructor */
- ~ResidualFunction() override = default;
-
- /** Set the input (when using only one sub graph)
- *
- * @param[in] input Input to set
- */
- void set_input(std::unique_ptr<ITensorObject> input)
- {
- _input = std::move(input);
- }
-
- /** Registers graph to be executed by the residual function
- *
- * @param[in] graph Graph to register
- * @param[in] output Output to register
- */
- void register_graph(std::unique_ptr<Graph> graph, std::unique_ptr<ITensorObject> output)
- {
- _graphs.push_back(std::move(graph));
- _graph_outputs.push_back(std::move(output));
- }
-
- /** Configure the function */
- void configure()
- {
- ARM_COMPUTE_ERROR_ON(_graphs.size() < 1 || _graphs.size() > 2);
- TargetHint target_hint = _ctx.hints().target_hint();
-
- // Create node context
- NodeContext node_ctx(OperationType::ArithmeticAddition);
- node_ctx.set_target(target_hint);
-
- if(_graphs.size() == 1)
- {
- arm_compute::ITensor *in = _input->tensor();
- node_ctx.add_input(in);
- }
-
- for(auto &o : _graph_outputs)
- {
- arm_compute::ITensor *in = o->tensor();
- node_ctx.add_input(in);
- }
-
- arm_compute::ITensor *out = _output->tensor();
- auto_init_if_empty(*out->info(), *_graph_outputs[0]->tensor()->info());
- node_ctx.add_output(out);
-
- _func = OperationRegistry::get().find_operation(OperationType::ArithmeticAddition, target_hint)->configure(node_ctx);
-
- for(auto &o : _graph_outputs)
- {
- o->allocate();
- }
- }
-
- // Inherited methods overriden:
- void run() override
- {
- ARM_COMPUTE_ERROR_ON(_graphs.size() < 1 || _graphs.size() > 2);
-
- for(auto &g : _graphs)
- {
- ARM_COMPUTE_ERROR_ON(g.get() == nullptr);
- g->run();
- }
-
- _func->run();
- }
-
-private:
- GraphContext _ctx;
- std::unique_ptr<ITensorObject> _input;
- ITensorObject *_output;
- std::unique_ptr<arm_compute::IFunction> _func;
- std::vector<std::unique_ptr<Graph>> _graphs;
- std::vector<std::unique_ptr<ITensorObject>> _graph_outputs;
-};
-
-std::unique_ptr<arm_compute::IFunction> ResidualLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<Tensor *>(input) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<Tensor *>(output) == nullptr);
-
- // Create residual function
- auto func = arm_compute::support::cpp14::make_unique<ResidualFunction>(ctx, output);
-
- if(_sub_graphs.size() == 1)
- {
- std::unique_ptr<ITensorObject> original_in;
- original_in = arm_compute::support::cpp14::make_unique<SubTensor>(*dynamic_cast<Tensor *>(input),
- input->tensor()->info()->tensor_shape(),
- Coordinates());
- func->set_input(std::move(original_in));
- }
-
- // Constuct all sub-graphs given the input/output
- for(auto &sg : _sub_graphs)
- {
- ARM_COMPUTE_ERROR_ON(sg.get() == nullptr);
-
- // IO buffers
- std::unique_ptr<ITensorObject> in;
- std::unique_ptr<ITensorObject> out;
- std::unique_ptr<ITensorObject> func_in;
-
- // Create input sub-tensor
- if(!sg->has_input())
- {
- in = arm_compute::support::cpp14::make_unique<SubTensor>(*dynamic_cast<Tensor *>(input),
- input->tensor()->info()->tensor_shape(),
- Coordinates());
- }
-
- // Create output sub-tensor
- if(!sg->has_output())
- {
- ITensorInfo *info = input->tensor()->info();
- func_in = arm_compute::support::cpp14::make_unique<Tensor>(TensorInfo(info->num_channels(), info->data_type(), info->fixed_point_position()));
- func_in->set_target(ctx.hints().target_hint());
- out = arm_compute::support::cpp14::make_unique<SubTensor>(func_in->tensor(),
- TensorShape(),
- Coordinates(0, 0, 0),
- func_in->target(),
- true);
- }
-
- // Construct sub_graph
- auto g = sg->construct(ctx, std::move(in), std::move(out));
-
- // Register graph to function
- func->register_graph(std::move(g), std::move(func_in));
- }
-
- func->configure();
-
- return std::move(func);
-}
diff --git a/src/graph/nodes/SoftmaxLayer.cpp b/src/graph/nodes/SoftmaxLayer.cpp
deleted file mode 100644
index 7f2325b312..0000000000
--- a/src/graph/nodes/SoftmaxLayer.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/SoftmaxLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-std::unique_ptr<arm_compute::IFunction> SoftmaxLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
- ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
- arm_compute::ITensor *in = input->tensor();
- arm_compute::ITensor *out = output->tensor();
- _target_hint = ctx.hints().target_hint();
-
- // Create node context
- NodeContext node_ctx(OperationType::SoftmaxLayer);
- node_ctx.set_target(_target_hint);
- node_ctx.add_input(in);
- node_ctx.add_output(out);
-
- // Get function
- return OperationRegistry::get().find_operation(OperationType::SoftmaxLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph2/nodes/SoftmaxLayerNode.cpp b/src/graph/nodes/SoftmaxLayerNode.cpp
index 83bc978981..4c21ac6ad0 100644
--- a/src/graph2/nodes/SoftmaxLayerNode.cpp
+++ b/src/graph/nodes/SoftmaxLayerNode.cpp
@@ -21,15 +21,15 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/nodes/SoftmaxLayerNode.h"
+#include "arm_compute/graph/nodes/SoftmaxLayerNode.h"
#include "arm_compute/core/Utils.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
SoftmaxLayerNode::SoftmaxLayerNode(float beta)
: _beta(beta)
@@ -80,5 +80,5 @@ void SoftmaxLayerNode::accept(INodeVisitor &v)
{
v.visit(*this);
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/nodes/SplitLayerNode.cpp b/src/graph/nodes/SplitLayerNode.cpp
index c34a7ff176..c8fb43c2a1 100644
--- a/src/graph2/nodes/SplitLayerNode.cpp
+++ b/src/graph/nodes/SplitLayerNode.cpp
@@ -21,15 +21,15 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/nodes/SplitLayerNode.h"
+#include "arm_compute/graph/nodes/SplitLayerNode.h"
#include "arm_compute/core/Utils.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
SplitLayerNode::SplitLayerNode(unsigned int num_splits, unsigned int axis)
: _num_splits(num_splits), _axis(axis)
@@ -113,5 +113,5 @@ void SplitLayerNode::accept(INodeVisitor &v)
{
v.visit(*this);
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph/operations/CLSimpleOperations.cpp b/src/graph/operations/CLSimpleOperations.cpp
deleted file mode 100644
index fe56122009..0000000000
--- a/src/graph/operations/CLSimpleOperations.cpp
+++ /dev/null
@@ -1,495 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Error.h"
-#include "arm_compute/graph/IOperation.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistrar.h"
-#include "arm_compute/graph/Types.h"
-#include "arm_compute/runtime/CL/CLFunctions.h"
-#include "support/ToolchainSupport.h"
-#include "utils/GraphTypePrinter.h"
-#include "utils/TypePrinter.h"
-
-#include <memory>
-
-using namespace arm_compute::graph;
-
-/* Activation Layer */
-REGISTER_SIMPLE_OPERATION(CLActivationLayerOperation, OPENCL, OperationType::ActivationLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
- const auto act_info = ctx.parameter<ActivationLayerInfo>("ActivationLayerInfo");
-
- // Create and configure function
- auto activation = arm_compute::support::cpp14::make_unique<arm_compute::CLActivationLayer>();
- activation->configure(in, out, act_info);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLActivationLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << " Activation function: " << act_info.activation()
- << " a: " << act_info.a()
- << " b: " << act_info.b()
- << std::endl);
-
- return std::move(activation);
-}
-
-/* Arithmetic addition */
-REGISTER_SIMPLE_OPERATION(CLArithmeticAdditionOperation, OPENCL, OperationType::ArithmeticAddition)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 2);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in1 = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
- auto *in2 = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1));
- auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
-
- auto addition = arm_compute::support::cpp14::make_unique<arm_compute::CLArithmeticAddition>();
- addition->configure(in1, in2, out, ConvertPolicy::SATURATE);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLArithmeticAddition"
- << " Data Type: " << in1->info()->data_type()
- << " Input 1 shape: " << in1->info()->tensor_shape()
- << " Input 2 shape: " << in2->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << std::endl);
-
- return std::move(addition);
-}
-
-/* Batch Normalization Layer */
-REGISTER_SIMPLE_OPERATION(CLBatchNormalizationLayerOperation, OPENCL, OperationType::BatchNormalizationLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 5);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(3)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(4)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
- auto *mean = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1));
- auto *var = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2));
- auto *beta = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(3));
- auto *gamma = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(4));
- auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
- const auto epsilon = ctx.parameter<float>("epsilon");
- const auto act_info = ctx.parameter<ActivationLayerInfo>("act_info");
-
- // Create and configure function
- auto batch_norm = arm_compute::support::cpp14::make_unique<arm_compute::CLBatchNormalizationLayer>();
- batch_norm->configure(in, out, mean, var, beta, gamma, epsilon, act_info);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLBatchNormalizationLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << " Mean shape: " << mean->info()->tensor_shape()
- << " Var shape: " << var->info()->tensor_shape()
- << " Beta shape: " << beta->info()->tensor_shape()
- << " Gamma shape: " << gamma->info()->tensor_shape()
- << " Epsilon: " << epsilon
- << " Activation function: " << act_info.activation()
- << " a: " << act_info.a()
- << " b: " << act_info.b()
- << std::endl);
-
- return std::move(batch_norm);
-}
-
-/* DepthConvertLayer Layer */
-REGISTER_SIMPLE_OPERATION(CLDepthConvertLayerOperation, OPENCL, OperationType::DepthConvertLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
- const auto conv_policy = ctx.parameter<ConvertPolicy>("ConvertPolicy");
- const auto shift = ctx.parameter<uint32_t>("shift");
-
- // Create and configure function
- auto depthconvert = arm_compute::support::cpp14::make_unique<arm_compute::CLDepthConvertLayer>();
- depthconvert->configure(in, out, conv_policy, shift);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLDepthConvertLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << " shift: " << shift
- << std::endl);
-
- return std::move(depthconvert);
-}
-
-/* DepthwiseConvolutionLayer Layer */
-REGISTER_SIMPLE_OPERATION(CLDepthwiseConvolutionOperation, OPENCL, OperationType::DepthwiseConvolutionLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 2 && ctx.num_inputs() != 3);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
- auto *weights = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1));
- auto *biases = ctx.num_inputs() == 3 ? dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2)) : nullptr;
- auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
- const auto conv_info = ctx.parameter<PadStrideInfo>("ConvolutionInfo");
- const auto opt3x3 = ctx.parameter<bool>("Optimized3x3");
-
- // Create and configure function
- std::unique_ptr<arm_compute::IFunction> func;
- bool run_3x3_opt = opt3x3 && weights->info()->dimension(0) == 3;
- if(run_3x3_opt)
- {
- auto depwthwise_conv = arm_compute::support::cpp14::make_unique<arm_compute::CLDepthwiseConvolutionLayer3x3>();
- depwthwise_conv->configure(in, weights, biases, out, conv_info);
- func = std::move(depwthwise_conv);
- }
- else
- {
- auto depwthwise_conv = arm_compute::support::cpp14::make_unique<arm_compute::CLDepthwiseConvolutionLayer>();
- depwthwise_conv->configure(in, weights, biases, out, conv_info);
- func = std::move(depwthwise_conv);
- }
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLDepthwiseConvolutionLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Weights shape: " << weights->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape());
- if(biases == nullptr)
- {
- ARM_COMPUTE_LOG_GRAPH_INFO(" Biases shape: No biases provided" << std::endl);
- }
- else
- {
- ARM_COMPUTE_LOG_GRAPH_INFO(" Biases shape: " << biases->info()->tensor_shape() << std::endl);
- }
-
- return func;
-}
-
-/* DeQuantizationLayer Layer */
-REGISTER_SIMPLE_OPERATION(CLDequantizationLayerOperation, OPENCL, OperationType::DequantizationLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 2);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(1)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
- auto *min_max = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(1));
-
- // Create and configure function
- auto dequantization = arm_compute::support::cpp14::make_unique<arm_compute::CLDequantizationLayer>();
- dequantization->configure(in, out, min_max);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLDequantizationLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << " Min max shape: " << min_max->info()->tensor_shape()
- << std::endl);
-
- return std::move(dequantization);
-}
-
-/* Flatten Layer */
-REGISTER_SIMPLE_OPERATION(CLFlattenLayerOperation, OPENCL, OperationType::FlattenLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
-
- // Create and configure function
- auto flatten = arm_compute::support::cpp14::make_unique<arm_compute::CLFlattenLayer>();
- flatten->configure(in, out);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLFlattenLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << std::endl);
-
- return std::move(flatten);
-}
-
-/* Floor Layer */
-REGISTER_SIMPLE_OPERATION(CLFloorLayerOperation, OPENCL, OperationType::FloorLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
-
- // Create and configure function
- auto floor = arm_compute::support::cpp14::make_unique<arm_compute::CLFloor>();
- floor->configure(in, out);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLFloorLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << std::endl);
-
- return std::move(floor);
-}
-
-/* Fully Connected Layer */
-REGISTER_SIMPLE_OPERATION(CLFullyConnectedLayer, OPENCL, OperationType::FullyConnectedLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 3);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
- auto *weights = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1));
- auto *biases = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2));
- auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
-
- // Create and configure function
- auto fc = arm_compute::support::cpp14::make_unique<arm_compute::CLFullyConnectedLayer>();
- fc->configure(in, weights, biases, out);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLFullyConnectedLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Weights shape: " << weights->info()->tensor_shape()
- << " Biases Shape: " << biases->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << std::endl);
-
- return std::move(fc);
-}
-
-/* L2 Normalize Layer */
-REGISTER_SIMPLE_OPERATION(CLL2NormalizeLayerOperation, OPENCL, OperationType::L2NormalizeLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
- const auto axis = ctx.parameter<unsigned int>("axis");
- const auto epsilon = ctx.parameter<float>("epsilon");
-
- // Create and configure function
- auto l2_norm = arm_compute::support::cpp14::make_unique<arm_compute::CLL2NormalizeLayer>();
- l2_norm->configure(in, out, axis, epsilon);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLL2NormalizeLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << " Axis: " << axis
- << " Epsilon: " << epsilon
- << std::endl);
-
- return std::move(l2_norm);
-}
-
-/* Normalization Layer */
-REGISTER_SIMPLE_OPERATION(CLNormalizationLayerOperation, OPENCL, OperationType::NormalizationLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
- const auto norm_info = ctx.parameter<NormalizationLayerInfo>("NormalizationLayerInfo");
-
- // Create and configure function
- auto norm = arm_compute::support::cpp14::make_unique<arm_compute::CLNormalizationLayer>();
- norm->configure(in, out, norm_info);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLNormalizationLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << " Normalization info: " << norm_info
- << std::endl);
-
- return std::move(norm);
-}
-
-/* Pooling Layer */
-REGISTER_SIMPLE_OPERATION(CLPoolingLayerOperation, OPENCL, OperationType::PoolingLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
- const auto pool_info = ctx.parameter<PoolingLayerInfo>("PoolingLayerInfo");
-
- // Create and configure function
- auto pool = arm_compute::support::cpp14::make_unique<arm_compute::CLPoolingLayer>();
- pool->configure(in, out, pool_info);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLPoolingLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << " Pooling info: " << pool_info
- << std::endl);
-
- return std::move(pool);
-}
-
-/* Quantization Layer */
-REGISTER_SIMPLE_OPERATION(CLQuantizationLayerOperation, OPENCL, OperationType::QuantizationLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
-
- // Create and configure function
- auto quantization = arm_compute::support::cpp14::make_unique<arm_compute::CLQuantizationLayer>();
- quantization->configure(in, out);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLQuantizationLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << std::endl);
-
- return std::move(quantization);
-}
-
-/* Reshape Layer */
-REGISTER_SIMPLE_OPERATION(CLReshapeLayerOperation, OPENCL, OperationType::ReshapeLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
-
- // Create and configure function
- auto reshape = arm_compute::support::cpp14::make_unique<arm_compute::CLReshapeLayer>();
- reshape->configure(in, out);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLReshapeLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << std::endl);
-
- return std::move(reshape);
-}
-
-/* Softmax Layer */
-REGISTER_SIMPLE_OPERATION(CLSoftmaxLayerOperation, OPENCL, OperationType::SoftmaxLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
-
- // Create and configure function
- auto smx = arm_compute::support::cpp14::make_unique<arm_compute::CLSoftmaxLayer>();
- smx->configure(in, out);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLSoftmaxLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << std::endl);
-
- return std::move(smx);
-}
diff --git a/src/graph/operations/NESimpleOperations.cpp b/src/graph/operations/NESimpleOperations.cpp
deleted file mode 100644
index 4154b9a59c..0000000000
--- a/src/graph/operations/NESimpleOperations.cpp
+++ /dev/null
@@ -1,495 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/graph/IOperation.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistrar.h"
-#include "arm_compute/graph/Types.h"
-#include "arm_compute/runtime/NEON/NEFunctions.h"
-#include "support/ToolchainSupport.h"
-#include "utils/GraphTypePrinter.h"
-#include "utils/TypePrinter.h"
-
-#include <memory>
-
-using namespace arm_compute::graph;
-
-/* Activation Layer */
-REGISTER_SIMPLE_OPERATION(NEActivationLayerOperation, NEON, OperationType::ActivationLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
- const auto act_info = ctx.parameter<ActivationLayerInfo>("ActivationLayerInfo");
-
- // Create and configure function
- auto activation = arm_compute::support::cpp14::make_unique<arm_compute::NEActivationLayer>();
- activation->configure(in, out, act_info);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEActivationLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << " Activation function: " << act_info.activation()
- << " a: " << act_info.a()
- << " b: " << act_info.b()
- << std::endl);
-
- return std::move(activation);
-}
-
-/* Arithmetic addition */
-REGISTER_SIMPLE_OPERATION(NEArithmeticAdditionOperation, NEON, OperationType::ArithmeticAddition)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 2);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(1)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in1 = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
- auto *in2 = dynamic_cast<arm_compute::ITensor *>(ctx.input(1));
- auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
-
- auto addition = arm_compute::support::cpp14::make_unique<arm_compute::NEArithmeticAddition>();
- addition->configure(in1, in2, out, ConvertPolicy::SATURATE);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEArithmeticAddition"
- << " Data Type: " << in1->info()->data_type()
- << " Input 1 shape: " << in1->info()->tensor_shape()
- << " Input 2 shape: " << in2->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << std::endl);
-
- return std::move(addition);
-}
-
-/* Batch Normalization Layer */
-REGISTER_SIMPLE_OPERATION(NEBatchNormalizationLayerOperation, NEON, OperationType::BatchNormalizationLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 5);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(1)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(2)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(3)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(4)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
- auto *mean = dynamic_cast<arm_compute::ITensor *>(ctx.input(1));
- auto *var = dynamic_cast<arm_compute::ITensor *>(ctx.input(2));
- auto *beta = dynamic_cast<arm_compute::ITensor *>(ctx.input(3));
- auto *gamma = dynamic_cast<arm_compute::ITensor *>(ctx.input(4));
- auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
- const auto epsilon = ctx.parameter<float>("epsilon");
- const auto act_info = ctx.parameter<ActivationLayerInfo>("act_info");
-
- // Create and configure function
- auto batch_norm = arm_compute::support::cpp14::make_unique<arm_compute::NEBatchNormalizationLayer>();
- batch_norm->configure(in, out, mean, var, beta, gamma, epsilon, act_info);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEBatchNormalizationLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << " Mean shape: " << mean->info()->tensor_shape()
- << " Var shape: " << var->info()->tensor_shape()
- << " Beta shape: " << beta->info()->tensor_shape()
- << " Gamma shape: " << gamma->info()->tensor_shape()
- << " Epsilon: " << epsilon
- << " Activation function: " << act_info.activation()
- << " a: " << act_info.a()
- << " b: " << act_info.b()
- << std::endl);
-
- return std::move(batch_norm);
-}
-
-/* DepthConvertLayer Layer */
-REGISTER_SIMPLE_OPERATION(NEDepthConvertLayerOperation, NEON, OperationType::DepthConvertLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
- const auto conv_policy = ctx.parameter<ConvertPolicy>("ConvertPolicy");
- const auto shift = ctx.parameter<uint32_t>("shift");
-
- // Create and configure function
- auto depthconvert = arm_compute::support::cpp14::make_unique<arm_compute::NEDepthConvertLayer>();
- depthconvert->configure(in, out, conv_policy, shift);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEDepthConvertLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << " shift: " << shift
- << std::endl);
-
- return std::move(depthconvert);
-}
-
-/* DepthwiseConvolutionLayer Layer */
-REGISTER_SIMPLE_OPERATION(NEDepthwiseConvolutionOperation, NEON, OperationType::DepthwiseConvolutionLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 2 && ctx.num_inputs() != 3);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
- auto *weights = dynamic_cast<arm_compute::ITensor *>(ctx.input(1));
- auto *biases = ctx.num_inputs() == 3 ? dynamic_cast<arm_compute::ITensor *>(ctx.input(2)) : nullptr;
- auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
- const auto conv_info = ctx.parameter<PadStrideInfo>("ConvolutionInfo");
- const auto opt3x3 = ctx.parameter<bool>("Optimized3x3");
-
- // Create and configure function
- std::unique_ptr<arm_compute::IFunction> func;
- bool run_3x3_opt = opt3x3 && weights->info()->dimension(0) == 3;
- if(run_3x3_opt)
- {
- auto depwthwise_conv = arm_compute::support::cpp14::make_unique<arm_compute::NEDepthwiseConvolutionLayer3x3>();
- depwthwise_conv->configure(in, weights, biases, out, conv_info);
- func = std::move(depwthwise_conv);
- }
- else
- {
- auto depwthwise_conv = arm_compute::support::cpp14::make_unique<arm_compute::NEDepthwiseConvolutionLayer>();
- depwthwise_conv->configure(in, weights, biases, out, conv_info);
- func = std::move(depwthwise_conv);
- }
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEDepthwiseConvolutionLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Weights shape: " << weights->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape());
- if(biases == nullptr)
- {
- ARM_COMPUTE_LOG_GRAPH_INFO(" Biases shape: No biases provided" << std::endl);
- }
- else
- {
- ARM_COMPUTE_LOG_GRAPH_INFO(" Biases shape: " << biases->info()->tensor_shape() << std::endl);
- }
-
- return func;
-}
-
-/* DeQuantizationLayer Layer */
-REGISTER_SIMPLE_OPERATION(NEDequantizationLayerOperation, NEON, OperationType::DequantizationLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 2);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(1)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
- auto *min_max = dynamic_cast<arm_compute::ITensor *>(ctx.output(1));
-
- // Create and configure function
- auto dequantization = arm_compute::support::cpp14::make_unique<arm_compute::NEDequantizationLayer>();
- dequantization->configure(in, out, min_max);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEDequantizationLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << " Min max shape: " << min_max->info()->tensor_shape()
- << std::endl);
-
- return std::move(dequantization);
-}
-
-/* Flatten Layer */
-REGISTER_SIMPLE_OPERATION(NEFlattenLayerOperation, NEON, OperationType::FlattenLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
-
- // Create and configure function
- auto flatten = arm_compute::support::cpp14::make_unique<arm_compute::NEFlattenLayer>();
- flatten->configure(in, out);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEFlattenLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << std::endl);
-
- return std::move(flatten);
-}
-
-/* Floor Layer */
-REGISTER_SIMPLE_OPERATION(NEFloorLayerOperation, NEON, OperationType::FloorLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
-
- // Create and configure function
- auto floor = arm_compute::support::cpp14::make_unique<arm_compute::NEFloor>();
- floor->configure(in, out);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEFloorLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << std::endl);
-
- return std::move(floor);
-}
-
-/* Fully Connected Layer */
-REGISTER_SIMPLE_OPERATION(NEFullyConnectedLayer, NEON, OperationType::FullyConnectedLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 3);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(1)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(2)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
- auto *weights = dynamic_cast<arm_compute::ITensor *>(ctx.input(1));
- auto *biases = dynamic_cast<arm_compute::ITensor *>(ctx.input(2));
- auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
-
- // Create and configure function
- auto fc = arm_compute::support::cpp14::make_unique<arm_compute::NEFullyConnectedLayer>();
- fc->configure(in, weights, biases, out);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEFullyConnectedLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Weights shape: " << weights->info()->tensor_shape()
- << " Biases Shape: " << biases->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << std::endl);
-
- return std::move(fc);
-}
-
-/* L2 Normalize Layer */
-REGISTER_SIMPLE_OPERATION(NEL2NormalizeLayerOperation, NEON, OperationType::L2NormalizeLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
- const auto axis = ctx.parameter<unsigned int>("axis");
- const auto epsilon = ctx.parameter<float>("epsilon");
-
- // Create and configure function
- auto l2_norm = arm_compute::support::cpp14::make_unique<arm_compute::NEL2NormalizeLayer>();
- l2_norm->configure(in, out, axis, epsilon);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEL2NormalizeLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << " Axis: " << axis
- << " Epsilon: " << epsilon
- << std::endl);
-
- return std::move(l2_norm);
-}
-
-/* Normalization Layer */
-REGISTER_SIMPLE_OPERATION(NENormalizationLayerOperation, NEON, OperationType::NormalizationLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
- const auto norm_info = ctx.parameter<NormalizationLayerInfo>("NormalizationLayerInfo");
-
- // Create and configure function
- auto norm = arm_compute::support::cpp14::make_unique<arm_compute::NENormalizationLayer>();
- norm->configure(in, out, norm_info);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NENormalizationLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << " Normalization info: " << norm_info
- << std::endl);
-
- return std::move(norm);
-}
-
-/* Pooling Layer */
-REGISTER_SIMPLE_OPERATION(NEPoolingLayerOperation, NEON, OperationType::PoolingLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
- const auto pool_info = ctx.parameter<PoolingLayerInfo>("PoolingLayerInfo");
-
- // Create and configure function
- auto pool = arm_compute::support::cpp14::make_unique<arm_compute::NEPoolingLayer>();
- pool->configure(in, out, pool_info);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEPoolingLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << " Pooling info: " << pool_info
- << std::endl);
-
- return std::move(pool);
-}
-
-/* Quantization Layer */
-REGISTER_SIMPLE_OPERATION(NEQuantizationLayerOperation, NEON, OperationType::QuantizationLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
-
- // Create and configure function
- auto quantization = arm_compute::support::cpp14::make_unique<arm_compute::NEQuantizationLayer>();
- quantization->configure(in, out);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEQuantizationLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << std::endl);
-
- return std::move(quantization);
-}
-
-/* Reshape Layer */
-REGISTER_SIMPLE_OPERATION(NEReshapeLayerOperation, NEON, OperationType::ReshapeLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
-
- // Create and configure function
- auto reshape = arm_compute::support::cpp14::make_unique<arm_compute::NEReshapeLayer>();
- reshape->configure(in, out);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEReshapeLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << std::endl);
-
- return std::move(reshape);
-}
-
-/* Softmax Layer */
-REGISTER_SIMPLE_OPERATION(NESoftmaxLayerOperation, NEON, OperationType::SoftmaxLayer)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
-
- // Create and configure function
- auto smx = arm_compute::support::cpp14::make_unique<arm_compute::NESoftmaxLayer>();
- smx->configure(in, out);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NESoftmaxLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << std::endl);
-
- return std::move(smx);
-}
diff --git a/src/graph2/printers/DotGraphPrinter.cpp b/src/graph/printers/DotGraphPrinter.cpp
index 04987eebe0..47b1bb56bf 100644
--- a/src/graph2/printers/DotGraphPrinter.cpp
+++ b/src/graph/printers/DotGraphPrinter.cpp
@@ -21,17 +21,17 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph2/printers/DotGraphPrinter.h"
+#include "arm_compute/graph/printers/DotGraphPrinter.h"
#include "arm_compute/core/Error.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/Tensor.h"
-#include "arm_compute/graph2/TypePrinter.h"
-#include "arm_compute/graph2/nodes/Nodes.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/Tensor.h"
+#include "arm_compute/graph/TypePrinter.h"
+#include "arm_compute/graph/nodes/Nodes.h"
namespace arm_compute
{
-namespace graph2
+namespace graph
{
void DotGraphVisitor::visit(ActivationLayerNode &n)
{
@@ -169,5 +169,5 @@ void DotGraphPrinter::print_edges(const Graph &g, std::ostream &os)
}
}
}
-} // namespace graph2
+} // namespace graph
} // namespace arm_compute
diff --git a/src/graph2/Graph.cpp b/src/graph2/Graph.cpp
deleted file mode 100644
index ead67bc85a..0000000000
--- a/src/graph2/Graph.cpp
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph2/Graph.h"
-
-namespace arm_compute
-{
-namespace graph2
-{
-Graph::Graph(GraphID id, std::string name)
- : _id(id), _name(std::move(name)), _nodes(), _edges(), _tensors(), _tagged_nodes(), _mtx()
-{
-}
-
-bool Graph::remove_node(NodeID nid)
-{
- if(nid >= _nodes.size())
- {
- return false;
- }
-
- std::unique_ptr<INode> &node = _nodes[nid];
-
- // Remove node connections
- if(node)
- {
- for(auto &input_eid : node->_input_edges)
- {
- remove_connection(input_eid);
- }
- for(auto &outpud_eid : node->_output_edges)
- {
- remove_connection(outpud_eid);
- }
- }
-
- node = nullptr;
-
- return true;
-}
-
-EdgeID Graph::add_connection(NodeID source, size_t source_idx, NodeID sink, size_t sink_idx)
-{
- std::lock_guard<arm_compute::Mutex> lock(_mtx);
-
- // Check if node index is valid, if node exists and finally if the connection index is valid
- ARM_COMPUTE_ERROR_ON((source >= _nodes.size()) || (_nodes[source] == nullptr) || (source_idx >= _nodes[source]->num_outputs()));
- ARM_COMPUTE_ERROR_ON((sink >= _nodes.size()) || (_nodes[sink] == nullptr) || (sink_idx >= _nodes[sink]->num_inputs()));
-
- // Get nodes
- std::unique_ptr<INode> &source_node = _nodes[source];
- std::unique_ptr<INode> &sink_node = _nodes[sink];
-
- // Check for duplicate connections (Check only sink node)
- Edge *sink_node_edge = sink_node->input_edge(sink_idx);
- if((sink_node_edge != nullptr) && (sink_node_edge->producer_id() == source) && (sink_node_edge->producer_idx() == source_idx)
- && (sink_node_edge->consumer_id() == sink) && (sink_node_edge->consumer_idx() == sink_idx))
- {
- return sink_node_edge->id();
- }
-
- // Check if there is already a tensor associated with output if not create one
- TensorID tid = source_node->output_id(source_idx);
- if(tid == NullTensorID)
- {
- tid = create_tensor();
- }
- std::unique_ptr<Tensor> &tensor = _tensors[tid];
-
- // Create connections
- EdgeID eid = _edges.size();
- auto connection = arm_compute::support::cpp14::make_unique<Edge>(eid, source_node.get(), source_idx, sink_node.get(), sink_idx, tensor.get());
- _edges.push_back(std::move(connection));
-
- // Add connections to source and sink nodes
- source_node->_output_edges.insert(eid);
- sink_node->_input_edges[sink_idx] = eid;
-
- // Set tensor output node
- source_node->_outputs[source_idx] = tid;
-
- // Bind tensor to the edge
- tensor->bind_edge(eid);
-
- // Try and propagate shapes in sink node
- sink_node->forward_descriptors();
-
- return eid;
-}
-
-bool Graph::remove_connection(EdgeID eid)
-{
- if(eid >= _edges.size())
- {
- return false;
- }
-
- std::unique_ptr<Edge> &edge = _edges[eid];
-
- // Remove node connections
- if(edge != nullptr)
- {
- // Get tensor bound to the edge
- if(edge->tensor() != nullptr)
- {
- edge->tensor()->unbind_edge(eid);
- }
-
- // Remove edges from source node
- if(edge->producer() != nullptr)
- {
- edge->producer()->_output_edges.erase(eid);
- }
-
- // Remove edges from sink node
- if((edge->consumer() != nullptr) && (edge->consumer_idx() < edge->consumer()->_input_edges.size()))
- {
- edge->consumer()->_input_edges[edge->consumer_idx()] = EmptyEdgeID;
- }
- }
-
- // Clear edge
- edge = nullptr;
-
- return true;
-}
-
-TensorID Graph::create_tensor(TensorDescriptor desc)
-{
- TensorID tid = _tensors.size();
- auto tensor = support::cpp14::make_unique<Tensor>(tid, desc);
- _tensors.push_back(std::move(tensor));
-
- return tid;
-}
-
-std::string Graph::name() const
-{
- return _name;
-}
-
-GraphID Graph::id() const
-{
- return _id;
-}
-
-const std::vector<NodeID> &Graph::inputs()
-{
- return _tagged_nodes[NodeType::Input];
-}
-
-std::vector<std::unique_ptr<INode>> &Graph::nodes()
-{
- return _nodes;
-}
-
-const std::vector<std::unique_ptr<INode>> &Graph::nodes() const
-{
- return _nodes;
-}
-
-const std::vector<std::unique_ptr<Edge>> &Graph::edges() const
-{
- return _edges;
-}
-
-std::vector<std::unique_ptr<Tensor>> &Graph::tensors()
-{
- return _tensors;
-}
-
-const std::vector<std::unique_ptr<Tensor>> &Graph::tensors() const
-{
- return _tensors;
-}
-
-const INode *Graph::node(NodeID id) const
-{
- return (id >= _nodes.size()) ? nullptr : _nodes[id].get();
-}
-
-INode *Graph::node(NodeID id)
-{
- return (id >= _nodes.size()) ? nullptr : _nodes[id].get();
-}
-
-const Edge *Graph::edge(EdgeID id) const
-{
- return (id >= _edges.size()) ? nullptr : _edges[id].get();
-}
-
-Edge *Graph::edge(EdgeID id)
-{
- return (id >= _edges.size()) ? nullptr : _edges[id].get();
-}
-
-const Tensor *Graph::tensor(TensorID id) const
-{
- return (id >= _tensors.size()) ? nullptr : _tensors[id].get();
-}
-
-Tensor *Graph::tensor(TensorID id)
-{
- return (id >= _tensors.size()) ? nullptr : _tensors[id].get();
-}
-} // namespace graph2
-} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/GraphContext.cpp b/src/graph2/GraphContext.cpp
deleted file mode 100644
index 08a7b68dce..0000000000
--- a/src/graph2/GraphContext.cpp
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph2/GraphContext.h"
-#include <arm_compute/graph2.h>
-
-namespace arm_compute
-{
-namespace graph2
-{
-GraphContext::GraphContext()
- : _config(), _memory_managers()
-{
-}
-
-const GraphConfig &GraphContext::config() const
-{
- return _config;
-}
-
-void GraphContext::set_config(const GraphConfig &config)
-{
- _config = config;
-}
-
-bool GraphContext::insert_memory_management_ctx(MemoryManagerContext &&memory_ctx)
-{
- Target target = memory_ctx.target;
- if(target == Target::UNSPECIFIED || _memory_managers.find(target) != std::end(_memory_managers))
- {
- return false;
- }
-
- _memory_managers[target] = std::move(memory_ctx);
- return true;
-}
-
-MemoryManagerContext *GraphContext::memory_management_ctx(Target target)
-{
- return (_memory_managers.find(target) != std::end(_memory_managers)) ? &_memory_managers[target] : nullptr;
-}
-
-void GraphContext::finalize()
-{
- for(auto &mm_obj : _memory_managers)
- {
- if(mm_obj.second.mm != nullptr)
- {
- mm_obj.second.mm->finalize();
- }
- }
-}
-} // namespace graph2
-} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/INode.cpp b/src/graph2/INode.cpp
deleted file mode 100644
index 28be341396..0000000000
--- a/src/graph2/INode.cpp
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph2/INode.h"
-
-#include "arm_compute/core/Error.h"
-#include "arm_compute/graph2/Edge.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/Tensor.h"
-
-namespace arm_compute
-{
-namespace graph2
-{
-// *INDENT-OFF*
-// clang-format off
-INode::INode()
- : _graph(nullptr), _id(EmptyNodeID), _common_params({ "", Target::UNSPECIFIED}),
- _outputs(), _input_edges(), _output_edges(), _assigned_target(Target::UNSPECIFIED)
-{
-}
-// clang-format on
-// *INDENT-ON*
-
-void INode::set_graph(Graph *g)
-{
- ARM_COMPUTE_ERROR_ON(g == nullptr);
- _graph = g;
-}
-
-void INode::set_id(NodeID id)
-{
- _id = id;
-}
-
-void INode::set_common_node_parameters(NodeParams common_params)
-{
- _common_params = std::move(common_params);
-}
-
-void INode::set_requested_target(Target target)
-{
- _common_params.target = target;
-}
-
-void INode::set_assigned_target(Target target)
-{
- _assigned_target = target;
-}
-
-void INode::set_output_tensor(TensorID tid, size_t idx)
-{
- if(tid != NullTensorID && (idx < _outputs.size()) && (_graph->tensor(tid) != nullptr))
- {
- ARM_COMPUTE_ERROR_ON(_graph == nullptr);
- Tensor *updated_tensor = _graph->tensor(tid);
- _outputs[idx] = tid;
-
- // Set tensor to all output edges of the node
- for(auto &output_edge_id : _output_edges)
- {
- auto output_edge = _graph->edge(output_edge_id);
- if(output_edge != nullptr)
- {
- // Unbind edge from current tensor
- auto current_output_tensor = output_edge->tensor();
- current_output_tensor->unbind_edge(output_edge->id());
-
- // Update tensor to edge and rebind tensor
- output_edge->update_bound_tensor(updated_tensor);
- updated_tensor->bind_edge(output_edge->id());
- }
- }
- }
-}
-
-NodeID INode::id() const
-{
- return _id;
-}
-
-std::string INode::name() const
-{
- return _common_params.name;
-}
-
-const Graph *INode::graph() const
-{
- return _graph;
-}
-
-Graph *INode::graph()
-{
- return _graph;
-}
-
-const std::vector<TensorID> &INode::outputs() const
-{
- return _outputs;
-}
-
-const std::vector<EdgeID> &INode::input_edges() const
-{
- return _input_edges;
-}
-
-const std::set<EdgeID> &INode::output_edges() const
-{
- return _output_edges;
-}
-
-TensorID INode::input_id(size_t idx) const
-{
- ARM_COMPUTE_ERROR_ON(idx >= _input_edges.size());
- Edge *e = _graph->edge(_input_edges[idx]);
- return (e != nullptr) ? e->tensor_id() : NullTensorID;
-}
-
-TensorID INode::output_id(size_t idx) const
-{
- ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
- return _outputs[idx];
-}
-
-Tensor *INode::input(size_t idx) const
-{
- ARM_COMPUTE_ERROR_ON(_graph == nullptr);
- ARM_COMPUTE_ERROR_ON(idx >= _input_edges.size());
- Edge *e = _graph->edge(_input_edges[idx]);
- return (e != nullptr) ? e->tensor() : nullptr;
-}
-
-Tensor *INode::output(size_t idx) const
-{
- ARM_COMPUTE_ERROR_ON(_graph == nullptr);
- ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
- return _graph->tensor(_outputs[idx]);
-}
-
-EdgeID INode::input_edge_id(size_t idx) const
-{
- ARM_COMPUTE_ERROR_ON(idx >= _input_edges.size());
- return _input_edges[idx];
-}
-
-Edge *INode::input_edge(size_t idx) const
-{
- ARM_COMPUTE_ERROR_ON(_graph == nullptr);
- ARM_COMPUTE_ERROR_ON(idx >= _input_edges.size());
- return _graph->edge(_input_edges[idx]);
-}
-
-size_t INode::num_inputs() const
-{
- return _input_edges.size();
-}
-
-size_t INode::num_outputs() const
-{
- return _outputs.size();
-}
-
-Target INode::requested_target() const
-{
- return _common_params.target;
-}
-
-Target INode::assigned_target() const
-{
- return _assigned_target;
-}
-} // namespace graph2
-} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/Tensor.cpp b/src/graph2/Tensor.cpp
deleted file mode 100644
index c6054d716d..0000000000
--- a/src/graph2/Tensor.cpp
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph2/Tensor.h"
-
-namespace arm_compute
-{
-namespace graph2
-{
-Tensor::Tensor(TensorID id, TensorDescriptor desc)
- : _id(id), _desc(desc), _handle(nullptr), _accessor(nullptr), _bound_edges()
-{
-}
-
-TensorID Tensor::id() const
-{
- return _id;
-}
-
-TensorDescriptor &Tensor::desc()
-{
- return _desc;
-}
-
-const TensorDescriptor &Tensor::desc() const
-{
- return _desc;
-}
-
-void Tensor::set_handle(std::unique_ptr<ITensorHandle> backend_tensor)
-{
- _handle = std::move(backend_tensor);
-}
-
-ITensorHandle *Tensor::handle()
-{
- return _handle.get();
-}
-
-void Tensor::set_accessor(std::unique_ptr<ITensorAccessor> accessor)
-{
- _accessor = std::move(accessor);
-}
-
-ITensorAccessor *Tensor::accessor()
-{
- return _accessor.get();
-}
-
-bool Tensor::call_accessor()
-{
- // Early exit guard
- if(!_accessor || !_handle)
- {
- return false;
- }
-
- // Map tensor
- _handle->map(true);
-
- // Return in case of null backend buffer
- if(_handle->tensor().buffer() == nullptr)
- {
- return false;
- }
-
- // Call accessor
- _accessor->access_tensor(_handle->tensor());
-
- // Unmap tensor
- _handle->unmap();
-
- return true;
-}
-
-void Tensor::bind_edge(EdgeID eid)
-{
- _bound_edges.insert(eid);
-}
-
-void Tensor::unbind_edge(EdgeID eid)
-{
- _bound_edges.erase(eid);
-}
-
-const std::set<EdgeID> Tensor::bound_edges() const
-{
- return _bound_edges;
-}
-} // namespace graph2
-} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/nodes/FullyConnectedLayer.cpp b/src/graph2/nodes/FullyConnectedLayer.cpp
deleted file mode 100644
index 195adc40fe..0000000000
--- a/src/graph2/nodes/FullyConnectedLayer.cpp
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph2/nodes/FullyConnectedLayerNode.h"
-
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
-
-namespace arm_compute
-{
-namespace graph2
-{
-FullyConnectedLayerNode::FullyConnectedLayerNode(unsigned int num_outputs)
- : _num_outputs(num_outputs)
-{
- _input_edges.resize(3, EmptyEdgeID);
- _outputs.resize(1, NullTensorID);
-}
-
-TensorShape FullyConnectedLayerNode::compute_weights_shape(TensorShape input_shape, unsigned int num_outputs)
-{
- unsigned int num_weights = 1;
- unsigned int num_dimensions = input_shape.num_dimensions();
- // Ignore the batch dimension if there is one:
- if(num_dimensions == 2 || num_dimensions == 4)
- {
- num_dimensions--;
- }
- for(unsigned int i = 0; i < num_dimensions; i++)
- {
- num_weights *= input_shape[i];
- }
- return TensorShape(num_weights, num_outputs);
-}
-
-TensorShape FullyConnectedLayerNode::compute_output_shape(TensorShape input_shape, unsigned int num_outputs)
-{
- // Note: Only 1D batch space is supported at the moment
- unsigned int batches = input_shape[1];
- if(input_shape.num_dimensions() > 2)
- {
- batches = input_shape[3];
- }
- return TensorShape(num_outputs, batches);
-}
-
-bool FullyConnectedLayerNode::forward_descriptors()
-{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
- {
- Tensor *dst = output(0);
- ARM_COMPUTE_ERROR_ON(dst == nullptr);
- dst->desc() = configure_output(0);
- return true;
- }
- return false;
-}
-
-TensorDescriptor FullyConnectedLayerNode::configure_output(size_t idx) const
-{
- ARM_COMPUTE_UNUSED(idx);
- const Tensor *src = input(0);
- ARM_COMPUTE_ERROR_ON(src == nullptr);
-
- TensorDescriptor output_info = src->desc();
- TensorShape output_shape = compute_output_shape(src->desc().shape, _num_outputs);
- output_info.shape = output_shape;
- return output_info;
-}
-
-Status FullyConnectedLayerNode::validate()
-{
- return Status{};
-}
-
-NodeType FullyConnectedLayerNode::type() const
-{
- return NodeType::FullyConnectedLayer;
-}
-
-void FullyConnectedLayerNode::accept(INodeVisitor &v)
-{
- v.visit(*this);
-}
-} // namespace graph2
-} // namespace arm_compute \ No newline at end of file
diff --git a/src/graph2/nodes/ReshapeLayer.cpp b/src/graph2/nodes/ReshapeLayer.cpp
deleted file mode 100644
index 6280eea75c..0000000000
--- a/src/graph2/nodes/ReshapeLayer.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph2/nodes/ReshapeLayerNode.h"
-
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
-
-namespace arm_compute
-{
-namespace graph2
-{
-ReshapeLayerNode::ReshapeLayerNode(TensorShape shape)
- : _shape(shape)
-{
- _input_edges.resize(1, EmptyEdgeID);
- _outputs.resize(1, NullTensorID);
-}
-
-bool ReshapeLayerNode::forward_descriptors()
-{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
- {
- Tensor *dst = output(0);
- ARM_COMPUTE_ERROR_ON(dst == nullptr);
- dst->desc() = configure_output(0);
- return true;
- }
- return false;
-}
-
-TensorDescriptor ReshapeLayerNode::configure_output(size_t idx) const
-{
- ARM_COMPUTE_UNUSED(idx);
- ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
-
- const Tensor *src = input(0);
- ARM_COMPUTE_ERROR_ON(src == nullptr);
-
- TensorDescriptor output_desc = src->desc();
- output_desc.shape = _shape;
-
- return output_desc;
-}
-
-Status ReshapeLayerNode::validate()
-{
- return Status{};
-}
-
-NodeType ReshapeLayerNode::type() const
-{
- return NodeType::ReshapeLayer;
-}
-
-void ReshapeLayerNode::accept(INodeVisitor &v)
-{
- v.visit(*this);
-}
-} // namespace graph2
-} // namespace arm_compute \ No newline at end of file
diff --git a/src/runtime/NEON/functions/NEWinogradLayer.cpp b/src/runtime/NEON/functions/NEWinogradLayer.cpp
index 0ac6d0966d..0a344f0cae 100644
--- a/src/runtime/NEON/functions/NEWinogradLayer.cpp
+++ b/src/runtime/NEON/functions/NEWinogradLayer.cpp
@@ -248,7 +248,7 @@ void NEWinogradLayer::run()
Status NEWinogradLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, biases, output);
- ARM_COMPUTE_RETURN_ERROR_ON(validate_arguments(input, weights, biases, output, conv_info));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, conv_info));
return Status{};
}