aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--SConscript4
-rw-r--r--arm_compute/graph/IOperation.h14
-rw-r--r--arm_compute/graph/NodeContext.h9
-rw-r--r--arm_compute/graph/OperationRegistrar.h8
-rw-r--r--arm_compute/graph/OperationRegistry.h20
-rw-r--r--arm_compute/graph/Types.h14
-rw-r--r--arm_compute/graph/nodes/DepthConcatenateLayer.h58
-rw-r--r--arm_compute/graph/nodes/L2NormalizeLayer.h5
-rw-r--r--arm_compute/graph/operations/CL/CLActivationLayerOperation.h49
-rw-r--r--arm_compute/graph/operations/NEON/NEActivationLayerOperation.h49
-rw-r--r--src/graph/NodeContext.cpp7
-rw-r--r--src/graph/OperationRegistry.cpp4
-rw-r--r--src/graph/nodes/ActivationLayer.cpp12
-rw-r--r--src/graph/nodes/BatchNormalizationLayer.cpp74
-rw-r--r--src/graph/nodes/DepthConcatenateLayer.cpp106
-rw-r--r--src/graph/nodes/FloorLayer.cpp63
-rw-r--r--src/graph/nodes/FullyConnectedLayer.cpp78
-rw-r--r--src/graph/nodes/L2NormalizeLayer.cpp70
-rw-r--r--src/graph/nodes/NormalizationLayer.cpp66
-rw-r--r--src/graph/nodes/PoolingLayer.cpp65
-rw-r--r--src/graph/nodes/SoftmaxLayer.cpp63
-rw-r--r--src/graph/operations/CL/CLActivationLayerOperation.cpp73
-rw-r--r--src/graph/operations/CLSimpleOperations.cpp277
-rw-r--r--src/graph/operations/NEON/NEActivationLayerOperation.cpp73
-rw-r--r--src/graph/operations/NESimpleOperations.cpp277
25 files changed, 708 insertions, 830 deletions
diff --git a/SConscript b/SConscript
index 4a351fff56..acaa9a593d 100644
--- a/SConscript
+++ b/SConscript
@@ -203,9 +203,7 @@ if env['os'] != 'bare_metal' and not env['standalone']:
if env['neon'] and env['opencl']:
graph_files = Glob('src/graph/*.cpp')
graph_files += Glob('src/graph/nodes/*.cpp')
-
- graph_files += Glob('src/graph/operations/CL/*.cpp')
- graph_files += Glob('src/graph/operations/NEON/*.cpp')
+ graph_files += Glob('src/graph/operations/*.cpp')
graph_files += Glob('src/graph/CL/*.cpp')
graph_files += Glob('src/graph/NEON/*.cpp')
diff --git a/arm_compute/graph/IOperation.h b/arm_compute/graph/IOperation.h
index c2c56a349d..a9fa4f83c7 100644
--- a/arm_compute/graph/IOperation.h
+++ b/arm_compute/graph/IOperation.h
@@ -51,6 +51,20 @@ public:
*/
virtual TargetHint target() const = 0;
};
+
+#define REGISTER_SIMPLE_OPERATION(NAME, TARGET, OP) \
+ class NAME : public IOperation \
+ { \
+ public: \
+ std::unique_ptr<arm_compute::IFunction> configure(NodeContext &ctx) final; \
+ TargetHint target() const final \
+ { \
+ return TargetHint::TARGET; \
+ } \
+ }; \
+ static detail::OperationRegistrar<NAME> NAME##_registrar(OP); \
+ std::unique_ptr<arm_compute::IFunction> NAME::configure(NodeContext &ctx)
+
} // namespace graph
} // namespace arm_compute
#endif /* __ARM_COMPUTE_GRAPH_IOPERATION_H__ */
diff --git a/arm_compute/graph/NodeContext.h b/arm_compute/graph/NodeContext.h
index 8e8a761d22..bc90f217a7 100644
--- a/arm_compute/graph/NodeContext.h
+++ b/arm_compute/graph/NodeContext.h
@@ -45,11 +45,10 @@ class NodeContext
{
public:
/** Default Constructor
- * (TODO(geopin01): Should we have an enum with all the supported ops instead?)
*
* @param[in] operation Name of the operation
*/
- NodeContext(std::string operation)
+ NodeContext(OperationType operation)
: _operation(operation), _target(TargetHint::DONT_CARE), _inputs(), _outputs(), _parameters() {};
/** Sets the execution target of the node
*
@@ -75,9 +74,9 @@ public:
void add_parameter(std::string name, T parameter);
/** Returns the operation of this node.
*
- * @return The operation name
+ * @return The operation type
*/
- std::string operation() const;
+ OperationType operation() const;
/** Returns the execution target of this node
*
* @return The execution target
@@ -117,7 +116,7 @@ public:
size_t num_outputs() const;
private:
- std::string _operation;
+ OperationType _operation;
TargetHint _target;
std::vector<arm_compute::ITensor *> _inputs;
std::vector<arm_compute::ITensor *> _outputs;
diff --git a/arm_compute/graph/OperationRegistrar.h b/arm_compute/graph/OperationRegistrar.h
index ff31963dc3..ee171c3510 100644
--- a/arm_compute/graph/OperationRegistrar.h
+++ b/arm_compute/graph/OperationRegistrar.h
@@ -43,15 +43,15 @@ class OperationRegistrar final
public:
/** Add a new test case with the given name to the framework.
*
- * @param[in] operation_name Operation name
+ * @param[in] operation Operation type
*/
- OperationRegistrar(std::string operation_name);
+ OperationRegistrar(OperationType operation);
};
template <typename T>
-inline OperationRegistrar<T>::OperationRegistrar(std::string operation_name)
+inline OperationRegistrar<T>::OperationRegistrar(OperationType operation)
{
- OperationRegistry::get().add_operation<T>(std::move(operation_name));
+ OperationRegistry::get().add_operation<T>(std::move(operation));
}
} // namespace detail
} // namespace graph
diff --git a/arm_compute/graph/OperationRegistry.h b/arm_compute/graph/OperationRegistry.h
index 905e8ee66f..ae68bf45a2 100644
--- a/arm_compute/graph/OperationRegistry.h
+++ b/arm_compute/graph/OperationRegistry.h
@@ -47,39 +47,39 @@ public:
static OperationRegistry &get();
/** Finds an operation in the registry
*
- * @param[in] operation Name of the operation to find
+ * @param[in] operation Type of the operation to find
* @param[in] target Target of the operation
*
* @return Pointer to the operation functor if found, else nullptr
*/
- IOperation *find_operation(const std::string &operation, TargetHint target);
+ IOperation *find_operation(OperationType operation, TargetHint target);
/** Checks if an operation for a given target exists
*
- * @param[in] operation Operation name
+ * @param[in] operation Operation type
* @param[in] target Execution target
*
* @return True if exists else false
*/
- bool contains(const std::string &operation, TargetHint target) const;
- /** Registers and operation to the registry
+ bool contains(OperationType operation, TargetHint target) const;
+ /** Registers an operation to the registry
*
- * @param operation_name Name of the operation to register
+ * @param[in] operation Operation to register
*/
template <typename T>
- void add_operation(const std::string &operation_name);
+ void add_operation(OperationType operation);
private:
/** Default Constructor */
OperationRegistry();
private:
- std::map<std::string, std::vector<std::unique_ptr<IOperation>>> _registered_ops;
+ std::map<OperationType, std::vector<std::unique_ptr<IOperation>>> _registered_ops;
};
template <typename T>
-inline void OperationRegistry::add_operation(const std::string &operation_name)
+inline void OperationRegistry::add_operation(OperationType operation)
{
- _registered_ops[operation_name].emplace_back(support::cpp14::make_unique<T>());
+ _registered_ops[operation].emplace_back(support::cpp14::make_unique<T>());
}
} // namespace graph
} // namespace arm_compute
diff --git a/arm_compute/graph/Types.h b/arm_compute/graph/Types.h
index f02fa7df3f..662a7404ef 100644
--- a/arm_compute/graph/Types.h
+++ b/arm_compute/graph/Types.h
@@ -71,6 +71,20 @@ enum class ConvolutionMethodHint
DIRECT /**< Direct convolution */
};
+/** Supported layer operations */
+enum class OperationType
+{
+ ActivationLayer,
+ BatchNormalizationLayer,
+ ConvolutionLayer,
+ FloorLayer,
+ FullyConnectedLayer,
+ L2NormalizeLayer,
+ NormalizationLayer,
+ PoolingLayer,
+ SoftmaxLayer
+};
+
/** Branch layer merging method */
enum class BranchMergeMethod
{
diff --git a/arm_compute/graph/nodes/DepthConcatenateLayer.h b/arm_compute/graph/nodes/DepthConcatenateLayer.h
deleted file mode 100644
index ac347a46d6..0000000000
--- a/arm_compute/graph/nodes/DepthConcatenateLayer.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_GRAPH_DEPTH_CONCATENATE_LAYER_H__
-#define __ARM_COMPUTE_GRAPH_DEPTH_CONCATENATE_LAYER_H__
-
-#include "arm_compute/graph/GraphContext.h"
-#include "arm_compute/graph/INode.h"
-#include "arm_compute/graph/Tensor.h"
-#include "arm_compute/graph/Types.h"
-
-namespace arm_compute
-{
-namespace graph
-{
-/** Depth Concatenate Layer node */
-class DepthConcatenateLayer
-{
-public:
- /** Default Constructor */
- DepthConcatenateLayer() = default;
- DepthConcatenateLayer(const DepthConcatenateLayer &) = delete;
- DepthConcatenateLayer &operator=(const DepthConcatenateLayer &) = delete;
- DepthConcatenateLayer(DepthConcatenateLayer &&) = default;
- DepthConcatenateLayer &operator=(DepthConcatenateLayer &&) = delete;
-
- // Inherited methods overriden:
- std::unique_ptr<arm_compute::IFunction> instantiate_node(GraphContext &ctx, std::vector<ITensor *> inputs, ITensor *output);
- void print_info();
-
-private:
- TargetHint _hint{ TargetHint::DONT_CARE };
- std::vector<ITensor *> _inputs{ nullptr };
- ITensor *_output{ nullptr };
-};
-} // namespace graph
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_GRAPH_DEPTH_CONCATENATE_LAYER_H__ */
diff --git a/arm_compute/graph/nodes/L2NormalizeLayer.h b/arm_compute/graph/nodes/L2NormalizeLayer.h
index ddc1646485..fc2bbc2d19 100644
--- a/arm_compute/graph/nodes/L2NormalizeLayer.h
+++ b/arm_compute/graph/nodes/L2NormalizeLayer.h
@@ -42,10 +42,7 @@ public:
* @param[in] axis Dimension along which to reduce.
* @param[in] epsilon Lower bound value for the normalization.
*/
- explicit L2NormalizeLayer(unsigned int axis, float epsilon)
- : _axis(axis), _epsilon(epsilon)
- {
- }
+ explicit L2NormalizeLayer(unsigned int axis, float epsilon);
// Inherited methods overriden:
std::unique_ptr<arm_compute::IFunction> instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output) override;
diff --git a/arm_compute/graph/operations/CL/CLActivationLayerOperation.h b/arm_compute/graph/operations/CL/CLActivationLayerOperation.h
deleted file mode 100644
index 2053fb674d..0000000000
--- a/arm_compute/graph/operations/CL/CLActivationLayerOperation.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_GRAPH_CL_ACTIVATIONLAYER_OPERATION_H__
-#define __ARM_COMPUTE_GRAPH_CL_ACTIVATIONLAYER_OPERATION_H__
-
-#include "arm_compute/graph/IOperation.h"
-
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/Types.h"
-#include "arm_compute/runtime/IFunction.h"
-
-#include <memory>
-
-namespace arm_compute
-{
-namespace graph
-{
-/** Operation functor interface */
-class CLActivationLayerOperation : public IOperation
-{
-public:
- // Inherited methods overriden:
- std::unique_ptr<arm_compute::IFunction> configure(NodeContext &ctx) final;
- TargetHint target() const final;
-};
-} // namespace graph
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_GRAPH_CL_ACTIVATIONLAYER_OPERATION_H__ */
diff --git a/arm_compute/graph/operations/NEON/NEActivationLayerOperation.h b/arm_compute/graph/operations/NEON/NEActivationLayerOperation.h
deleted file mode 100644
index f6e1bd7d39..0000000000
--- a/arm_compute/graph/operations/NEON/NEActivationLayerOperation.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_GRAPH_NE_ACTIVATIONLAYER_OPERATION_H__
-#define __ARM_COMPUTE_GRAPH_NE_ACTIVATIONLAYER_OPERATION_H__
-
-#include "arm_compute/graph/IOperation.h"
-
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/Types.h"
-#include "arm_compute/runtime/IFunction.h"
-
-#include <memory>
-
-namespace arm_compute
-{
-namespace graph
-{
-/** Operation functor interface */
-class NEActivationLayerOperation : public IOperation
-{
-public:
- // Inherited methods overriden:
- std::unique_ptr<arm_compute::IFunction> configure(NodeContext &ctx) final;
- TargetHint target() const final;
-};
-} // namespace graph
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_GRAPH_NE_ACTIVATIONLAYER_OPERATION_H__ */
diff --git a/src/graph/NodeContext.cpp b/src/graph/NodeContext.cpp
index 5b0dc6c5d8..2aa5aa13e8 100644
--- a/src/graph/NodeContext.cpp
+++ b/src/graph/NodeContext.cpp
@@ -25,6 +25,11 @@
using namespace arm_compute::graph;
+void NodeContext::set_target(TargetHint target)
+{
+ _target = target;
+}
+
void NodeContext::add_input(arm_compute::ITensor *input)
{
ARM_COMPUTE_ERROR_ON(input == nullptr);
@@ -37,7 +42,7 @@ void NodeContext::add_output(arm_compute::ITensor *output)
_outputs.emplace_back(output);
}
-std::string NodeContext::operation() const
+OperationType NodeContext::operation() const
{
return _operation;
}
diff --git a/src/graph/OperationRegistry.cpp b/src/graph/OperationRegistry.cpp
index 7de714b214..651653f19c 100644
--- a/src/graph/OperationRegistry.cpp
+++ b/src/graph/OperationRegistry.cpp
@@ -36,7 +36,7 @@ OperationRegistry &OperationRegistry::get()
return instance;
}
-IOperation *OperationRegistry::find_operation(const std::string &operation, TargetHint target)
+IOperation *OperationRegistry::find_operation(OperationType operation, TargetHint target)
{
ARM_COMPUTE_ERROR_ON(!contains(operation, target));
auto it = std::find_if(_registered_ops[operation].begin(), _registered_ops[operation].end(), [&](const std::unique_ptr<IOperation> &op)
@@ -47,7 +47,7 @@ IOperation *OperationRegistry::find_operation(const std::string &operation, Targ
return (*it).get();
}
-bool OperationRegistry::contains(const std::string &operation, TargetHint target) const
+bool OperationRegistry::contains(OperationType operation, TargetHint target) const
{
auto it = _registered_ops.find(operation);
if(it != _registered_ops.end())
diff --git a/src/graph/nodes/ActivationLayer.cpp b/src/graph/nodes/ActivationLayer.cpp
index ea87fd9592..d3352140dc 100644
--- a/src/graph/nodes/ActivationLayer.cpp
+++ b/src/graph/nodes/ActivationLayer.cpp
@@ -25,6 +25,7 @@
#include "arm_compute/graph/NodeContext.h"
#include "arm_compute/graph/OperationRegistry.h"
+#include "support/ToolchainSupport.h"
using namespace arm_compute::graph;
@@ -38,20 +39,17 @@ std::unique_ptr<arm_compute::IFunction> ActivationLayer::instantiate_node(GraphC
ARM_COMPUTE_ERROR_ON(input == nullptr || input->tensor() == nullptr);
ARM_COMPUTE_ERROR_ON(output == nullptr || output->tensor() == nullptr);
- std::unique_ptr<arm_compute::IFunction> func;
- _target_hint = ctx.hints().target_hint();
-
arm_compute::ITensor *in = input->tensor();
arm_compute::ITensor *out = output->tensor();
+ _target_hint = ctx.hints().target_hint();
// Create node context
- NodeContext node_ctx("ActivationLayer");
+ NodeContext node_ctx(OperationType::ActivationLayer);
+ node_ctx.set_target(_target_hint);
node_ctx.add_input(in);
node_ctx.add_output(out);
node_ctx.add_parameter<ActivationLayerInfo>("ActivationLayerInfo", _activation_info);
// Get function
- func = OperationRegistry::get().find_operation("ActivationLayer", _target_hint)->configure(node_ctx);
-
- return func;
+ return OperationRegistry::get().find_operation(OperationType::ActivationLayer, _target_hint)->configure(node_ctx);
}
diff --git a/src/graph/nodes/BatchNormalizationLayer.cpp b/src/graph/nodes/BatchNormalizationLayer.cpp
index db809f4ee4..bce19016d7 100644
--- a/src/graph/nodes/BatchNormalizationLayer.cpp
+++ b/src/graph/nodes/BatchNormalizationLayer.cpp
@@ -23,60 +23,20 @@
*/
#include "arm_compute/graph/nodes/BatchNormalizationLayer.h"
-#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h"
-#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/graph/NodeContext.h"
+#include "arm_compute/graph/OperationRegistry.h"
#include "support/ToolchainSupport.h"
-#include "utils/TypePrinter.h"
using namespace arm_compute::graph;
-namespace
-{
-template <typename BatchBatchNormalizationLayer, typename TensorType, TargetHint target_hint>
-std::unique_ptr<arm_compute::IFunction> instantiate_function(arm_compute::ITensor *input, arm_compute::ITensor *output, Tensor &mean, Tensor &var, Tensor &beta, Tensor &gamma, float epsilon)
-{
- auto norm = arm_compute::support::cpp14::make_unique<BatchBatchNormalizationLayer>();
- norm->configure(
- dynamic_cast<TensorType *>(input),
- dynamic_cast<TensorType *>(output),
- dynamic_cast<TensorType *>(mean.set_target(target_hint)),
- dynamic_cast<TensorType *>(var.set_target(target_hint)),
- dynamic_cast<TensorType *>(beta.set_target(target_hint)),
- dynamic_cast<TensorType *>(gamma.set_target(target_hint)),
- epsilon);
-
- return std::move(norm);
-}
-
-template <TargetHint target_hint>
-std::unique_ptr<arm_compute::IFunction> instantiate(arm_compute::ITensor *input, arm_compute::ITensor *output, Tensor &mean, Tensor &var, Tensor &beta, Tensor &gamma, float epsilon);
-
-template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(arm_compute::ITensor *input, arm_compute::ITensor *output, Tensor &mean, Tensor &var, Tensor &beta, Tensor &gamma,
- float epsilon)
-{
- return instantiate_function<arm_compute::CLBatchNormalizationLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, output, mean, var, beta, gamma, epsilon);
-}
-
-template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(arm_compute::ITensor *input, arm_compute::ITensor *output, Tensor &mean, Tensor &var, Tensor &beta, Tensor &gamma, float epsilon)
-{
- return instantiate_function<arm_compute::NEBatchNormalizationLayer, arm_compute::ITensor, TargetHint::NEON>(input, output, mean, var, beta, gamma, epsilon);
-}
-} // namespace
-
std::unique_ptr<arm_compute::IFunction> BatchNormalizationLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
{
ARM_COMPUTE_ERROR_ON(input == nullptr || input->tensor() == nullptr);
ARM_COMPUTE_ERROR_ON(output == nullptr || output->tensor() == nullptr);
- std::unique_ptr<arm_compute::IFunction> func;
- _target_hint = ctx.hints().target_hint();
-
arm_compute::ITensor *in = input->tensor();
arm_compute::ITensor *out = output->tensor();
+ _target_hint = ctx.hints().target_hint();
unsigned int batch_norm_size = in->info()->dimension(2);
if(_mean.tensor() == nullptr)
@@ -96,21 +56,17 @@ std::unique_ptr<arm_compute::IFunction> BatchNormalizationLayer::instantiate_nod
_gamma.set_info(TensorInfo(TensorShape(batch_norm_size), in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position()));
}
- if(_target_hint == TargetHint::OPENCL)
- {
- func = instantiate<TargetHint::OPENCL>(in, out, _mean, _var, _beta, _gamma, _epsilon);
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLBatchNormalizationLayer");
- }
- else
- {
- func = instantiate<TargetHint::NEON>(in, out, _mean, _var, _beta, _gamma, _epsilon);
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEBatchNormalizationLayer");
- }
-
- ARM_COMPUTE_LOG_GRAPH_INFO(" Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << std::endl);
+ // Create node context
+ NodeContext node_ctx(OperationType::BatchNormalizationLayer);
+ node_ctx.set_target(_target_hint);
+ node_ctx.add_input(in);
+ node_ctx.add_input(_mean.tensor());
+ node_ctx.add_input(_var.tensor());
+ node_ctx.add_input(_beta.tensor());
+ node_ctx.add_input(_gamma.tensor());
+ node_ctx.add_output(out);
+ node_ctx.add_parameter<float>("epsilon", _epsilon);
- return func;
+ // Get function
+ return OperationRegistry::get().find_operation(OperationType::BatchNormalizationLayer, _target_hint)->configure(node_ctx);
} \ No newline at end of file
diff --git a/src/graph/nodes/DepthConcatenateLayer.cpp b/src/graph/nodes/DepthConcatenateLayer.cpp
deleted file mode 100644
index 2171db3a3e..0000000000
--- a/src/graph/nodes/DepthConcatenateLayer.cpp
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <algorithm>
-#include <vector>
-
-#include "arm_compute/graph/nodes/DepthConcatenateLayer.h"
-
-#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/CL/functions/CLDepthConcatenate.h"
-#include "arm_compute/runtime/NEON/functions/NEDepthConcatenate.h"
-#include "arm_compute/runtime/Tensor.h"
-#include "support/ToolchainSupport.h"
-#include "utils/TypePrinter.h"
-
-using namespace arm_compute::graph;
-
-namespace
-{
-template <typename DepthConcatenationType, typename TensorType, TargetHint hint>
-std::unique_ptr<arm_compute::IFunction> instantiate_function(std::vector<arm_compute::ITensor *> inputs, arm_compute::ITensor *output)
-{
- auto depth_concat = arm_compute::support::cpp14::make_unique<DepthConcatenationType>();
- std::vector<TensorType *> casted_inputs;
- std::transform(inputs.begin(), inputs.end(), std::back_inserter(casted_inputs), [](arm_compute::ITensor * input)
- {
- return dynamic_cast<TensorType *>(input);
- });
- depth_concat->configure(
- casted_inputs,
- dynamic_cast<TensorType *>(output));
-
- return std::move(depth_concat);
-}
-
-template <TargetHint hint>
-std::unique_ptr<arm_compute::IFunction> instantiate(std::vector<arm_compute::ITensor *> inputs, arm_compute::ITensor *output);
-
-template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(std::vector<arm_compute::ITensor *> inputs, arm_compute::ITensor *output)
-{
- return instantiate_function<arm_compute::CLDepthConcatenate, arm_compute::ICLTensor, TargetHint::OPENCL>(std::move(inputs), output);
-}
-
-template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(std::vector<arm_compute::ITensor *> inputs, arm_compute::ITensor *output)
-{
- return instantiate_function<arm_compute::NEDepthConcatenate, arm_compute::ITensor, TargetHint::NEON>(std::move(inputs), output);
-}
-} // namespace
-
-std::unique_ptr<arm_compute::IFunction> DepthConcatenateLayer::instantiate_node(GraphContext &ctx, std::vector<arm_compute::ITensor *> inputs, arm_compute::ITensor *output)
-{
- std::unique_ptr<arm_compute::IFunction> func;
- _hint = ctx.hints().target_hint();
- _inputs = std::move(inputs);
- _output = output;
-
- if(_hint == TargetHint::OPENCL)
- {
- func = instantiate<TargetHint::OPENCL>(_inputs, _output);
- }
- else
- {
- func = instantiate<TargetHint::NEON>(_inputs, _output);
- }
- return func;
-}
-
-void DepthConcatenateLayer::print_info()
-{
- if(_hint == TargetHint::OPENCL)
- {
- std::cout << "Instantiating NEDepthConcatenate";
- }
- else
- {
- std::cout << "Instantiating CLDepthConcatenate";
- }
-
- for(const auto &i : _inputs)
- {
- std::cout << " Input: " << i->info()->tensor_shape();
- }
- std::cout << " Output: " << _output->info()->tensor_shape();
-}
diff --git a/src/graph/nodes/FloorLayer.cpp b/src/graph/nodes/FloorLayer.cpp
index 45e2c3ee41..21c82b8657 100644
--- a/src/graph/nodes/FloorLayer.cpp
+++ b/src/graph/nodes/FloorLayer.cpp
@@ -23,70 +23,27 @@
*/
#include "arm_compute/graph/nodes/FloorLayer.h"
-#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/CL/functions/CLFloor.h"
-#include "arm_compute/runtime/NEON/functions/NEFloor.h"
-#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/graph/NodeContext.h"
+#include "arm_compute/graph/OperationRegistry.h"
#include "support/ToolchainSupport.h"
-#include "utils/TypePrinter.h"
using namespace arm_compute::graph;
-namespace
-{
-template <typename FloorType, typename TensorType, TargetHint hint>
-std::unique_ptr<arm_compute::IFunction> instantiate_function(arm_compute::ITensor *input, arm_compute::ITensor *output)
-{
- auto floorlayer = arm_compute::support::cpp14::make_unique<FloorType>();
- floorlayer->configure(
- dynamic_cast<TensorType *>(input),
- dynamic_cast<TensorType *>(output));
-
- return std::move(floorlayer);
-}
-
-template <TargetHint target_hint>
-std::unique_ptr<arm_compute::IFunction> instantiate(arm_compute::ITensor *input, arm_compute::ITensor *output);
-
-template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(arm_compute::ITensor *input, arm_compute::ITensor *output)
-{
- return instantiate_function<arm_compute::CLFloor, arm_compute::ICLTensor, TargetHint::OPENCL>(input, output);
-}
-
-template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(arm_compute::ITensor *input, arm_compute::ITensor *output)
-{
- return instantiate_function<arm_compute::NEFloor, arm_compute::ITensor, TargetHint::NEON>(input, output);
-}
-} // namespace
-
std::unique_ptr<arm_compute::IFunction> FloorLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
{
ARM_COMPUTE_ERROR_ON(input == nullptr || input->tensor() == nullptr);
ARM_COMPUTE_ERROR_ON(output == nullptr || output->tensor() == nullptr);
- std::unique_ptr<arm_compute::IFunction> func;
- _target_hint = ctx.hints().target_hint();
-
arm_compute::ITensor *in = input->tensor();
arm_compute::ITensor *out = output->tensor();
+ _target_hint = ctx.hints().target_hint();
- if(_target_hint == TargetHint::OPENCL)
- {
- func = instantiate<TargetHint::OPENCL>(in, out);
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLFloorLayer");
- }
- else
- {
- func = instantiate<TargetHint::NEON>(in, out);
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEFloorLayer");
- }
-
- ARM_COMPUTE_LOG_GRAPH_INFO(" Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << std::endl);
+ // Create node context
+ NodeContext node_ctx(OperationType::FloorLayer);
+ node_ctx.set_target(_target_hint);
+ node_ctx.add_input(in);
+ node_ctx.add_output(out);
- return func;
+ // Get function
+ return OperationRegistry::get().find_operation(OperationType::FloorLayer, _target_hint)->configure(node_ctx);
}
diff --git a/src/graph/nodes/FullyConnectedLayer.cpp b/src/graph/nodes/FullyConnectedLayer.cpp
index 5f4807ad48..39ed827631 100644
--- a/src/graph/nodes/FullyConnectedLayer.cpp
+++ b/src/graph/nodes/FullyConnectedLayer.cpp
@@ -23,11 +23,9 @@
*/
#include "arm_compute/graph/nodes/FullyConnectedLayer.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
+#include "arm_compute/graph/NodeContext.h"
+#include "arm_compute/graph/OperationRegistry.h"
#include "support/ToolchainSupport.h"
-#include "utils/TypePrinter.h"
using namespace arm_compute::graph;
@@ -43,44 +41,6 @@ TensorShape calculate_fullyconnected_layer_output_shape(const TensorShape &input
}
return TensorShape(output_neurons, batches);
}
-template <typename FullyConnectedType, typename TensorType, TargetHint target_hint>
-std::unique_ptr<arm_compute::IFunction> instantiate_function(arm_compute::ITensor *input, Tensor &weights, Tensor &biases, arm_compute::ITensor *output)
-{
- bool weights_are_loaded = weights.tensor() != nullptr;
- bool biases_are_loaded = biases.tensor() != nullptr;
-
- auto conv = arm_compute::support::cpp14::make_unique<FullyConnectedType>();
- conv->configure(
- dynamic_cast<TensorType *>(input),
- dynamic_cast<TensorType *>(weights.set_target(target_hint)),
- dynamic_cast<TensorType *>(biases.set_target(target_hint)),
- dynamic_cast<TensorType *>(output));
- if(!weights_are_loaded)
- {
- weights.allocate_and_fill_if_needed();
- }
- if(!biases_are_loaded)
- {
- biases.allocate_and_fill_if_needed();
- }
-
- return std::move(conv);
-}
-
-template <TargetHint target_hint>
-std::unique_ptr<arm_compute::IFunction> instantiate(arm_compute::ITensor *input, Tensor &weights, Tensor &biases, arm_compute::ITensor *output);
-
-template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(arm_compute::ITensor *input, Tensor &weights, Tensor &biases, arm_compute::ITensor *output)
-{
- return instantiate_function<arm_compute::CLFullyConnectedLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, weights, biases, output);
-}
-
-template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(arm_compute::ITensor *input, Tensor &weights, Tensor &biases, arm_compute::ITensor *output)
-{
- return instantiate_function<arm_compute::NEFullyConnectedLayer, arm_compute::ITensor, TargetHint::NEON>(input, weights, biases, output);
-}
} // namespace
std::unique_ptr<arm_compute::IFunction> FullyConnectedLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
@@ -90,6 +50,7 @@ std::unique_ptr<arm_compute::IFunction> FullyConnectedLayer::instantiate_node(Gr
arm_compute::ITensor *in = input->tensor();
arm_compute::ITensor *out = output->tensor();
+ _target_hint = ctx.hints().target_hint();
if(_weights.tensor() == nullptr)
{
@@ -116,26 +77,27 @@ std::unique_ptr<arm_compute::IFunction> FullyConnectedLayer::instantiate_node(Gr
calculate_fullyconnected_layer_output_shape(in->info()->tensor_shape(), _num_neurons),
in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position());
- std::unique_ptr<arm_compute::IFunction> func;
- _target_hint = ctx.hints().target_hint();
+ bool weights_are_loaded = _weights.tensor() != nullptr;
+ bool biases_are_loaded = _biases.tensor() != nullptr;
+
+ // Create node context
+ NodeContext node_ctx(OperationType::FullyConnectedLayer);
+ node_ctx.set_target(_target_hint);
+ node_ctx.add_input(in);
+ node_ctx.add_input(_weights.set_target(_target_hint));
+ node_ctx.add_input(_biases.set_target(_target_hint));
+ node_ctx.add_output(out);
- if(_target_hint == TargetHint::OPENCL)
+ // Fill weights and biases if not already loaded
+ if(!weights_are_loaded)
{
- func = instantiate<TargetHint::OPENCL>(in, _weights, _biases, out);
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLFullyConnectedLayer");
+ _weights.allocate_and_fill_if_needed();
}
- else
+ if(!biases_are_loaded)
{
- func = instantiate<TargetHint::NEON>(in, _weights, _biases, out);
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEFullyConnectedLayer");
+ _biases.allocate_and_fill_if_needed();
}
- ARM_COMPUTE_LOG_GRAPH_INFO(" Type: " << in->info()->data_type()
- << " Input Shape: " << in->info()->tensor_shape()
- << " Weights shape: " << _weights.info().tensor_shape()
- << " Biases Shape: " << _biases.info().tensor_shape()
- << " Output Shape: " << out->info()->tensor_shape()
- << std::endl);
-
- return func;
+ // Get function
+ return OperationRegistry::get().find_operation(OperationType::FullyConnectedLayer, _target_hint)->configure(node_ctx);
}
diff --git a/src/graph/nodes/L2NormalizeLayer.cpp b/src/graph/nodes/L2NormalizeLayer.cpp
index c5689e159a..bcc3b94178 100644
--- a/src/graph/nodes/L2NormalizeLayer.cpp
+++ b/src/graph/nodes/L2NormalizeLayer.cpp
@@ -23,72 +23,34 @@
*/
#include "arm_compute/graph/nodes/L2NormalizeLayer.h"
-#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/CL/functions/CLL2Normalize.h"
-#include "arm_compute/runtime/NEON/functions/NEL2Normalize.h"
-#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/graph/NodeContext.h"
+#include "arm_compute/graph/OperationRegistry.h"
#include "support/ToolchainSupport.h"
-#include "utils/TypePrinter.h"
using namespace arm_compute::graph;
-namespace
+L2NormalizeLayer::L2NormalizeLayer(unsigned int axis, float epsilon)
+ : _axis(axis), _epsilon(epsilon)
{
-template <typename L2NormalizeType, typename TensorType, TargetHint hint>
-std::unique_ptr<arm_compute::IFunction> instantiate_function(arm_compute::ITensor *input, arm_compute::ITensor *output, unsigned int axis, float epsilon)
-{
- auto l2norm = arm_compute::support::cpp14::make_unique<L2NormalizeType>();
- l2norm->configure(
- dynamic_cast<TensorType *>(input),
- dynamic_cast<TensorType *>(output),
- axis,
- epsilon);
-
- return std::move(l2norm);
}
-template <TargetHint target_hint>
-std::unique_ptr<arm_compute::IFunction> instantiate(arm_compute::ITensor *input, arm_compute::ITensor *output, unsigned int axis, float epsilon);
-
-template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(arm_compute::ITensor *input, arm_compute::ITensor *output, unsigned int axis, float epsilon)
-{
- return instantiate_function<arm_compute::CLL2Normalize, arm_compute::ICLTensor, TargetHint::OPENCL>(input, output, axis, epsilon);
-}
-
-template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(arm_compute::ITensor *input, arm_compute::ITensor *output, unsigned int axis, float epsilon)
-{
- return instantiate_function<arm_compute::NEL2Normalize, arm_compute::ITensor, TargetHint::NEON>(input, output, axis, epsilon);
-}
-} // namespace
-
std::unique_ptr<arm_compute::IFunction> L2NormalizeLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
{
ARM_COMPUTE_ERROR_ON(input == nullptr || input->tensor() == nullptr);
ARM_COMPUTE_ERROR_ON(output == nullptr || output->tensor() == nullptr);
- std::unique_ptr<arm_compute::IFunction> func;
- _target_hint = ctx.hints().target_hint();
-
arm_compute::ITensor *in = input->tensor();
arm_compute::ITensor *out = output->tensor();
-
- if(_target_hint == TargetHint::OPENCL)
- {
- func = instantiate<TargetHint::OPENCL>(in, out, _axis, _epsilon);
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLL2NormalizeLayer");
- }
- else
- {
- func = instantiate<TargetHint::NEON>(in, out, _axis, _epsilon);
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEL2NormalizeLayer");
- }
-
- ARM_COMPUTE_LOG_GRAPH_INFO(" Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << std::endl);
-
- return func;
+ _target_hint = ctx.hints().target_hint();
+
+ // Create node context
+ NodeContext node_ctx(OperationType::L2NormalizeLayer);
+ node_ctx.set_target(_target_hint);
+ node_ctx.add_input(in);
+ node_ctx.add_output(out);
+ node_ctx.add_parameter<unsigned int>("axis", _axis);
+ node_ctx.add_parameter<float>("epsilon", _epsilon);
+
+ // Get function
+ return OperationRegistry::get().find_operation(OperationType::L2NormalizeLayer, _target_hint)->configure(node_ctx);
}
diff --git a/src/graph/nodes/NormalizationLayer.cpp b/src/graph/nodes/NormalizationLayer.cpp
index 680925a2b9..5036231a36 100644
--- a/src/graph/nodes/NormalizationLayer.cpp
+++ b/src/graph/nodes/NormalizationLayer.cpp
@@ -23,45 +23,12 @@
*/
#include "arm_compute/graph/nodes/NormalizationLayer.h"
-#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/CL/functions/CLNormalizationLayer.h"
-#include "arm_compute/runtime/NEON/functions/NENormalizationLayer.h"
-#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/graph/NodeContext.h"
+#include "arm_compute/graph/OperationRegistry.h"
#include "support/ToolchainSupport.h"
-#include "utils/TypePrinter.h"
using namespace arm_compute::graph;
-namespace
-{
-template <typename NormalizationType, typename TensorType, TargetHint target_hint>
-std::unique_ptr<arm_compute::IFunction> instantiate_function(arm_compute::ITensor *input, arm_compute::ITensor *output, const NormalizationLayerInfo &norm_info)
-{
- auto norm = arm_compute::support::cpp14::make_unique<NormalizationType>();
- norm->configure(
- dynamic_cast<TensorType *>(input),
- dynamic_cast<TensorType *>(output),
- norm_info);
-
- return std::move(norm);
-}
-
-template <TargetHint target_hint>
-std::unique_ptr<arm_compute::IFunction> instantiate(arm_compute::ITensor *input, arm_compute::ITensor *output, const NormalizationLayerInfo &norm_info);
-
-template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(arm_compute::ITensor *input, arm_compute::ITensor *output, const NormalizationLayerInfo &norm_info)
-{
- return instantiate_function<arm_compute::CLNormalizationLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, output, norm_info);
-}
-
-template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(arm_compute::ITensor *input, arm_compute::ITensor *output, const NormalizationLayerInfo &norm_info)
-{
- return instantiate_function<arm_compute::NENormalizationLayer, arm_compute::ITensor, TargetHint::NEON>(input, output, norm_info);
-}
-} // namespace
-
NormalizationLayer::NormalizationLayer(const NormalizationLayerInfo norm_info)
: _norm_info(norm_info)
{
@@ -72,28 +39,17 @@ std::unique_ptr<arm_compute::IFunction> NormalizationLayer::instantiate_node(Gra
ARM_COMPUTE_ERROR_ON(input == nullptr || input->tensor() == nullptr);
ARM_COMPUTE_ERROR_ON(output == nullptr || output->tensor() == nullptr);
- std::unique_ptr<arm_compute::IFunction> func;
- _target_hint = ctx.hints().target_hint();
-
arm_compute::ITensor *in = input->tensor();
arm_compute::ITensor *out = output->tensor();
+ _target_hint = ctx.hints().target_hint();
- if(_target_hint == TargetHint::OPENCL)
- {
- func = instantiate<TargetHint::OPENCL>(in, out, _norm_info);
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLNormalizationLayer");
- }
- else
- {
- func = instantiate<TargetHint::NEON>(in, out, _norm_info);
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NENormalizationLayer");
- }
-
- ARM_COMPUTE_LOG_GRAPH_INFO(" Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << " Normalization info: " << _norm_info
- << std::endl);
+ // Create node context
+ NodeContext node_ctx(OperationType::NormalizationLayer);
+ node_ctx.set_target(_target_hint);
+ node_ctx.add_input(in);
+ node_ctx.add_output(out);
+ node_ctx.add_parameter<NormalizationLayerInfo>("NormalizationLayerInfo", _norm_info);
- return func;
+ // Get function
+ return OperationRegistry::get().find_operation(OperationType::NormalizationLayer, _target_hint)->configure(node_ctx);
}
diff --git a/src/graph/nodes/PoolingLayer.cpp b/src/graph/nodes/PoolingLayer.cpp
index 63579155cb..26df585e3b 100644
--- a/src/graph/nodes/PoolingLayer.cpp
+++ b/src/graph/nodes/PoolingLayer.cpp
@@ -23,45 +23,12 @@
*/
#include "arm_compute/graph/nodes/PoolingLayer.h"
-#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEPoolingLayer.h"
-#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/graph/NodeContext.h"
+#include "arm_compute/graph/OperationRegistry.h"
#include "support/ToolchainSupport.h"
-#include "utils/TypePrinter.h"
using namespace arm_compute::graph;
-namespace
-{
-template <typename PoolingType, typename TensorType, TargetHint target_hint>
-std::unique_ptr<arm_compute::IFunction> instantiate_function(arm_compute::ITensor *input, arm_compute::ITensor *output, const PoolingLayerInfo &pool_info)
-{
- auto pool = arm_compute::support::cpp14::make_unique<PoolingType>();
- pool->configure(
- dynamic_cast<TensorType *>(input),
- dynamic_cast<TensorType *>(output),
- pool_info);
-
- return std::move(pool);
-}
-
-template <TargetHint target_hint>
-std::unique_ptr<arm_compute::IFunction> instantiate(arm_compute::ITensor *input, arm_compute::ITensor *output, const PoolingLayerInfo &pool_info);
-
-template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(arm_compute::ITensor *input, arm_compute::ITensor *output, const PoolingLayerInfo &pool_info)
-{
- return instantiate_function<arm_compute::CLPoolingLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, output, pool_info);
-}
-
-template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(arm_compute::ITensor *input, arm_compute::ITensor *output, const PoolingLayerInfo &pool_info)
-{
- return instantiate_function<arm_compute::NEPoolingLayer, arm_compute::ITensor, TargetHint::NEON>(input, output, pool_info);
-}
-} // namespace
-
PoolingLayer::PoolingLayer(const PoolingLayerInfo pool_info)
: _pool_info(pool_info)
{
@@ -72,27 +39,17 @@ std::unique_ptr<arm_compute::IFunction> PoolingLayer::instantiate_node(GraphCont
ARM_COMPUTE_ERROR_ON(input == nullptr || input->tensor() == nullptr);
ARM_COMPUTE_ERROR_ON(output == nullptr || output->tensor() == nullptr);
- std::unique_ptr<arm_compute::IFunction> func;
- _target_hint = ctx.hints().target_hint();
-
arm_compute::ITensor *in = input->tensor();
arm_compute::ITensor *out = output->tensor();
+ _target_hint = ctx.hints().target_hint();
- if(_target_hint == TargetHint::OPENCL)
- {
- func = instantiate<TargetHint::OPENCL>(in, out, _pool_info);
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLPoolingLayer");
- }
- else
- {
- func = instantiate<TargetHint::NEON>(in, out, _pool_info);
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEPoolingLayer");
- }
-
- ARM_COMPUTE_LOG_GRAPH_INFO(" Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << " Pooling info: " << _pool_info << std::endl);
+ // Create node context
+ NodeContext node_ctx(OperationType::PoolingLayer);
+ node_ctx.set_target(_target_hint);
+ node_ctx.add_input(in);
+ node_ctx.add_output(out);
+ node_ctx.add_parameter<PoolingLayerInfo>("PoolingLayerInfo", _pool_info);
- return func;
+ // Get function
+ return OperationRegistry::get().find_operation(OperationType::PoolingLayer, _target_hint)->configure(node_ctx);
}
diff --git a/src/graph/nodes/SoftmaxLayer.cpp b/src/graph/nodes/SoftmaxLayer.cpp
index 3cdbc9c96a..62057c770c 100644
--- a/src/graph/nodes/SoftmaxLayer.cpp
+++ b/src/graph/nodes/SoftmaxLayer.cpp
@@ -23,70 +23,27 @@
*/
#include "arm_compute/graph/nodes/SoftmaxLayer.h"
-#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"
-#include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"
-#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/graph/NodeContext.h"
+#include "arm_compute/graph/OperationRegistry.h"
#include "support/ToolchainSupport.h"
-#include "utils/TypePrinter.h"
using namespace arm_compute::graph;
-namespace
-{
-template <typename SoftmaxType, typename TensorType, TargetHint hint>
-std::unique_ptr<arm_compute::IFunction> instantiate_function(arm_compute::ITensor *input, arm_compute::ITensor *output)
-{
- auto softmax = arm_compute::support::cpp14::make_unique<SoftmaxType>();
- softmax->configure(
- dynamic_cast<TensorType *>(input),
- dynamic_cast<TensorType *>(output));
-
- return std::move(softmax);
-}
-
-template <TargetHint target_hint>
-std::unique_ptr<arm_compute::IFunction> instantiate(arm_compute::ITensor *input, arm_compute::ITensor *output);
-
-template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(arm_compute::ITensor *input, arm_compute::ITensor *output)
-{
- return instantiate_function<arm_compute::CLSoftmaxLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, output);
-}
-
-template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(arm_compute::ITensor *input, arm_compute::ITensor *output)
-{
- return instantiate_function<arm_compute::NESoftmaxLayer, arm_compute::ITensor, TargetHint::NEON>(input, output);
-}
-} // namespace
-
std::unique_ptr<arm_compute::IFunction> SoftmaxLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
{
ARM_COMPUTE_ERROR_ON(input == nullptr || input->tensor() == nullptr);
ARM_COMPUTE_ERROR_ON(output == nullptr || output->tensor() == nullptr);
- std::unique_ptr<arm_compute::IFunction> func;
- _target_hint = ctx.hints().target_hint();
-
arm_compute::ITensor *in = input->tensor();
arm_compute::ITensor *out = output->tensor();
+ _target_hint = ctx.hints().target_hint();
- if(_target_hint == TargetHint::OPENCL)
- {
- func = instantiate<TargetHint::OPENCL>(in, out);
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLSoftmaxLayer");
- }
- else
- {
- func = instantiate<TargetHint::NEON>(in, out);
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NESoftmaxLayer");
- }
-
- ARM_COMPUTE_LOG_GRAPH_INFO(" Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << std::endl);
+ // Create node context
+ NodeContext node_ctx(OperationType::SoftmaxLayer);
+ node_ctx.set_target(_target_hint);
+ node_ctx.add_input(in);
+ node_ctx.add_output(out);
- return func;
+ // Get function
+ return OperationRegistry::get().find_operation(OperationType::SoftmaxLayer, _target_hint)->configure(node_ctx);
}
diff --git a/src/graph/operations/CL/CLActivationLayerOperation.cpp b/src/graph/operations/CL/CLActivationLayerOperation.cpp
deleted file mode 100644
index d0045e2500..0000000000
--- a/src/graph/operations/CL/CLActivationLayerOperation.cpp
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/operations/CL/CLActivationLayerOperation.h"
-
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Error.h"
-#include "arm_compute/graph/OperationRegistrar.h"
-#include "arm_compute/graph/Types.h"
-#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
-#include "support/ToolchainSupport.h"
-#include "utils/GraphTypePrinter.h"
-#include "utils/TypePrinter.h"
-
-#include <memory>
-
-using namespace arm_compute::graph;
-
-std::unique_ptr<arm_compute::IFunction> CLActivationLayerOperation::configure(NodeContext &ctx)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
- const auto act_info = ctx.parameter<ActivationLayerInfo>("ActivationLayerInfo");
-
- // Create and configure function
- auto activation = arm_compute::support::cpp14::make_unique<CLActivationLayer>();
- activation->configure(in, out, act_info);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLActivationLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << " Activation function: " << act_info.activation()
- << " a: " << act_info.a()
- << " b: " << act_info.b()
- << std::endl);
-
- return std::move(activation);
-}
-
-TargetHint CLActivationLayerOperation::target() const
-{
- return TargetHint::OPENCL;
-}
-
-static detail::OperationRegistrar<CLActivationLayerOperation> registrar("ActivationLayer"); \ No newline at end of file
diff --git a/src/graph/operations/CLSimpleOperations.cpp b/src/graph/operations/CLSimpleOperations.cpp
new file mode 100644
index 0000000000..b4c217b1a4
--- /dev/null
+++ b/src/graph/operations/CLSimpleOperations.cpp
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/graph/IOperation.h"
+#include "arm_compute/graph/NodeContext.h"
+#include "arm_compute/graph/OperationRegistrar.h"
+#include "arm_compute/graph/Types.h"
+#include "arm_compute/runtime/CL/CLFunctions.h"
+#include "support/ToolchainSupport.h"
+#include "utils/GraphTypePrinter.h"
+#include "utils/TypePrinter.h"
+
+#include <memory>
+
+using namespace arm_compute::graph;
+
+/* Activation Layer */
+REGISTER_SIMPLE_OPERATION(CLActivationLayerOperation, OPENCL, OperationType::ActivationLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info
+ auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
+ auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
+ const auto act_info = ctx.parameter<ActivationLayerInfo>("ActivationLayerInfo");
+
+ // Create and configure function
+ auto activation = arm_compute::support::cpp14::make_unique<arm_compute::CLActivationLayer>();
+ activation->configure(in, out, act_info);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLActivationLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << " Activation function: " << act_info.activation()
+ << " a: " << act_info.a()
+ << " b: " << act_info.b()
+ << std::endl);
+
+ return std::move(activation);
+}
+
+/* Batch Normalization Layer */
+REGISTER_SIMPLE_OPERATION(CLBatchNormalizationLayerOperation, OPENCL, OperationType::BatchNormalizationLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 5);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(3)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(4)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info
+ auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
+ auto *mean = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1));
+ auto *var = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2));
+ auto *beta = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(3));
+ auto *gamma = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(4));
+ auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
+ const auto epsilon = ctx.parameter<float>("epsilon");
+
+ // Create and configure function
+ auto batch_norm = arm_compute::support::cpp14::make_unique<arm_compute::CLBatchNormalizationLayer>();
+ batch_norm->configure(in, out, mean, var, beta, gamma, epsilon);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLBatchNormalizationLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << " Mean shape: " << mean->info()->tensor_shape()
+ << " Var shape: " << var->info()->tensor_shape()
+ << " Beta shape: " << beta->info()->tensor_shape()
+ << " Gamma shape: " << gamma->info()->tensor_shape()
+ << " Epsilon: " << epsilon
+ << std::endl);
+
+ return std::move(batch_norm);
+}
+
+/* Floor Layer */
+REGISTER_SIMPLE_OPERATION(CLFloorLayerOperation, OPENCL, OperationType::FloorLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info
+ auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
+ auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
+
+ // Create and configure function
+ auto floor = arm_compute::support::cpp14::make_unique<arm_compute::CLFloor>();
+ floor->configure(in, out);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLFloorLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << std::endl);
+
+ return std::move(floor);
+}
+
+/* Fully Connected Layer */
+REGISTER_SIMPLE_OPERATION(CLFullyConnectedLayer, OPENCL, OperationType::FullyConnectedLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 3);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info
+ auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
+ auto *weights = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1));
+ auto *biases = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2));
+ auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
+
+ // Create and configure function
+ auto fc = arm_compute::support::cpp14::make_unique<arm_compute::CLFullyConnectedLayer>();
+ fc->configure(in, weights, biases, out);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLFullyConnectedLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Weights shape: " << weights->info()->tensor_shape()
+ << " Biases Shape: " << biases->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << std::endl);
+
+ return std::move(fc);
+}
+
+/* L2 Normalize Layer */
+REGISTER_SIMPLE_OPERATION(CLL2NormalizeLayerOperation, OPENCL, OperationType::L2NormalizeLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info
+ auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
+ auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
+ const auto axis = ctx.parameter<unsigned int>("axis");
+ const auto epsilon = ctx.parameter<float>("epsilon");
+
+ // Create and configure function
+ auto l2_norm = arm_compute::support::cpp14::make_unique<arm_compute::CLL2Normalize>();
+ l2_norm->configure(in, out, axis, epsilon);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLL2NormalizeLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << " Axis: " << axis
+ << " Epsilon: " << epsilon
+ << std::endl);
+
+ return std::move(l2_norm);
+}
+
+/* Normalization Layer */
+REGISTER_SIMPLE_OPERATION(CLNormalizationLayerOperation, OPENCL, OperationType::NormalizationLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info
+ auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
+ auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
+ const auto norm_info = ctx.parameter<NormalizationLayerInfo>("NormalizationLayerInfo");
+
+ // Create and configure function
+ auto norm = arm_compute::support::cpp14::make_unique<arm_compute::CLNormalizationLayer>();
+ norm->configure(in, out, norm_info);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLNormalizationLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << " Normalization info: " << norm_info
+ << std::endl);
+
+ return std::move(norm);
+}
+
+/* Pooling Layer */
+REGISTER_SIMPLE_OPERATION(CLPoolingLayerOperation, OPENCL, OperationType::PoolingLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info
+ auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
+ auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
+ const auto pool_info = ctx.parameter<PoolingLayerInfo>("PoolingLayerInfo");
+
+ // Create and configure function
+ auto pool = arm_compute::support::cpp14::make_unique<arm_compute::CLPoolingLayer>();
+ pool->configure(in, out, pool_info);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLPoolingLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << " Pooling info: " << pool_info
+ << std::endl);
+
+ return std::move(pool);
+}
+
+/* Softmax Layer */
+REGISTER_SIMPLE_OPERATION(CLSoftmaxLayerOperation, OPENCL, OperationType::SoftmaxLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info
+ auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
+ auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
+
+ // Create and configure function
+ auto smx = arm_compute::support::cpp14::make_unique<arm_compute::CLSoftmaxLayer>();
+ smx->configure(in, out);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLSoftmaxLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << std::endl);
+
+ return std::move(smx);
+} \ No newline at end of file
diff --git a/src/graph/operations/NEON/NEActivationLayerOperation.cpp b/src/graph/operations/NEON/NEActivationLayerOperation.cpp
deleted file mode 100644
index 355fd38f67..0000000000
--- a/src/graph/operations/NEON/NEActivationLayerOperation.cpp
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/operations/NEON/NEActivationLayerOperation.h"
-
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/graph/OperationRegistrar.h"
-#include "arm_compute/graph/Types.h"
-#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
-#include "support/ToolchainSupport.h"
-#include "utils/GraphTypePrinter.h"
-#include "utils/TypePrinter.h"
-
-#include <memory>
-
-using namespace arm_compute::graph;
-
-std::unique_ptr<arm_compute::IFunction> NEActivationLayerOperation::configure(NodeContext &ctx)
-{
- ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
- ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
- ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
- // Extract IO and info
- auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
- auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
- const auto act_info = ctx.parameter<ActivationLayerInfo>("ActivationLayerInfo");
-
- // Create and configure function
- auto activation = arm_compute::support::cpp14::make_unique<NEActivationLayer>();
- activation->configure(in, out, act_info);
-
- // Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEActivationLayer"
- << " Data Type: " << in->info()->data_type()
- << " Input shape: " << in->info()->tensor_shape()
- << " Output shape: " << out->info()->tensor_shape()
- << " Activation function: " << act_info.activation()
- << " a: " << act_info.a()
- << " b: " << act_info.b()
- << std::endl);
-
- return std::move(activation);
-}
-
-TargetHint NEActivationLayerOperation::target() const
-{
- return TargetHint::NEON;
-}
-
-static detail::OperationRegistrar<NEActivationLayerOperation> registrar("ActivationLayer"); \ No newline at end of file
diff --git a/src/graph/operations/NESimpleOperations.cpp b/src/graph/operations/NESimpleOperations.cpp
new file mode 100644
index 0000000000..59f252ae44
--- /dev/null
+++ b/src/graph/operations/NESimpleOperations.cpp
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/graph/IOperation.h"
+#include "arm_compute/graph/NodeContext.h"
+#include "arm_compute/graph/OperationRegistrar.h"
+#include "arm_compute/graph/Types.h"
+#include "arm_compute/runtime/NEON/NEFunctions.h"
+#include "support/ToolchainSupport.h"
+#include "utils/GraphTypePrinter.h"
+#include "utils/TypePrinter.h"
+
+#include <memory>
+
+using namespace arm_compute::graph;
+
+/* Activation Layer */
+REGISTER_SIMPLE_OPERATION(NEActivationLayerOperation, NEON, OperationType::ActivationLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info
+ auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
+ auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
+ const auto act_info = ctx.parameter<ActivationLayerInfo>("ActivationLayerInfo");
+
+ // Create and configure function
+ auto activation = arm_compute::support::cpp14::make_unique<arm_compute::NEActivationLayer>();
+ activation->configure(in, out, act_info);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEActivationLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << " Activation function: " << act_info.activation()
+ << " a: " << act_info.a()
+ << " b: " << act_info.b()
+ << std::endl);
+
+ return std::move(activation);
+}
+
+/* Batch Normalization Layer */
+REGISTER_SIMPLE_OPERATION(NEBatchNormalizationLayerOperation, NEON, OperationType::BatchNormalizationLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 5);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(1)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(2)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(3)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(4)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info
+ auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
+ auto *mean = dynamic_cast<arm_compute::ITensor *>(ctx.input(1));
+ auto *var = dynamic_cast<arm_compute::ITensor *>(ctx.input(2));
+ auto *beta = dynamic_cast<arm_compute::ITensor *>(ctx.input(3));
+ auto *gamma = dynamic_cast<arm_compute::ITensor *>(ctx.input(4));
+ auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
+ const auto epsilon = ctx.parameter<float>("epsilon");
+
+ // Create and configure function
+ auto batch_norm = arm_compute::support::cpp14::make_unique<arm_compute::NEBatchNormalizationLayer>();
+ batch_norm->configure(in, out, mean, var, beta, gamma, epsilon);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEBatchNormalizationLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << " Mean shape: " << mean->info()->tensor_shape()
+ << " Var shape: " << var->info()->tensor_shape()
+ << " Beta shape: " << beta->info()->tensor_shape()
+ << " Gamma shape: " << gamma->info()->tensor_shape()
+ << " Epsilon: " << epsilon
+ << std::endl);
+
+ return std::move(batch_norm);
+}
+
+/* Floor Layer */
+REGISTER_SIMPLE_OPERATION(NEFloorLayerOperation, NEON, OperationType::FloorLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info
+ auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
+ auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
+
+ // Create and configure function
+ auto floor = arm_compute::support::cpp14::make_unique<arm_compute::NEFloor>();
+ floor->configure(in, out);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEFloorLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << std::endl);
+
+ return std::move(floor);
+}
+
+/* Fully Connected Layer */
+REGISTER_SIMPLE_OPERATION(NEFullyConnectedLayer, NEON, OperationType::FullyConnectedLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 3);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(1)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(2)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info
+ auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
+ auto *weights = dynamic_cast<arm_compute::ITensor *>(ctx.input(1));
+ auto *biases = dynamic_cast<arm_compute::ITensor *>(ctx.input(2));
+ auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
+
+ // Create and configure function
+ auto fc = arm_compute::support::cpp14::make_unique<arm_compute::NEFullyConnectedLayer>();
+ fc->configure(in, weights, biases, out);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEFullyConnectedLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Weights shape: " << weights->info()->tensor_shape()
+ << " Biases Shape: " << biases->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << std::endl);
+
+ return std::move(fc);
+}
+
+/* L2 Normalize Layer */
+REGISTER_SIMPLE_OPERATION(NEL2NormalizeLayerOperation, NEON, OperationType::L2NormalizeLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info
+ auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
+ auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
+ const auto axis = ctx.parameter<unsigned int>("axis");
+ const auto epsilon = ctx.parameter<float>("epsilon");
+
+ // Create and configure function
+ auto l2_norm = arm_compute::support::cpp14::make_unique<arm_compute::NEL2Normalize>();
+ l2_norm->configure(in, out, axis, epsilon);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEL2NormalizeLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << " Axis: " << axis
+ << " Epsilon: " << epsilon
+ << std::endl);
+
+ return std::move(l2_norm);
+}
+
+/* Normalization Layer */
+REGISTER_SIMPLE_OPERATION(NENormalizationLayerOperation, NEON, OperationType::NormalizationLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info
+ auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
+ auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
+ const auto norm_info = ctx.parameter<NormalizationLayerInfo>("NormalizationLayerInfo");
+
+ // Create and configure function
+ auto norm = arm_compute::support::cpp14::make_unique<arm_compute::NENormalizationLayer>();
+ norm->configure(in, out, norm_info);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NENormalizationLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << " Normalization info: " << norm_info
+ << std::endl);
+
+ return std::move(norm);
+}
+
+/* Pooling Layer */
+REGISTER_SIMPLE_OPERATION(NEPoolingLayerOperation, NEON, OperationType::PoolingLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info
+ auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
+ auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
+ const auto pool_info = ctx.parameter<PoolingLayerInfo>("PoolingLayerInfo");
+
+ // Create and configure function
+ auto pool = arm_compute::support::cpp14::make_unique<arm_compute::NEPoolingLayer>();
+ pool->configure(in, out, pool_info);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEPoolingLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << " Pooling info: " << pool_info
+ << std::endl);
+
+ return std::move(pool);
+}
+
+/* Softmax Layer */
+REGISTER_SIMPLE_OPERATION(NESoftmaxLayerOperation, NEON, OperationType::SoftmaxLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info
+ auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
+ auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
+
+ // Create and configure function
+ auto smx = arm_compute::support::cpp14::make_unique<arm_compute::NESoftmaxLayer>();
+ smx->configure(in, out);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NESoftmaxLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << std::endl);
+
+ return std::move(smx);
+} \ No newline at end of file