author     Giorgio Arena <giorgio.arena@arm.com>        2018-02-07 15:38:12 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:47:18 +0000
commit     1167487ea8e54a76d0a3625e0aa84e2ad9ffd317 (patch)
tree       287dbc45e895c6b637fecc692c04bd4ae59580ae /src/graph
parent     4e1e7dcd581adecd5ad9c0f9503fc3c43f8222ef (diff)
download   ComputeLibrary-1167487ea8e54a76d0a3625e0aa84e2ad9ffd317.tar.gz
COMPMID-897 Merge batch normalization with bounded relu
Change-Id: I9a607fe620f795cdea1a99fdd3f5f8c2fc76f980
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/119234
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'src/graph')
-rw-r--r--  src/graph/nodes/BatchNormalizationLayer.cpp  |  3
-rw-r--r--  src/graph/operations/CLSimpleOperations.cpp  | 22
-rw-r--r--  src/graph/operations/NESimpleOperations.cpp  | 20
3 files changed, 27 insertions, 18 deletions
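For context, the sketch below is an illustrative usage example and not part of this patch: it shows how a caller could drive the fused path once the graph forwards "act_info", by passing a bounded ReLU to CLBatchNormalizationLayer::configure(), which after this change accepts an ActivationLayerInfo argument. The helper function name, the epsilon value, the upper bound, and the tensor setup are assumptions; only the configure() signature comes from the diff below.

// Illustrative sketch only: configure batch normalization fused with a bounded ReLU.
// Tensor shapes and allocation are assumed to be handled by the caller.
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h"

using namespace arm_compute;

void configure_fused_bn(ICLTensor *in, ICLTensor *out, ICLTensor *mean, ICLTensor *var,
                        ICLTensor *beta, ICLTensor *gamma)
{
    const float epsilon = 0.001f; // assumed value, for illustration
    // Bounded ReLU clamps the normalized output to [0, 6.0]
    const ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0f);

    CLBatchNormalizationLayer batch_norm;
    // With this patch the activation is fused into the batch normalization kernel
    // instead of running as a separate activation layer.
    batch_norm.configure(in, out, mean, var, beta, gamma, epsilon, act_info);
}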
diff --git a/src/graph/nodes/BatchNormalizationLayer.cpp b/src/graph/nodes/BatchNormalizationLayer.cpp
index 7851aa5b9e..24287ac61a 100644
--- a/src/graph/nodes/BatchNormalizationLayer.cpp
+++ b/src/graph/nodes/BatchNormalizationLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -77,6 +77,7 @@ std::unique_ptr<arm_compute::IFunction> BatchNormalizationLayer::instantiate_nod
     node_ctx.add_input(_gamma.tensor());
     node_ctx.add_output(out);
     node_ctx.add_parameter<float>("epsilon", _epsilon);
+    node_ctx.add_parameter<ActivationLayerInfo>("act_info", _act_info);

     // Configure operation
     auto func = OperationRegistry::get().find_operation(OperationType::BatchNormalizationLayer, _target_hint)->configure(node_ctx);
diff --git a/src/graph/operations/CLSimpleOperations.cpp b/src/graph/operations/CLSimpleOperations.cpp
index 61315e73b2..94e3fe15f7 100644
--- a/src/graph/operations/CLSimpleOperations.cpp
+++ b/src/graph/operations/CLSimpleOperations.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -79,17 +79,18 @@ REGISTER_SIMPLE_OPERATION(CLBatchNormalizationLayerOperation, OPENCL, OperationT
     ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);

     // Extract IO and info
-    auto      *in      = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
-    auto      *mean    = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1));
-    auto      *var     = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2));
-    auto      *beta    = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(3));
-    auto      *gamma   = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(4));
-    auto      *out     = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
-    const auto epsilon = ctx.parameter<float>("epsilon");
+    auto      *in       = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
+    auto      *mean     = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1));
+    auto      *var      = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2));
+    auto      *beta     = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(3));
+    auto      *gamma    = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(4));
+    auto      *out      = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
+    const auto epsilon  = ctx.parameter<float>("epsilon");
+    const auto act_info = ctx.parameter<ActivationLayerInfo>("act_info");

     // Create and configure function
     auto batch_norm = arm_compute::support::cpp14::make_unique<arm_compute::CLBatchNormalizationLayer>();
-    batch_norm->configure(in, out, mean, var, beta, gamma, epsilon);
+    batch_norm->configure(in, out, mean, var, beta, gamma, epsilon, act_info);

     // Log info
     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLBatchNormalizationLayer"
@@ -101,6 +102,9 @@ REGISTER_SIMPLE_OPERATION(CLBatchNormalizationLayerOperation, OPENCL, OperationT
                                << " Beta shape: " << beta->info()->tensor_shape()
                                << " Gamma shape: " << gamma->info()->tensor_shape()
                                << " Epsilon: " << epsilon
+                               << " Activation function: " << act_info.activation()
+                               << " a: " << act_info.a()
+                               << " b: " << act_info.b()
                                << std::endl);

     return std::move(batch_norm);
diff --git a/src/graph/operations/NESimpleOperations.cpp b/src/graph/operations/NESimpleOperations.cpp
index 5a00e230ea..265bed6b7a 100644
--- a/src/graph/operations/NESimpleOperations.cpp
+++ b/src/graph/operations/NESimpleOperations.cpp
@@ -79,17 +79,18 @@ REGISTER_SIMPLE_OPERATION(NEBatchNormalizationLayerOperation, NEON, OperationTyp
     ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);

     // Extract IO and info
-    auto      *in      = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
-    auto      *mean    = dynamic_cast<arm_compute::ITensor *>(ctx.input(1));
-    auto      *var     = dynamic_cast<arm_compute::ITensor *>(ctx.input(2));
-    auto      *beta    = dynamic_cast<arm_compute::ITensor *>(ctx.input(3));
-    auto      *gamma   = dynamic_cast<arm_compute::ITensor *>(ctx.input(4));
-    auto      *out     = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
-    const auto epsilon = ctx.parameter<float>("epsilon");
+    auto      *in       = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
+    auto      *mean     = dynamic_cast<arm_compute::ITensor *>(ctx.input(1));
+    auto      *var      = dynamic_cast<arm_compute::ITensor *>(ctx.input(2));
+    auto      *beta     = dynamic_cast<arm_compute::ITensor *>(ctx.input(3));
+    auto      *gamma    = dynamic_cast<arm_compute::ITensor *>(ctx.input(4));
+    auto      *out      = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
+    const auto epsilon  = ctx.parameter<float>("epsilon");
+    const auto act_info = ctx.parameter<ActivationLayerInfo>("act_info");

     // Create and configure function
     auto batch_norm = arm_compute::support::cpp14::make_unique<arm_compute::NEBatchNormalizationLayer>();
-    batch_norm->configure(in, out, mean, var, beta, gamma, epsilon);
+    batch_norm->configure(in, out, mean, var, beta, gamma, epsilon, act_info);

     // Log info
     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEBatchNormalizationLayer"
@@ -101,6 +102,9 @@ REGISTER_SIMPLE_OPERATION(NEBatchNormalizationLayerOperation, NEON, OperationTyp
                                << " Beta shape: " << beta->info()->tensor_shape()
                                << " Gamma shape: " << gamma->info()->tensor_shape()
                                << " Epsilon: " << epsilon
+                               << " Activation function: " << act_info.activation()
+                               << " a: " << act_info.a()
+                               << " b: " << act_info.b()
                                << std::endl);

     return std::move(batch_norm);
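For reference, the per-element math behind the fusion can be sketched as below. This is a standalone scalar illustration, not code from the library: the function name is hypothetical, and it assumes the usual batch normalization formula together with arm_compute's bounded ReLU definition min(a, max(0, x)), where a corresponds to act_info.a().

// Scalar reference of the fused operation, for illustration only;
// the actual CL/NEON kernels are vectorized implementations of the same math.
#include <algorithm>
#include <cmath>

float fused_bn_bounded_relu(float x, float mean, float var, float beta, float gamma,
                            float epsilon, float upper_bound /* act_info.a() */)
{
    // Batch normalization: normalize, then scale by gamma and shift by beta
    const float bn = gamma * (x - mean) / std::sqrt(var + epsilon) + beta;
    // Bounded ReLU: min(a, max(0, x))
    return std::min(upper_bound, std::max(0.0f, bn));
}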