diff options
author | Georgios Pinitas <georgios.pinitas@arm.com> | 2017-10-25 18:26:46 +0100 |
---|---|---|
committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:35:24 +0000 |
commit | 407c3e6e383affa7ebe72ce5f50fcf163ff037a3 (patch) | |
tree | c516e583fe0a2e5ee9dfc218fe10880000edcf91 /src/graph/operations | |
parent | e500747b5c1d27aeffae316c8190f6d169bb2fbd (diff) | |
download | ComputeLibrary-407c3e6e383affa7ebe72ce5f50fcf163ff037a3.tar.gz |
COMPMID-630: Rework nodes
Reworked node:
-BatchNormalization
-Floor
-FullyConnected
-L2Normalize
-Normalization
-Pooling
-Softmax
Change-Id: I4c71cfffb1f59aac3326ba8b1f831339c5244394
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/93134
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'src/graph/operations')
-rw-r--r-- | src/graph/operations/CL/CLActivationLayerOperation.cpp | 73 | ||||
-rw-r--r-- | src/graph/operations/CLSimpleOperations.cpp | 277 | ||||
-rw-r--r-- | src/graph/operations/NEON/NEActivationLayerOperation.cpp | 73 | ||||
-rw-r--r-- | src/graph/operations/NESimpleOperations.cpp | 277 |
4 files changed, 554 insertions, 146 deletions
diff --git a/src/graph/operations/CL/CLActivationLayerOperation.cpp b/src/graph/operations/CL/CLActivationLayerOperation.cpp deleted file mode 100644 index d0045e2500..0000000000 --- a/src/graph/operations/CL/CLActivationLayerOperation.cpp +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (c) 2017 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#include "arm_compute/graph/operations/CL/CLActivationLayerOperation.h" - -#include "arm_compute/core/CL/ICLTensor.h" -#include "arm_compute/core/Error.h" -#include "arm_compute/graph/OperationRegistrar.h" -#include "arm_compute/graph/Types.h" -#include "arm_compute/runtime/CL/functions/CLActivationLayer.h" -#include "support/ToolchainSupport.h" -#include "utils/GraphTypePrinter.h" -#include "utils/TypePrinter.h" - -#include <memory> - -using namespace arm_compute::graph; - -std::unique_ptr<arm_compute::IFunction> CLActivationLayerOperation::configure(NodeContext &ctx) -{ - ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1); - ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1); - ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr); - ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr); - - // Extract IO and info - auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)); - auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)); - const auto act_info = ctx.parameter<ActivationLayerInfo>("ActivationLayerInfo"); - - // Create and configure function - auto activation = arm_compute::support::cpp14::make_unique<CLActivationLayer>(); - activation->configure(in, out, act_info); - - // Log info - ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLActivationLayer" - << " Data Type: " << in->info()->data_type() - << " Input shape: " << in->info()->tensor_shape() - << " Output shape: " << out->info()->tensor_shape() - << " Activation function: " << act_info.activation() - << " a: " << act_info.a() - << " b: " << act_info.b() - << std::endl); - - return std::move(activation); -} - -TargetHint CLActivationLayerOperation::target() const -{ - return TargetHint::OPENCL; -} - -static detail::OperationRegistrar<CLActivationLayerOperation> registrar("ActivationLayer");
\ No newline at end of file diff --git a/src/graph/operations/CLSimpleOperations.cpp b/src/graph/operations/CLSimpleOperations.cpp new file mode 100644 index 0000000000..b4c217b1a4 --- /dev/null +++ b/src/graph/operations/CLSimpleOperations.cpp @@ -0,0 +1,277 @@ +/* + * Copyright (c) 2017 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/core/CL/ICLTensor.h" +#include "arm_compute/core/Error.h" +#include "arm_compute/graph/IOperation.h" +#include "arm_compute/graph/NodeContext.h" +#include "arm_compute/graph/OperationRegistrar.h" +#include "arm_compute/graph/Types.h" +#include "arm_compute/runtime/CL/CLFunctions.h" +#include "support/ToolchainSupport.h" +#include "utils/GraphTypePrinter.h" +#include "utils/TypePrinter.h" + +#include <memory> + +using namespace arm_compute::graph; + +/* Activation Layer */ +REGISTER_SIMPLE_OPERATION(CLActivationLayerOperation, OPENCL, OperationType::ActivationLayer) +{ + ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1); + ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr); + + // Extract IO and info + auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)); + auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)); + const auto act_info = ctx.parameter<ActivationLayerInfo>("ActivationLayerInfo"); + + // Create and configure function + auto activation = arm_compute::support::cpp14::make_unique<arm_compute::CLActivationLayer>(); + activation->configure(in, out, act_info); + + // Log info + ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLActivationLayer" + << " Data Type: " << in->info()->data_type() + << " Input shape: " << in->info()->tensor_shape() + << " Output shape: " << out->info()->tensor_shape() + << " Activation function: " << act_info.activation() + << " a: " << act_info.a() + << " b: " << act_info.b() + << std::endl); + + return std::move(activation); +} + +/* Batch Normalization Layer */ +REGISTER_SIMPLE_OPERATION(CLBatchNormalizationLayerOperation, OPENCL, OperationType::BatchNormalizationLayer) +{ + ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 5); + ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor 
*>(ctx.input(0)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(3)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(4)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr); + + // Extract IO and info + auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)); + auto *mean = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1)); + auto *var = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2)); + auto *beta = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(3)); + auto *gamma = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(4)); + auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)); + const auto epsilon = ctx.parameter<float>("epsilon"); + + // Create and configure function + auto batch_norm = arm_compute::support::cpp14::make_unique<arm_compute::CLBatchNormalizationLayer>(); + batch_norm->configure(in, out, mean, var, beta, gamma, epsilon); + + // Log info + ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLBatchNormalizationLayer" + << " Data Type: " << in->info()->data_type() + << " Input shape: " << in->info()->tensor_shape() + << " Output shape: " << out->info()->tensor_shape() + << " Mean shape: " << mean->info()->tensor_shape() + << " Var shape: " << var->info()->tensor_shape() + << " Beta shape: " << beta->info()->tensor_shape() + << " Gamma shape: " << gamma->info()->tensor_shape() + << " Epsilon: " << epsilon + << std::endl); + + return std::move(batch_norm); +} + +/* Floor Layer */ +REGISTER_SIMPLE_OPERATION(CLFloorLayerOperation, OPENCL, OperationType::FloorLayer) +{ + ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1); + ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == 
nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr); + + // Extract IO and info + auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)); + auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)); + + // Create and configure function + auto floor = arm_compute::support::cpp14::make_unique<arm_compute::CLFloor>(); + floor->configure(in, out); + + // Log info + ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLFloorLayer" + << " Data Type: " << in->info()->data_type() + << " Input shape: " << in->info()->tensor_shape() + << " Output shape: " << out->info()->tensor_shape() + << std::endl); + + return std::move(floor); +} + +/* Fully Connected Layer */ +REGISTER_SIMPLE_OPERATION(CLFullyConnectedLayer, OPENCL, OperationType::FullyConnectedLayer) +{ + ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 3); + ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr); + + // Extract IO and info + auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)); + auto *weights = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1)); + auto *biases = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2)); + auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)); + + // Create and configure function + auto fc = arm_compute::support::cpp14::make_unique<arm_compute::CLFullyConnectedLayer>(); + fc->configure(in, weights, biases, out); + + // Log info + ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEFullyConnectedLayer" + << " Data Type: " << in->info()->data_type() + << " Input shape: " << in->info()->tensor_shape() + << " Weights shape: " << weights->info()->tensor_shape() + << " Biases 
Shape: " << biases->info()->tensor_shape() + << " Output shape: " << out->info()->tensor_shape() + << std::endl); + + return std::move(fc); +} + +/* L2 Normalize Layer */ +REGISTER_SIMPLE_OPERATION(CLL2NormalizeLayerOperation, OPENCL, OperationType::L2NormalizeLayer) +{ + ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1); + ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr); + + // Extract IO and info + auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)); + auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)); + const auto axis = ctx.parameter<unsigned int>("axis"); + const auto epsilon = ctx.parameter<float>("epsilon"); + + // Create and configure function + auto l2_norm = arm_compute::support::cpp14::make_unique<arm_compute::CLL2Normalize>(); + l2_norm->configure(in, out, axis, epsilon); + + // Log info + ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLL2NormalizeLayer" + << " Data Type: " << in->info()->data_type() + << " Input shape: " << in->info()->tensor_shape() + << " Output shape: " << out->info()->tensor_shape() + << " Axis: " << axis + << " Epsilon: " << epsilon + << std::endl); + + return std::move(l2_norm); +} + +/* Normalization Layer */ +REGISTER_SIMPLE_OPERATION(CLNormalizationLayerOperation, OPENCL, OperationType::NormalizationLayer) +{ + ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1); + ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr); + + // Extract IO and info + auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)); + auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)); + const auto norm_info = ctx.parameter<NormalizationLayerInfo>("NormalizationLayerInfo"); + + // 
Create and configure function + auto norm = arm_compute::support::cpp14::make_unique<arm_compute::CLNormalizationLayer>(); + norm->configure(in, out, norm_info); + + // Log info + ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLNormalizationLayer" + << " Data Type: " << in->info()->data_type() + << " Input shape: " << in->info()->tensor_shape() + << " Output shape: " << out->info()->tensor_shape() + << " Normalization info: " << norm_info + << std::endl); + + return std::move(norm); +} + +/* Pooling Layer */ +REGISTER_SIMPLE_OPERATION(CLPoolingLayerOperation, OPENCL, OperationType::PoolingLayer) +{ + ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1); + ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr); + + // Extract IO and info + auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)); + auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)); + const auto pool_info = ctx.parameter<PoolingLayerInfo>("PoolingLayerInfo"); + + // Create and configure function + auto pool = arm_compute::support::cpp14::make_unique<arm_compute::CLPoolingLayer>(); + pool->configure(in, out, pool_info); + + // Log info + ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLPoolingLayer" + << " Data Type: " << in->info()->data_type() + << " Input shape: " << in->info()->tensor_shape() + << " Output shape: " << out->info()->tensor_shape() + << " Pooling info: " << pool_info + << std::endl); + + return std::move(pool); +} + +/* Softmax Layer */ +REGISTER_SIMPLE_OPERATION(CLSoftmaxLayerOperation, OPENCL, OperationType::SoftmaxLayer) +{ + ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1); + ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr); + + // Extract IO 
and info + auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)); + auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)); + + // Create and configure function + auto smx = arm_compute::support::cpp14::make_unique<arm_compute::CLSoftmaxLayer>(); + smx->configure(in, out); + + // Log info + ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLSoftmaxLayer" + << " Data Type: " << in->info()->data_type() + << " Input shape: " << in->info()->tensor_shape() + << " Output shape: " << out->info()->tensor_shape() + << std::endl); + + return std::move(smx); +}
\ No newline at end of file diff --git a/src/graph/operations/NEON/NEActivationLayerOperation.cpp b/src/graph/operations/NEON/NEActivationLayerOperation.cpp deleted file mode 100644 index 355fd38f67..0000000000 --- a/src/graph/operations/NEON/NEActivationLayerOperation.cpp +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (c) 2017 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#include "arm_compute/graph/operations/NEON/NEActivationLayerOperation.h" - -#include "arm_compute/core/Error.h" -#include "arm_compute/core/ITensor.h" -#include "arm_compute/graph/OperationRegistrar.h" -#include "arm_compute/graph/Types.h" -#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h" -#include "support/ToolchainSupport.h" -#include "utils/GraphTypePrinter.h" -#include "utils/TypePrinter.h" - -#include <memory> - -using namespace arm_compute::graph; - -std::unique_ptr<arm_compute::IFunction> NEActivationLayerOperation::configure(NodeContext &ctx) -{ - ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1); - ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1); - ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr); - ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr); - - // Extract IO and info - auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0)); - auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0)); - const auto act_info = ctx.parameter<ActivationLayerInfo>("ActivationLayerInfo"); - - // Create and configure function - auto activation = arm_compute::support::cpp14::make_unique<NEActivationLayer>(); - activation->configure(in, out, act_info); - - // Log info - ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEActivationLayer" - << " Data Type: " << in->info()->data_type() - << " Input shape: " << in->info()->tensor_shape() - << " Output shape: " << out->info()->tensor_shape() - << " Activation function: " << act_info.activation() - << " a: " << act_info.a() - << " b: " << act_info.b() - << std::endl); - - return std::move(activation); -} - -TargetHint NEActivationLayerOperation::target() const -{ - return TargetHint::NEON; -} - -static detail::OperationRegistrar<NEActivationLayerOperation> registrar("ActivationLayer");
\ No newline at end of file diff --git a/src/graph/operations/NESimpleOperations.cpp b/src/graph/operations/NESimpleOperations.cpp new file mode 100644 index 0000000000..59f252ae44 --- /dev/null +++ b/src/graph/operations/NESimpleOperations.cpp @@ -0,0 +1,277 @@ +/* + * Copyright (c) 2017 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/core/Error.h" +#include "arm_compute/core/ITensor.h" +#include "arm_compute/graph/IOperation.h" +#include "arm_compute/graph/NodeContext.h" +#include "arm_compute/graph/OperationRegistrar.h" +#include "arm_compute/graph/Types.h" +#include "arm_compute/runtime/NEON/NEFunctions.h" +#include "support/ToolchainSupport.h" +#include "utils/GraphTypePrinter.h" +#include "utils/TypePrinter.h" + +#include <memory> + +using namespace arm_compute::graph; + +/* Activation Layer */ +REGISTER_SIMPLE_OPERATION(NEActivationLayerOperation, NEON, OperationType::ActivationLayer) +{ + ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1); + ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr); + + // Extract IO and info + auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0)); + auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0)); + const auto act_info = ctx.parameter<ActivationLayerInfo>("ActivationLayerInfo"); + + // Create and configure function + auto activation = arm_compute::support::cpp14::make_unique<arm_compute::NEActivationLayer>(); + activation->configure(in, out, act_info); + + // Log info + ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEActivationLayer" + << " Data Type: " << in->info()->data_type() + << " Input shape: " << in->info()->tensor_shape() + << " Output shape: " << out->info()->tensor_shape() + << " Activation function: " << act_info.activation() + << " a: " << act_info.a() + << " b: " << act_info.b() + << std::endl); + + return std::move(activation); +} + +/* Batch Normalization Layer */ +REGISTER_SIMPLE_OPERATION(NEBatchNormalizationLayerOperation, NEON, OperationType::BatchNormalizationLayer) +{ + ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 5); + ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) 
== nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(1)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(2)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(3)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(4)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr); + + // Extract IO and info + auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0)); + auto *mean = dynamic_cast<arm_compute::ITensor *>(ctx.input(1)); + auto *var = dynamic_cast<arm_compute::ITensor *>(ctx.input(2)); + auto *beta = dynamic_cast<arm_compute::ITensor *>(ctx.input(3)); + auto *gamma = dynamic_cast<arm_compute::ITensor *>(ctx.input(4)); + auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0)); + const auto epsilon = ctx.parameter<float>("epsilon"); + + // Create and configure function + auto batch_norm = arm_compute::support::cpp14::make_unique<arm_compute::NEBatchNormalizationLayer>(); + batch_norm->configure(in, out, mean, var, beta, gamma, epsilon); + + // Log info + ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEBatchNormalizationLayer" + << " Data Type: " << in->info()->data_type() + << " Input shape: " << in->info()->tensor_shape() + << " Output shape: " << out->info()->tensor_shape() + << " Mean shape: " << mean->info()->tensor_shape() + << " Var shape: " << var->info()->tensor_shape() + << " Beta shape: " << beta->info()->tensor_shape() + << " Gamma shape: " << gamma->info()->tensor_shape() + << " Epsilon: " << epsilon + << std::endl); + + return std::move(batch_norm); +} + +/* Floor Layer */ +REGISTER_SIMPLE_OPERATION(NEFloorLayerOperation, NEON, OperationType::FloorLayer) +{ + ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1); + ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr); + 
ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr); + + // Extract IO and info + auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0)); + auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0)); + + // Create and configure function + auto floor = arm_compute::support::cpp14::make_unique<arm_compute::NEFloor>(); + floor->configure(in, out); + + // Log info + ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEFloorLayer" + << " Data Type: " << in->info()->data_type() + << " Input shape: " << in->info()->tensor_shape() + << " Output shape: " << out->info()->tensor_shape() + << std::endl); + + return std::move(floor); +} + +/* Fully Connected Layer */ +REGISTER_SIMPLE_OPERATION(NEFullyConnectedLayer, NEON, OperationType::FullyConnectedLayer) +{ + ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 3); + ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(1)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(2)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr); + + // Extract IO and info + auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0)); + auto *weights = dynamic_cast<arm_compute::ITensor *>(ctx.input(1)); + auto *biases = dynamic_cast<arm_compute::ITensor *>(ctx.input(2)); + auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0)); + + // Create and configure function + auto fc = arm_compute::support::cpp14::make_unique<arm_compute::NEFullyConnectedLayer>(); + fc->configure(in, weights, biases, out); + + // Log info + ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEFullyConnectedLayer" + << " Data Type: " << in->info()->data_type() + << " Input shape: " << in->info()->tensor_shape() + << " Weights shape: " << weights->info()->tensor_shape() + << " Biases Shape: " << 
biases->info()->tensor_shape() + << " Output shape: " << out->info()->tensor_shape() + << std::endl); + + return std::move(fc); +} + +/* L2 Normalize Layer */ +REGISTER_SIMPLE_OPERATION(NEL2NormalizeLayerOperation, NEON, OperationType::L2NormalizeLayer) +{ + ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1); + ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr); + + // Extract IO and info + auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0)); + auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0)); + const auto axis = ctx.parameter<unsigned int>("axis"); + const auto epsilon = ctx.parameter<float>("epsilon"); + + // Create and configure function + auto l2_norm = arm_compute::support::cpp14::make_unique<arm_compute::NEL2Normalize>(); + l2_norm->configure(in, out, axis, epsilon); + + // Log info + ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEL2NormalizeLayer" + << " Data Type: " << in->info()->data_type() + << " Input shape: " << in->info()->tensor_shape() + << " Output shape: " << out->info()->tensor_shape() + << " Axis: " << axis + << " Epsilon: " << epsilon + << std::endl); + + return std::move(l2_norm); +} + +/* Normalization Layer */ +REGISTER_SIMPLE_OPERATION(NENormalizationLayerOperation, NEON, OperationType::NormalizationLayer) +{ + ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1); + ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr); + + // Extract IO and info + auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0)); + auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0)); + const auto norm_info = ctx.parameter<NormalizationLayerInfo>("NormalizationLayerInfo"); + + // Create and configure function + 
auto norm = arm_compute::support::cpp14::make_unique<arm_compute::NENormalizationLayer>(); + norm->configure(in, out, norm_info); + + // Log info + ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NENormalizationLayer" + << " Data Type: " << in->info()->data_type() + << " Input shape: " << in->info()->tensor_shape() + << " Output shape: " << out->info()->tensor_shape() + << " Normalization info: " << norm_info + << std::endl); + + return std::move(norm); +} + +/* Pooling Layer */ +REGISTER_SIMPLE_OPERATION(NEPoolingLayerOperation, NEON, OperationType::PoolingLayer) +{ + ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1); + ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr); + + // Extract IO and info + auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0)); + auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0)); + const auto pool_info = ctx.parameter<PoolingLayerInfo>("PoolingLayerInfo"); + + // Create and configure function + auto pool = arm_compute::support::cpp14::make_unique<arm_compute::NEPoolingLayer>(); + pool->configure(in, out, pool_info); + + // Log info + ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEPoolingLayer" + << " Data Type: " << in->info()->data_type() + << " Input shape: " << in->info()->tensor_shape() + << " Output shape: " << out->info()->tensor_shape() + << " Pooling info: " << pool_info + << std::endl); + + return std::move(pool); +} + +/* Softmax Layer */ +REGISTER_SIMPLE_OPERATION(NESoftmaxLayerOperation, NEON, OperationType::SoftmaxLayer) +{ + ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1); + ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr); + ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr); + + // Extract IO and info + auto *in = 
dynamic_cast<arm_compute::ITensor *>(ctx.input(0)); + auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0)); + + // Create and configure function + auto smx = arm_compute::support::cpp14::make_unique<arm_compute::NESoftmaxLayer>(); + smx->configure(in, out); + + // Log info + ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NESoftmaxLayer" + << " Data Type: " << in->info()->data_type() + << " Input shape: " << in->info()->tensor_shape() + << " Output shape: " << out->info()->tensor_shape() + << std::endl); + + return std::move(smx); +}
\ No newline at end of file |