diff options
author | Anthony Barbier <anthony.barbier@arm.com> | 2017-09-04 18:44:23 +0100 |
---|---|---|
committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-09-17 13:03:09 +0100 |
commit | 6ff3b19ee6120edf015fad8caab2991faa3070af (patch) | |
tree | a7a6dcd16dfd56d79fa1b56a313caeebcc939b68 /tests/benchmark/CL/ActivationLayer.cpp | |
download | ComputeLibrary-6ff3b19ee6120edf015fad8caab2991faa3070af.tar.gz |
COMPMID-344 Updated doxygen
Change-Id: I32f7b84daa560e460b77216add529c8fa8b327ae
Diffstat (limited to 'tests/benchmark/CL/ActivationLayer.cpp')
-rw-r--r-- | tests/benchmark/CL/ActivationLayer.cpp | 212 |
1 file changed, 212 insertions, 0 deletions
diff --git a/tests/benchmark/CL/ActivationLayer.cpp b/tests/benchmark/CL/ActivationLayer.cpp new file mode 100644 index 0000000000..5180d3d900 --- /dev/null +++ b/tests/benchmark/CL/ActivationLayer.cpp @@ -0,0 +1,212 @@ +/* + * Copyright (c) 2017 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "CL/CLAccessor.h" +#include "CL/Helper.h" +#include "Globals.h" +#include "TensorLibrary.h" +#include "benchmark/Datasets.h" +#include "benchmark/Profiler.h" +#include "benchmark/WallClockTimer.h" + +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/runtime/CL/CLScheduler.h" +#include "arm_compute/runtime/CL/CLTensor.h" +#include "arm_compute/runtime/CL/CLTensorAllocator.h" +#include "arm_compute/runtime/CL/functions/CLActivationLayer.h" + +#include "benchmark/benchmark_api.h" + +using namespace arm_compute; +using namespace arm_compute::test; +using namespace arm_compute::test::benchmark; +using namespace arm_compute::test::cl; + +#include "benchmark/common/ActivationLayer.h" + +namespace +{ +using ActivationLayerAlexNet = ActivationLayer<AlexNetActivationLayerDataset, CLTensor, CLAccessor, CLActivationLayer>; +using ActivationLayerLeNet5 = ActivationLayer<LeNet5ActivationLayerDataset, CLTensor, CLAccessor, CLActivationLayer>; +using ActivationLayerGoogLeNet = ActivationLayer<GoogLeNetActivationLayerDataset, CLTensor, CLAccessor, CLActivationLayer>; +} // namespace + +BENCHMARK_DEFINE_F(ActivationLayerAlexNet, cl_alexnet) +(::benchmark::State &state) +{ + while(state.KeepRunning()) + { + // Run function + profiler.start(); + act_layer.run(); + CLScheduler::get().sync(); + profiler.stop(); + } +} + +BENCHMARK_REGISTER_F(ActivationLayerAlexNet, cl_alexnet) +->Threads(1) +->Apply(DataSetArgBatched<AlexNetActivationLayerDataset, 0, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerAlexNet, cl_alexnet) +->Threads(1) +->Apply(DataSetArgBatched<AlexNetActivationLayerDataset, 1, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerAlexNet, cl_alexnet) +->Threads(1) +->Apply(DataSetArgBatched<AlexNetActivationLayerDataset, 2, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerAlexNet, cl_alexnet) +->Threads(1) +->Apply(DataSetArgBatched<AlexNetActivationLayerDataset, 3, 1, 4, 8>); 
+BENCHMARK_REGISTER_F(ActivationLayerAlexNet, cl_alexnet) +->Threads(1) +->Apply(DataSetArgBatched<AlexNetActivationLayerDataset, 4, 1, 4, 8>); + +BENCHMARK_DEFINE_F(ActivationLayerLeNet5, cl_lenet5) +(::benchmark::State &state) +{ + while(state.KeepRunning()) + { + // Run function + profiler.start(); + act_layer.run(); + CLScheduler::get().sync(); + profiler.stop(); + } +} + +BENCHMARK_REGISTER_F(ActivationLayerLeNet5, cl_lenet5) +->Threads(1) +->Apply(DataSetArgBatched<LeNet5ActivationLayerDataset, 0, 1, 4, 8>); + +BENCHMARK_DEFINE_F(ActivationLayerGoogLeNet, cl_googlenet) +(::benchmark::State &state) +{ + while(state.KeepRunning()) + { + // Run function + profiler.start(); + act_layer.run(); + CLScheduler::get().sync(); + profiler.stop(); + } +} + +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 0, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 1, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 2, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 3, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 4, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 5, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 6, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 7, 1, 4, 8>); 
+BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 8, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 9, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 10, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 11, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 12, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 13, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 14, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 15, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 16, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 17, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 18, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 19, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 20, 1, 4, 8>); 
+BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 21, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 22, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 23, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 24, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 25, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 26, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 27, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 28, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 29, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 30, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 31, 1, 4, 8>); +BENCHMARK_REGISTER_F(ActivationLayerGoogLeNet, cl_googlenet) +->Threads(1) +->Apply(DataSetArgBatched<GoogLeNetActivationLayerDataset, 32, 1, 4, 8>); |