author     Georgios Pinitas <georgios.pinitas@arm.com>   2018-07-27 12:42:10 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>     2018-11-02 16:54:54 +0000
commit     ead90b579a6d93af53e4e6e104c873b9dcc7ee25 (patch)
tree       4439e6fe4066812f7335da95012aa05f202bfda6 /tests
parent     e2220551b7a64b929650ba9a60529c31e70c13c5 (diff)
download   ComputeLibrary-ead90b579a6d93af53e4e6e104c873b9dcc7ee25.tar.gz
COMPMID-1188: Remove graph system tests
Change-Id: I429087f8aa436cf0877c3abec8fd7201bec1b81c
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/141661
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests')
-rw-r--r--  tests/benchmark/CL/SYSTEM/AlexNet.cpp          71
-rw-r--r--  tests/benchmark/CL/SYSTEM/LeNet5.cpp           63
-rw-r--r--  tests/benchmark/CL/SYSTEM/MobileNet.cpp        65
-rw-r--r--  tests/benchmark/CL/SYSTEM/MobileNetV1.cpp      85
-rw-r--r--  tests/benchmark/NEON/SYSTEM/AlexNet.cpp        80
-rw-r--r--  tests/benchmark/NEON/SYSTEM/LeNet5.cpp         63
-rw-r--r--  tests/benchmark/fixtures/AlexNetFixture.h      95
-rw-r--r--  tests/benchmark/fixtures/LeNet5Fixture.h       84
-rw-r--r--  tests/benchmark/fixtures/MobileNetFixture.h    86
-rw-r--r--  tests/benchmark/fixtures/MobileNetV1Fixture.h  91
-rw-r--r--  tests/networks/AlexNetNetwork.h                646
-rw-r--r--  tests/networks/LeNet5Network.h                 265
-rw-r--r--  tests/networks/MobileNetNetwork.h              314
-rw-r--r--  tests/networks/MobileNetV1Network.h            390
-rw-r--r--  tests/validation/CL/SYSTEM/AlexNet.cpp         112
-rw-r--r--  tests/validation/CL/SYSTEM/LeNet5.cpp          97
-rw-r--r--  tests/validation/NEON/SYSTEM/AlexNet.cpp       113
-rw-r--r--  tests/validation/NEON/SYSTEM/LeNet5.cpp        97
18 files changed, 0 insertions(+), 2817 deletions(-)
diff --git a/tests/benchmark/CL/SYSTEM/AlexNet.cpp b/tests/benchmark/CL/SYSTEM/AlexNet.cpp
deleted file mode 100644
index f24803d71b..0000000000
--- a/tests/benchmark/CL/SYSTEM/AlexNet.cpp
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/CL/CLSubTensor.h"
-#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/CL/CLTensorAllocator.h"
-#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
-#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"
-#include "arm_compute/runtime/CL/functions/CLNormalizationLayer.h"
-#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"
-#include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"
-#include "tests/CL/CLAccessor.h"
-#include "tests/benchmark/fixtures/AlexNetFixture.h"
-#include "tests/framework/Macros.h"
-#include "tests/framework/datasets/Datasets.h"
-#include "utils/TypePrinter.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace benchmark
-{
-using CLAlexNetFixture = AlexNetFixture<ICLTensor,
- CLTensor,
- CLSubTensor,
- CLAccessor,
- CLActivationLayer,
- CLConvolutionLayer,
- CLDirectConvolutionLayer,
- CLFullyConnectedLayer,
- CLNormalizationLayer,
- CLPoolingLayer,
- CLSoftmaxLayer>;
-
-TEST_SUITE(CL)
-TEST_SUITE(SYSTEM_TESTS)
-
-REGISTER_FIXTURE_DATA_TEST_CASE(AlexNet, CLAlexNetFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::make("DataType", { DataType::F16, DataType::F32 }),
- framework::dataset::make("Batches", { 1, 2, 4 })));
-
-TEST_SUITE_END()
-TEST_SUITE_END()
-} // namespace benchmark
-} // namespace test
-} // namespace arm_compute
diff --git a/tests/benchmark/CL/SYSTEM/LeNet5.cpp b/tests/benchmark/CL/SYSTEM/LeNet5.cpp
deleted file mode 100644
index 0fa6791437..0000000000
--- a/tests/benchmark/CL/SYSTEM/LeNet5.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/CL/CLTensorAllocator.h"
-#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
-#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"
-#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"
-#include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"
-#include "tests/CL/CLAccessor.h"
-#include "tests/benchmark/fixtures/LeNet5Fixture.h"
-#include "tests/framework/Macros.h"
-#include "tests/framework/datasets/Datasets.h"
-#include "utils/TypePrinter.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace benchmark
-{
-using CLLeNet5Fixture = LeNet5Fixture<CLTensor,
- CLAccessor,
- CLActivationLayer,
- CLConvolutionLayer,
- CLFullyConnectedLayer,
- CLPoolingLayer,
- CLSoftmaxLayer>;
-
-TEST_SUITE(CL)
-TEST_SUITE(SYSTEM_TESTS)
-
-REGISTER_FIXTURE_DATA_TEST_CASE(LeNet5, CLLeNet5Fixture, framework::DatasetMode::ALL,
- framework::dataset::make("Batches", { 1, 4, 8 }));
-
-TEST_SUITE_END()
-TEST_SUITE_END()
-} // namespace benchmark
-} // namespace test
-} // namespace arm_compute
diff --git a/tests/benchmark/CL/SYSTEM/MobileNet.cpp b/tests/benchmark/CL/SYSTEM/MobileNet.cpp
deleted file mode 100644
index 304e966eed..0000000000
--- a/tests/benchmark/CL/SYSTEM/MobileNet.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/CL/CLTensorAllocator.h"
-#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
-#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"
-#include "arm_compute/runtime/CL/functions/CLReshapeLayer.h"
-#include "tests/CL/CLAccessor.h"
-#include "tests/benchmark/fixtures/MobileNetFixture.h"
-#include "tests/framework/Macros.h"
-#include "tests/framework/datasets/Datasets.h"
-#include "utils/TypePrinter.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace benchmark
-{
-using CLMobileNetFixture = MobileNetFixture<CLTensor,
- CLAccessor,
- CLActivationLayer,
- CLConvolutionLayer,
- CLDirectConvolutionLayer,
- CLDepthwiseConvolutionLayer,
- CLReshapeLayer,
- CLPoolingLayer>;
-
-TEST_SUITE(CL)
-TEST_SUITE(SYSTEM_TESTS)
-
-REGISTER_FIXTURE_DATA_TEST_CASE(MobileNet, CLMobileNetFixture, framework::DatasetMode::ALL,
- framework::dataset::make("Batches", { 1, 4, 8 }));
-
-TEST_SUITE_END()
-TEST_SUITE_END()
-} // namespace benchmark
-} // namespace test
-} // namespace arm_compute
diff --git a/tests/benchmark/CL/SYSTEM/MobileNetV1.cpp b/tests/benchmark/CL/SYSTEM/MobileNetV1.cpp
deleted file mode 100644
index de38d371f1..0000000000
--- a/tests/benchmark/CL/SYSTEM/MobileNetV1.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/CL/CLTensorAllocator.h"
-#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
-#include "arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h"
-#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"
-#include "arm_compute/runtime/CL/functions/CLReshapeLayer.h"
-#include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"
-#include "tests/CL/CLAccessor.h"
-#include "tests/benchmark/fixtures/MobileNetV1Fixture.h"
-#include "tests/framework/Macros.h"
-#include "tests/framework/datasets/Datasets.h"
-#include "utils/TypePrinter.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace benchmark
-{
-using CLMobileNetV1_224_Fixture = MobileNetV1Fixture<CLTensor,
- CLAccessor,
- CLActivationLayer,
- CLBatchNormalizationLayer,
- CLConvolutionLayer,
- CLDirectConvolutionLayer,
- CLDepthwiseConvolutionLayer3x3,
- CLReshapeLayer,
- CLPoolingLayer,
- CLSoftmaxLayer,
- 224>;
-
-using CLMobileNetV1_128_Fixture = MobileNetV1Fixture<CLTensor,
- CLAccessor,
- CLActivationLayer,
- CLBatchNormalizationLayer,
- CLConvolutionLayer,
- CLDirectConvolutionLayer,
- CLDepthwiseConvolutionLayer3x3,
- CLReshapeLayer,
- CLPoolingLayer,
- CLSoftmaxLayer,
- 128>;
-
-TEST_SUITE(CL)
-TEST_SUITE(SYSTEM_TESTS)
-
-REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetV1_224, CLMobileNetV1_224_Fixture, framework::DatasetMode::ALL,
- framework::dataset::make("Batches", { 1, 4, 8 }));
-
-REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetV1_128, CLMobileNetV1_128_Fixture, framework::DatasetMode::ALL,
- framework::dataset::make("Batches", { 1, 4, 8 }));
-
-TEST_SUITE_END()
-TEST_SUITE_END()
-} // namespace benchmark
-} // namespace test
-} // namespace arm_compute
diff --git a/tests/benchmark/NEON/SYSTEM/AlexNet.cpp b/tests/benchmark/NEON/SYSTEM/AlexNet.cpp
deleted file mode 100644
index e3491757f4..0000000000
--- a/tests/benchmark/NEON/SYSTEM/AlexNet.cpp
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
-#include "arm_compute/runtime/NEON/functions/NENormalizationLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEPoolingLayer.h"
-#include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"
-#include "arm_compute/runtime/SubTensor.h"
-#include "arm_compute/runtime/Tensor.h"
-#include "arm_compute/runtime/TensorAllocator.h"
-#include "tests/NEON/Accessor.h"
-#include "tests/benchmark/fixtures/AlexNetFixture.h"
-#include "tests/framework/Macros.h"
-#include "tests/framework/datasets/Datasets.h"
-#include "utils/TypePrinter.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace benchmark
-{
-namespace
-{
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-const auto alex_net_data_types = framework::dataset::make("DataType", { DataType::F16, DataType::F32 });
-#else /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-const auto alex_net_data_types = framework::dataset::make("DataType", { DataType::F32 });
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-} // namespace
-
-using NEAlexNetFixture = AlexNetFixture<ITensor,
- Tensor,
- SubTensor,
- Accessor,
- NEActivationLayer,
- NEConvolutionLayer,
- NEConvolutionLayer,
- NEFullyConnectedLayer,
- NENormalizationLayer,
- NEPoolingLayer,
- NESoftmaxLayer>;
-
-TEST_SUITE(NEON)
-TEST_SUITE(SYSTEM_TESTS)
-
-REGISTER_FIXTURE_DATA_TEST_CASE(AlexNet, NEAlexNetFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(alex_net_data_types,
- framework::dataset::make("Batches", { 1, 2, 4 })));
-
-TEST_SUITE_END()
-TEST_SUITE_END()
-} // namespace benchmark
-} // namespace test
-} // namespace arm_compute
diff --git a/tests/benchmark/NEON/SYSTEM/LeNet5.cpp b/tests/benchmark/NEON/SYSTEM/LeNet5.cpp
deleted file mode 100644
index d9d08f48bf..0000000000
--- a/tests/benchmark/NEON/SYSTEM/LeNet5.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEPoolingLayer.h"
-#include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"
-#include "arm_compute/runtime/Tensor.h"
-#include "arm_compute/runtime/TensorAllocator.h"
-#include "tests/NEON/Accessor.h"
-#include "tests/benchmark/fixtures/LeNet5Fixture.h"
-#include "tests/framework/Macros.h"
-#include "tests/framework/datasets/Datasets.h"
-#include "utils/TypePrinter.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace benchmark
-{
-using NELeNet5Fixture = LeNet5Fixture<Tensor,
- Accessor,
- NEActivationLayer,
- NEConvolutionLayer,
- NEFullyConnectedLayer,
- NEPoolingLayer,
- NESoftmaxLayer>;
-
-TEST_SUITE(NEON)
-TEST_SUITE(SYSTEM_TESTS)
-
-REGISTER_FIXTURE_DATA_TEST_CASE(LeNet5, NELeNet5Fixture, framework::DatasetMode::ALL,
- framework::dataset::make("Batches", { 1, 4, 8 }));
-
-TEST_SUITE_END()
-TEST_SUITE_END()
-} // namespace benchmark
-} // namespace test
-} // namespace arm_compute
diff --git a/tests/benchmark/fixtures/AlexNetFixture.h b/tests/benchmark/fixtures/AlexNetFixture.h
deleted file mode 100644
index 4662feb918..0000000000
--- a/tests/benchmark/fixtures/AlexNetFixture.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_ALEXNETFIXTURE
-#define ARM_COMPUTE_TEST_ALEXNETFIXTURE
-
-#include "tests/AssetsLibrary.h"
-#include "tests/Utils.h"
-#include "tests/framework/Fixture.h"
-#include "tests/networks/AlexNetNetwork.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace benchmark
-{
-template <typename ITensorType,
- typename TensorType,
- typename SubTensorType,
- typename Accessor,
- typename ActivationLayerFunction,
- typename ConvolutionLayerFunction,
- typename DirectConvolutionLayerFunction,
- typename FullyConnectedLayerFunction,
- typename NormalizationLayerFunction,
- typename PoolingLayerFunction,
- typename SoftmaxLayerFunction>
-
-class AlexNetFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(DataType data_type, int batches)
- {
- constexpr bool weights_reshaped = false;
-
- network.init(data_type, batches, weights_reshaped);
- network.build();
- network.allocate();
- }
-
- void run()
- {
- network.run();
- }
-
- void sync()
- {
- network.sync();
- }
-
- void teardown()
- {
- network.clear();
- }
-
-private:
- networks::AlexNetNetwork<ITensorType,
- TensorType,
- SubTensorType,
- Accessor,
- ActivationLayerFunction,
- ConvolutionLayerFunction,
- DirectConvolutionLayerFunction,
- FullyConnectedLayerFunction,
- NormalizationLayerFunction,
- PoolingLayerFunction,
- SoftmaxLayerFunction>
- network{};
-};
-} // namespace benchmark
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_ALEXNETFIXTURE */
diff --git a/tests/benchmark/fixtures/LeNet5Fixture.h b/tests/benchmark/fixtures/LeNet5Fixture.h
deleted file mode 100644
index 03e426d4eb..0000000000
--- a/tests/benchmark/fixtures/LeNet5Fixture.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_LENET5FIXTURE
-#define ARM_COMPUTE_TEST_LENET5FIXTURE
-
-#include "tests/AssetsLibrary.h"
-#include "tests/Utils.h"
-#include "tests/framework/Fixture.h"
-#include "tests/networks/LeNet5Network.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace benchmark
-{
-template <typename TensorType,
- typename Accessor,
- typename ActivationLayerFunction,
- typename ConvolutionLayerFunction,
- typename FullyConnectedLayerFunction,
- typename PoolingLayerFunction,
- typename SoftmaxLayerFunction>
-class LeNet5Fixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(int batches)
- {
- network.init(batches);
- network.build();
- network.allocate();
- }
-
- void run()
- {
- network.run();
- }
-
- void sync()
- {
- network.sync();
- }
-
- void teardown()
- {
- network.clear();
- }
-
-private:
- networks::LeNet5Network<TensorType,
- Accessor,
- ActivationLayerFunction,
- ConvolutionLayerFunction,
- FullyConnectedLayerFunction,
- PoolingLayerFunction,
- SoftmaxLayerFunction>
- network{};
-};
-} // namespace benchmark
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_LENET5FIXTURE */
diff --git a/tests/benchmark/fixtures/MobileNetFixture.h b/tests/benchmark/fixtures/MobileNetFixture.h
deleted file mode 100644
index 3236e82537..0000000000
--- a/tests/benchmark/fixtures/MobileNetFixture.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_MOBILENETFIXTURE
-#define ARM_COMPUTE_TEST_MOBILENETFIXTURE
-
-#include "tests/AssetsLibrary.h"
-#include "tests/Utils.h"
-#include "tests/framework/Fixture.h"
-#include "tests/networks/MobileNetNetwork.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace benchmark
-{
-template <typename TensorType,
- typename Accessor,
- typename ActivationLayerFunction,
- typename ConvolutionLayerFunction,
- typename DirectConvolutionLayerFunction,
- typename DepthwiseConvolutionLayerFunction,
- typename ReshapeFunction,
- typename PoolingLayerFunction>
-class MobileNetFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(int batches)
- {
- network.init(batches);
- network.build();
- network.allocate();
- }
-
- void run()
- {
- network.run();
- }
-
- void sync()
- {
- network.sync();
- }
-
- void teardown()
- {
- network.clear();
- }
-
-private:
- networks::MobileNetNetwork<TensorType,
- Accessor,
- ActivationLayerFunction,
- ConvolutionLayerFunction,
- DirectConvolutionLayerFunction,
- DepthwiseConvolutionLayerFunction,
- ReshapeFunction,
- PoolingLayerFunction>
- network{};
-};
-} // namespace benchmark
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_MOBILENETFIXTURE */
diff --git a/tests/benchmark/fixtures/MobileNetV1Fixture.h b/tests/benchmark/fixtures/MobileNetV1Fixture.h
deleted file mode 100644
index 590aecf207..0000000000
--- a/tests/benchmark/fixtures/MobileNetV1Fixture.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_MOBILENETV1_FIXTURE
-#define ARM_COMPUTE_TEST_MOBILENETV1_FIXTURE
-
-#include "tests/AssetsLibrary.h"
-#include "tests/Utils.h"
-#include "tests/framework/Fixture.h"
-#include "tests/networks/MobileNetV1Network.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace benchmark
-{
-template <typename TensorType,
- typename Accessor,
- typename ActivationLayerFunction,
- typename BatchNormalizationLayerFunction,
- typename ConvolutionLayerFunction,
- typename DirectConvolutionLayerFunction,
- typename DepthwiseConvolutionFunction,
- typename ReshapeFunction,
- typename PoolingLayerFunction,
- typename SoftmaxLayerFunction,
- unsigned int InputSize>
-class MobileNetV1Fixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(int batches)
- {
- network.init(InputSize, batches);
- network.build();
- network.allocate();
- }
-
- void run()
- {
- network.run();
- }
-
- void sync()
- {
- network.sync();
- }
-
- void teardown()
- {
- network.clear();
- }
-
-private:
- networks::MobileNetV1Network<TensorType,
- Accessor,
- ActivationLayerFunction,
- BatchNormalizationLayerFunction,
- ConvolutionLayerFunction,
- DirectConvolutionLayerFunction,
- DepthwiseConvolutionFunction,
- ReshapeFunction,
- PoolingLayerFunction,
- SoftmaxLayerFunction>
- network{};
-};
-} // namespace benchmark
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_MOBILENETV1_FIXTURE */
diff --git a/tests/networks/AlexNetNetwork.h b/tests/networks/AlexNetNetwork.h
deleted file mode 100644
index e15db2a110..0000000000
--- a/tests/networks/AlexNetNetwork.h
+++ /dev/null
@@ -1,646 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_TEST_MODEL_OBJECTS_ALEXNET_H__
-#define __ARM_COMPUTE_TEST_MODEL_OBJECTS_ALEXNET_H__
-
-#include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "arm_compute/runtime/Tensor.h"
-
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/Utils.h"
-
-#include <memory>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace networks
-{
-/** AlexNet model object */
-template <typename ITensorType,
- typename TensorType,
- typename SubTensorType,
- typename Accessor,
- typename ActivationLayerFunction,
- typename ConvolutionLayerFunction,
- typename DirectConvolutionLayerFunction,
- typename FullyConnectedLayerFunction,
- typename NormalizationLayerFunction,
- typename PoolingLayerFunction,
- typename SoftmaxLayerFunction>
-class AlexNetNetwork
-{
-public:
- /** Initialize the network.
- *
- * @param[in] data_type Data type.
- * @param[in] batches Number of batches.
- * @param[in] reshaped_weights Whether the weights need reshaping or not. Default: false.
- */
- void init(DataType data_type, int batches, bool reshaped_weights = false)
- {
- _data_type = data_type;
- _batches = batches;
- _reshaped_weights = reshaped_weights;
-
- // Initialize weights and biases
- if(!_reshaped_weights)
- {
- w[0].allocator()->init(TensorInfo(TensorShape(11U, 11U, 3U, 96U), 1, _data_type));
- b[0].allocator()->init(TensorInfo(TensorShape(96U), 1, _data_type));
- w[1].allocator()->init(TensorInfo(TensorShape(5U, 5U, 48U, 256U), 1, _data_type));
- b[1].allocator()->init(TensorInfo(TensorShape(256U), 1, _data_type));
- w[2].allocator()->init(TensorInfo(TensorShape(3U, 3U, 256U, 384U), 1, _data_type));
- b[2].allocator()->init(TensorInfo(TensorShape(384U), 1, _data_type));
- w[3].allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 384U), 1, _data_type));
- b[3].allocator()->init(TensorInfo(TensorShape(384U), 1, _data_type));
- w[4].allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 256U), 1, _data_type));
- b[4].allocator()->init(TensorInfo(TensorShape(256U), 1, _data_type));
- w[5].allocator()->init(TensorInfo(TensorShape(9216U, 4096U), 1, _data_type));
- b[5].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type));
- w[6].allocator()->init(TensorInfo(TensorShape(4096U, 4096U), 1, _data_type));
- b[6].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type));
- w[7].allocator()->init(TensorInfo(TensorShape(4096U, 1000U), 1, _data_type));
- b[7].allocator()->init(TensorInfo(TensorShape(1000U), 1, _data_type));
-
- w11 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[1], TensorShape(5U, 5U, 48U, 128U), Coordinates()));
- w12 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[1], TensorShape(5U, 5U, 48U, 128U), Coordinates(0, 0, 0, 128)));
- b11 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[1], TensorShape(128U), Coordinates(), true));
- b12 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[1], TensorShape(128U), Coordinates(128), true));
-
- w31 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[3], TensorShape(3U, 3U, 192U, 192U), Coordinates()));
- w32 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[3], TensorShape(3U, 3U, 192U, 192U), Coordinates(0, 0, 0, 192)));
- b31 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[3], TensorShape(192U), Coordinates(), true));
- b32 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[3], TensorShape(192U), Coordinates(192), true));
-
- w41 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[4], TensorShape(3U, 3U, 192U, 128U), Coordinates()));
- w42 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[4], TensorShape(3U, 3U, 192U, 128U), Coordinates(0, 0, 0, 128)));
- b41 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[4], TensorShape(128U), Coordinates(), true));
- b42 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[4], TensorShape(128U), Coordinates(128), true));
- }
- else
- {
- auto reshape = [&](unsigned int width, unsigned int height, bool convolution_layer) -> TensorShape
- {
- const bool is_optimised = std::is_same<ITensorType, ITensor>::value && data_type == DataType::F32;
-
- if(convolution_layer && is_optimised)
- {
- return TensorShape{ height, width };
- }
- else
- {
- const int interleave_width = 16 / arm_compute::data_size_from_type(_data_type);
-
- return TensorShape{ width * interleave_width, static_cast<unsigned int>(std::ceil(static_cast<float>(height) / interleave_width)) };
- }
- };
-
- // Create tensor for the reshaped weights
- w[0].allocator()->init(TensorInfo(reshape(366U, 96U, true), 1, _data_type));
-
- // Configure the direct convolution's weights. Direct convolution doesn't need reshape weights
- if(!_is_direct_conv)
- {
- auto w11_tensor = std::unique_ptr<TensorType>(new TensorType());
- auto w12_tensor = std::unique_ptr<TensorType>(new TensorType());
- auto w31_tensor = std::unique_ptr<TensorType>(new TensorType());
- auto w32_tensor = std::unique_ptr<TensorType>(new TensorType());
- auto w41_tensor = std::unique_ptr<TensorType>(new TensorType());
- auto w42_tensor = std::unique_ptr<TensorType>(new TensorType());
- w11_tensor->allocator()->init(TensorInfo(reshape(1248U, 128U, true), 1, _data_type));
- w12_tensor->allocator()->init(TensorInfo(reshape(1248U, 128U, true), 1, _data_type));
- w31_tensor->allocator()->init(TensorInfo(reshape(1920U, 192U, true), 1, _data_type));
- w32_tensor->allocator()->init(TensorInfo(reshape(1920U, 192U, true), 1, _data_type));
- w41_tensor->allocator()->init(TensorInfo(reshape(1920U, 128U, true), 1, _data_type));
- w42_tensor->allocator()->init(TensorInfo(reshape(1920U, 128U, true), 1, _data_type));
- w[2].allocator()->init(TensorInfo(reshape(2560U, 384U, true), 1, _data_type));
- w11 = std::move(w11_tensor);
- w12 = std::move(w12_tensor);
- w31 = std::move(w31_tensor);
- w32 = std::move(w32_tensor);
- w41 = std::move(w41_tensor);
- w42 = std::move(w42_tensor);
- }
- else
- {
- w[1].allocator()->init(TensorInfo(TensorShape(5U, 5U, 48U, 256U), 1, _data_type));
- b[1].allocator()->init(TensorInfo(TensorShape(256U), 1, _data_type));
- w[2].allocator()->init(TensorInfo(TensorShape(3U, 3U, 256U, 384U), 1, _data_type));
- b[2].allocator()->init(TensorInfo(TensorShape(384U), 1, _data_type));
- w[3].allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 384U), 1, _data_type));
- b[3].allocator()->init(TensorInfo(TensorShape(384U), 1, _data_type));
- w[4].allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 256U), 1, _data_type));
- b[4].allocator()->init(TensorInfo(TensorShape(256U), 1, _data_type));
- w11 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[1], TensorShape(5U, 5U, 48U, 128U), Coordinates()));
- w12 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[1], TensorShape(5U, 5U, 48U, 128U), Coordinates(0, 0, 0, 128)));
- b11 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[1], TensorShape(128U), Coordinates()));
- b12 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[1], TensorShape(128U), Coordinates(128)));
-
- w31 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[3], TensorShape(3U, 3U, 192U, 192U), Coordinates()));
- w32 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[3], TensorShape(3U, 3U, 192U, 192U), Coordinates(0, 0, 0, 192)));
- b31 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[3], TensorShape(192U), Coordinates()));
- b32 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[3], TensorShape(192U), Coordinates(192)));
-
- w41 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[4], TensorShape(3U, 3U, 192U, 128U), Coordinates()));
- w42 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[4], TensorShape(3U, 3U, 192U, 128U), Coordinates(0, 0, 0, 128)));
- b41 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[4], TensorShape(128U), Coordinates()));
- b42 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[4], TensorShape(128U), Coordinates(128)));
- }
-
- b[5].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type));
- b[6].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type));
- b[7].allocator()->init(TensorInfo(TensorShape(1000U), 1, _data_type));
-
- if(_batches > 1 && std::is_same<TensorType, Tensor>::value)
- {
- w[5].allocator()->init(TensorInfo(reshape(9216U, 4096U, false), 1, _data_type));
- w[6].allocator()->init(TensorInfo(reshape(4096U, 4096U, false), 1, _data_type));
- w[7].allocator()->init(TensorInfo(reshape(4096U, 1000U, false), 1, _data_type));
- }
- else
- {
- w[5].allocator()->init(TensorInfo(TensorShape(4096U, 9216U), 1, _data_type));
- w[6].allocator()->init(TensorInfo(TensorShape(4096U, 4096U), 1, _data_type));
- w[7].allocator()->init(TensorInfo(TensorShape(1000U, 4096U), 1, _data_type));
- }
- }
- }
-
- /** Build the network */
- void build()
- {
- FullyConnectedLayerInfo fc_info;
- fc_info.are_weights_reshaped = _reshaped_weights;
-
- input.allocator()->init(TensorInfo(TensorShape(227U, 227U, 3U, _batches), 1, _data_type));
- output.allocator()->init(TensorInfo(TensorShape(1000U, _batches), 1, _data_type));
-
- // Initialize intermediate tensors
- // Layer 1
- conv1_out.allocator()->init(TensorInfo(TensorShape(55U, 55U, 96U, _batches), 1, _data_type));
- act1_out.allocator()->init(TensorInfo(TensorShape(55U, 55U, 96U, _batches), 1, _data_type));
- norm1_out.allocator()->init(TensorInfo(TensorShape(55U, 55U, 96U, _batches), 1, _data_type));
- pool1_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 96U, _batches), 1, _data_type));
- pool11_out = std::unique_ptr<SubTensorType>(new SubTensorType(&pool1_out, TensorShape(27U, 27U, 48U, _batches), Coordinates()));
- pool12_out = std::unique_ptr<SubTensorType>(new SubTensorType(&pool1_out, TensorShape(27U, 27U, 48U, _batches), Coordinates(0, 0, 48)));
- // Layer 2
- conv2_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 256U, _batches), 1, _data_type));
- conv21_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv2_out, TensorShape(27U, 27U, 128U, _batches), Coordinates()));
- conv22_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv2_out, TensorShape(27U, 27U, 128U, _batches), Coordinates(0, 0, 128)));
- act2_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 256U, _batches), 1, _data_type));
- norm2_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 256U, _batches), 1, _data_type));
- pool2_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 256U, _batches), 1, _data_type));
- // Layer 3
- conv3_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type));
- act3_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type));
- act31_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act3_out, TensorShape(13U, 13U, 192U, _batches), Coordinates()));
- act32_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act3_out, TensorShape(13U, 13U, 192U, _batches), Coordinates(0, 0, 192)));
- // Layer 4
- conv4_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type));
- conv41_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates()));
- conv42_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates(0, 0, 192)));
- act4_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type));
- act41_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates()));
- act42_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates(0, 0, 192)));
- // Layer 5
- conv5_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 256U, _batches), 1, _data_type));
- conv51_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv5_out, TensorShape(13U, 13U, 128U, _batches), Coordinates()));
- conv52_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv5_out, TensorShape(13U, 13U, 128U, _batches), Coordinates(0, 0, 128)));
- act5_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 256U, _batches), 1, _data_type));
- pool5_out.allocator()->init(TensorInfo(TensorShape(6U, 6U, 256U, _batches), 1, _data_type));
- // Layer 6
- fc6_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type));
- act6_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type));
- // Layer 7
- fc7_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type));
- act7_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type));
- // Layer 8
- fc8_out.allocator()->init(TensorInfo(TensorShape(1000U, _batches), 1, _data_type));
-
- // Configure Layers
- // Layer 1
- TensorType *b0 = _reshaped_weights ? nullptr : &b[0];
- conv1.configure(&input, &w[0], b0, &conv1_out, PadStrideInfo(4, 4, 0, 0), WeightsInfo(_reshaped_weights, 11U, 11U, 96U));
- act1.configure(&conv1_out, &act1_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- norm1.configure(&act1_out, &norm1_out, NormalizationLayerInfo(NormType::CROSS_MAP, 5, 0.0001f, 0.75f));
- pool1.configure(&norm1_out, &pool1_out, PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)));
- // Layer 2
- conv21.configure(pool11_out.get(), w11.get(), b11.get(), conv21_out.get(), PadStrideInfo(1, 1, 2, 2), WeightsInfo(_reshaped_weights, 5U, 5U, 128U));
- conv22.configure(pool12_out.get(), w12.get(), b12.get(), conv22_out.get(), PadStrideInfo(1, 1, 2, 2), WeightsInfo(_reshaped_weights, 5U, 5U, 128U));
- act2.configure(&conv2_out, &act2_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- norm2.configure(&act2_out, &norm2_out, NormalizationLayerInfo(NormType::CROSS_MAP, 5, 0.0001f, 0.75f));
- pool2.configure(&norm2_out, &pool2_out, PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)));
- // Layer 3
- TensorType *b2 = (_reshaped_weights && !_is_direct_conv) ? nullptr : &b[2];
- conv3.configure(&pool2_out, &w[2], b2, &conv3_out, PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U, 384U));
- act3.configure(&conv3_out, &act3_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- // Layer 4
- conv41.configure(act31_out.get(), w31.get(), b31.get(), conv41_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U, 192U));
- conv42.configure(act32_out.get(), w32.get(), b32.get(), conv42_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U, 192U));
- act4.configure(&conv4_out, &act4_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- // Layer 5
- conv51.configure(act41_out.get(), w41.get(), b41.get(), conv51_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U, 128U));
- conv52.configure(act42_out.get(), w42.get(), b42.get(), conv52_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U, 128U));
- act5.configure(&conv5_out, &act5_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- pool5.configure(&act5_out, &pool5_out, PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)));
- // Layer 6
- fc6.configure(&pool5_out, &w[5], &b[5], &fc6_out, fc_info);
- act6.configure(&fc6_out, &act6_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- // Layer 7
- fc7.configure(&act6_out, &w[6], &b[6], &fc7_out, fc_info);
- act7.configure(&fc7_out, &act7_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- // Layer 8
- fc8.configure(&act7_out, &w[7], &b[7], &fc8_out, fc_info);
- // Softmax
- smx.configure(&fc8_out, &output);
- }
-
- /** Allocate the network */
- void allocate()
- {
- input.allocator()->allocate();
- output.allocator()->allocate();
-
- if(!_reshaped_weights)
- {
- for(auto &wi : w)
- {
- wi.allocator()->allocate();
- }
-
- for(auto &bi : b)
- {
- bi.allocator()->allocate();
- }
- }
- else
- {
- w[0].allocator()->allocate();
- w[2].allocator()->allocate();
- w[5].allocator()->allocate();
- w[6].allocator()->allocate();
- w[7].allocator()->allocate();
-
- b[5].allocator()->allocate();
- b[6].allocator()->allocate();
- b[7].allocator()->allocate();
-
- if(!_is_direct_conv)
- {
- dynamic_cast<TensorType *>(w11.get())->allocator()->allocate();
- dynamic_cast<TensorType *>(w12.get())->allocator()->allocate();
- dynamic_cast<TensorType *>(w31.get())->allocator()->allocate();
- dynamic_cast<TensorType *>(w32.get())->allocator()->allocate();
- dynamic_cast<TensorType *>(w41.get())->allocator()->allocate();
- dynamic_cast<TensorType *>(w42.get())->allocator()->allocate();
- }
- else
- {
- b[1].allocator()->allocate();
- b[2].allocator()->allocate();
- b[3].allocator()->allocate();
- b[4].allocator()->allocate();
- w[1].allocator()->allocate();
- w[3].allocator()->allocate();
- w[4].allocator()->allocate();
- }
- }
-
- conv1_out.allocator()->allocate();
- act1_out.allocator()->allocate();
- norm1_out.allocator()->allocate();
- pool1_out.allocator()->allocate();
- conv2_out.allocator()->allocate();
- act2_out.allocator()->allocate();
- norm2_out.allocator()->allocate();
- pool2_out.allocator()->allocate();
- conv3_out.allocator()->allocate();
- act3_out.allocator()->allocate();
- conv4_out.allocator()->allocate();
- act4_out.allocator()->allocate();
- conv5_out.allocator()->allocate();
- act5_out.allocator()->allocate();
- pool5_out.allocator()->allocate();
- fc6_out.allocator()->allocate();
- act6_out.allocator()->allocate();
- fc7_out.allocator()->allocate();
- act7_out.allocator()->allocate();
- fc8_out.allocator()->allocate();
- }
-
- /** Fills the trainable parameters and input with random data. */
- void fill_random()
- {
- library->fill_tensor_uniform(Accessor(input), 0);
-
- if(!_reshaped_weights)
- {
- for(unsigned int i = 0; i < w.size(); ++i)
- {
- library->fill_tensor_uniform(Accessor(w[i]), i + 1);
- library->fill_tensor_uniform(Accessor(b[i]), i + 10);
- }
- }
- else
- {
- library->fill_tensor_uniform(Accessor(w[0]), 1);
- library->fill_tensor_uniform(Accessor(w[2]), 2);
-
- library->fill_tensor_uniform(Accessor(w[5]), 3);
- library->fill_tensor_uniform(Accessor(b[5]), 4);
- library->fill_tensor_uniform(Accessor(w[6]), 5);
- library->fill_tensor_uniform(Accessor(b[6]), 6);
- library->fill_tensor_uniform(Accessor(w[7]), 7);
- library->fill_tensor_uniform(Accessor(b[7]), 8);
-
- if(!_is_direct_conv)
- {
- library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w11.get())), 9);
- library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w12.get())), 10);
- library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w31.get())), 11);
- library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w32.get())), 12);
- library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w41.get())), 13);
- library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w42.get())), 14);
- }
- else
- {
- library->fill_tensor_uniform(Accessor(w[1]), 9);
- library->fill_tensor_uniform(Accessor(b[1]), 10);
- library->fill_tensor_uniform(Accessor(w[3]), 11);
- library->fill_tensor_uniform(Accessor(b[3]), 12);
- library->fill_tensor_uniform(Accessor(w[4]), 13);
- library->fill_tensor_uniform(Accessor(b[4]), 14);
- }
- }
- }
-
- /** Fills the trainable parameters from binary files
- *
- * @param weights Files names containing the weights data
- * @param biases Files names containing the bias data
- */
- void fill(std::vector<std::string> weights, std::vector<std::string> biases)
- {
- ARM_COMPUTE_ERROR_ON(weights.size() != w.size());
- ARM_COMPUTE_ERROR_ON(biases.size() != b.size());
- ARM_COMPUTE_ERROR_ON(_reshaped_weights);
-
- for(unsigned int i = 0; i < weights.size(); ++i)
- {
- library->fill_layer_data(Accessor(w[i]), weights[i]);
- library->fill_layer_data(Accessor(b[i]), biases[i]);
- }
- }
-
- /** Feed input to network from file.
- *
- * @param name File name of containing the input data.
- */
- void feed(std::string name)
- {
- library->fill_layer_data(Accessor(input), name);
- }
-
- /** Get the classification results.
- *
- * @return Vector containing the classified labels
- */
- std::vector<unsigned int> get_classifications()
- {
- std::vector<unsigned int> classified_labels;
- Accessor output_accessor(output);
-
- Window window;
- window.set(Window::DimX, Window::Dimension(0, 1, 1));
- for(unsigned int d = 1; d < output_accessor.shape().num_dimensions(); ++d)
- {
- window.set(d, Window::Dimension(0, output_accessor.shape()[d], 1));
- }
-
- execute_window_loop(window, [&](const Coordinates & id)
- {
- int max_idx = 0;
- float val = 0;
- const void *const out_ptr = output_accessor(id);
- for(unsigned int l = 0; l < output_accessor.shape().x(); ++l)
- {
- float curr_val = reinterpret_cast<const float *>(out_ptr)[l];
- if(curr_val > val)
- {
- max_idx = l;
- val = curr_val;
- }
- }
- classified_labels.push_back(max_idx);
- });
- return classified_labels;
- }
-
- /** Clear all allocated memory from the tensor objects */
- void clear()
- {
- // Free allocations
- input.allocator()->free();
- output.allocator()->free();
-
- if(!_reshaped_weights)
- {
- for(auto &wi : w)
- {
- wi.allocator()->free();
- }
-
- for(auto &bi : b)
- {
- bi.allocator()->free();
- }
- }
- else
- {
- w[0].allocator()->free();
- w[2].allocator()->free();
- w[5].allocator()->free();
- w[6].allocator()->free();
- w[7].allocator()->free();
-
- b[5].allocator()->free();
- b[6].allocator()->free();
- b[7].allocator()->free();
-
- if(_is_direct_conv)
- {
- w[3].allocator()->free();
- w[4].allocator()->free();
- b[2].allocator()->free();
- b[3].allocator()->free();
- b[4].allocator()->free();
- }
- }
-
- w11.reset();
- w12.reset();
- b11.reset();
- b11.reset();
- w31.reset();
- w32.reset();
- b31.reset();
- b32.reset();
- w41.reset();
- w42.reset();
- b41.reset();
- b42.reset();
-
- conv1_out.allocator()->free();
- act1_out.allocator()->free();
- norm1_out.allocator()->free();
- pool1_out.allocator()->free();
- conv2_out.allocator()->free();
- act2_out.allocator()->free();
- norm2_out.allocator()->free();
- pool2_out.allocator()->free();
- conv3_out.allocator()->free();
- act3_out.allocator()->free();
- conv4_out.allocator()->free();
- act4_out.allocator()->free();
- conv5_out.allocator()->free();
- act5_out.allocator()->free();
- pool5_out.allocator()->free();
- fc6_out.allocator()->free();
- act6_out.allocator()->free();
- fc7_out.allocator()->free();
- act7_out.allocator()->free();
- fc8_out.allocator()->free();
- }
-
- /** Runs the model */
- void run()
- {
- // Layer 1
- conv1.run();
- act1.run();
- norm1.run();
- pool1.run();
- // Layer 2
- conv21.run();
- conv22.run();
- act2.run();
- norm2.run();
- pool2.run();
- // Layer 3
- conv3.run();
- act3.run();
- // Layer 4
- conv41.run();
- conv42.run();
- act4.run();
- // Layer 5
- conv51.run();
- conv52.run();
- act5.run();
- pool5.run();
- // Layer 6
- fc6.run();
- act6.run();
- // Layer 7
- fc7.run();
- act7.run();
- // Layer 8
- fc8.run();
- // Softmax
- smx.run();
- }
-
- /** Sync the results */
- void sync()
- {
- sync_if_necessary<TensorType>();
- sync_tensor_if_necessary<TensorType>(output);
- }
-
-private:
- struct DirectConv
- {
- template <typename ConvolutionLayerFunction1 = ConvolutionLayerFunction, typename DirectConvolutionLayerFunction1 = DirectConvolutionLayerFunction>
- typename std::enable_if < !std::is_same<ConvolutionLayerFunction1, DirectConvolutionLayerFunction1>::value, void >::type
- configure(ITensorType *input, const ITensorType *weights, const ITensorType *biases, ITensorType *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo())
- {
- _func.configure(input, weights, biases, output, conv_info);
- }
-
- template <typename ConvolutionLayerFunction1 = ConvolutionLayerFunction, typename DirectConvolutionLayerFunction1 = DirectConvolutionLayerFunction>
- typename std::enable_if<std::is_same<ConvolutionLayerFunction1, DirectConvolutionLayerFunction1>::value, void>::type
- configure(ITensorType *input, const ITensorType *weights, const ITensorType *biases, ITensorType *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo())
- {
- _func.configure(input, weights, biases, output, conv_info, weights_info);
- }
-
- void run()
- {
- _func.run();
- }
-
- DirectConvolutionLayerFunction _func{};
- };
-
- DataType _data_type{ DataType::UNKNOWN };
- unsigned int _batches{ 0 };
- bool _reshaped_weights{ false };
- bool _is_direct_conv{ !std::is_same<ConvolutionLayerFunction, DirectConvolutionLayerFunction>::value };
-
- ActivationLayerFunction act1{}, act2{}, act3{}, act4{}, act5{}, act6{}, act7{};
- ConvolutionLayerFunction conv1{};
- DirectConv conv21{}, conv22{}, conv3{}, conv41{}, conv42{}, conv51{}, conv52{};
- FullyConnectedLayerFunction fc6{}, fc7{}, fc8{};
- NormalizationLayerFunction norm1{}, norm2{};
- PoolingLayerFunction pool1{}, pool2{}, pool5{};
- SoftmaxLayerFunction smx{};
-
- TensorType input{}, output{};
- std::array<TensorType, 8> w{ {} }, b{ {} };
- std::unique_ptr<ITensorType> w11{ nullptr }, w12{ nullptr }, b11{ nullptr }, b12{ nullptr };
- std::unique_ptr<ITensorType> w31{ nullptr }, w32{ nullptr }, b31{ nullptr }, b32{ nullptr };
- std::unique_ptr<ITensorType> w41{ nullptr }, w42{ nullptr }, b41{ nullptr }, b42{ nullptr };
-
- TensorType conv1_out{}, act1_out{}, norm1_out{}, pool1_out{};
- TensorType conv2_out{}, act2_out{}, pool2_out{}, norm2_out{};
- TensorType conv3_out{}, act3_out{};
- TensorType conv4_out{}, act4_out{};
- TensorType conv5_out{}, act5_out{}, pool5_out{};
- TensorType fc6_out{}, act6_out{};
- TensorType fc7_out{}, act7_out{};
- TensorType fc8_out{};
-
- std::unique_ptr<SubTensorType> pool11_out{}, pool12_out{};
- std::unique_ptr<SubTensorType> conv21_out{}, conv22_out{};
- std::unique_ptr<SubTensorType> act31_out{}, act32_out{};
- std::unique_ptr<SubTensorType> conv41_out{}, conv42_out{}, act41_out{}, act42_out{};
- std::unique_ptr<SubTensorType> conv51_out{}, conv52_out{};
-};
-} // namespace networks
-} // namespace test
-} // namespace arm_compute
-#endif //__ARM_COMPUTE_TEST_MODEL_OBJECTS_ALEXNET_H__
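The DirectConv wrapper removed above selects between two configure() overloads at compile time with std::enable_if, so that a WeightsInfo argument is only forwarded when the wrapped function type actually accepts one. The following is a minimal, self-contained sketch of that dispatch pattern; every name in it is a hypothetical stand-in for illustration, not a ComputeLibrary API.

// Sketch (not ComputeLibrary code) of the std::enable_if dispatch used by the removed
// DirectConv wrapper: the WeightsInfo argument is only forwarded when the wrapped
// function type accepts it. All types below are hypothetical stand-ins.
#include <iostream>
#include <type_traits>

struct WeightsInfo
{
};

// Hypothetical function object with the narrow signature (no WeightsInfo).
struct DirectConv3x3
{
    void configure(int ifm, int ofm)
    {
        std::cout << "direct conv: " << ifm << " -> " << ofm << "\n";
    }
};

// Hypothetical function object that also accepts a WeightsInfo.
struct GenericConv
{
    void configure(int ifm, int ofm, const WeightsInfo &)
    {
        std::cout << "generic conv (with weights info): " << ifm << " -> " << ofm << "\n";
    }
};

template <typename ConvFn, typename DirectFn>
struct ConvDispatcher
{
    // Chosen when the two function types differ: call the narrow signature.
    template <typename C = ConvFn, typename D = DirectFn>
    typename std::enable_if<!std::is_same<C, D>::value, void>::type
    configure(int ifm, int ofm, const WeightsInfo & = WeightsInfo())
    {
        _func.configure(ifm, ofm);
    }

    // Chosen when both template parameters are the same type: forward the WeightsInfo too.
    template <typename C = ConvFn, typename D = DirectFn>
    typename std::enable_if<std::is_same<C, D>::value, void>::type
    configure(int ifm, int ofm, const WeightsInfo &winfo = WeightsInfo())
    {
        _func.configure(ifm, ofm, winfo);
    }

    DirectFn _func{};
};

int main()
{
    ConvDispatcher<GenericConv, DirectConv3x3> uses_direct{};
    uses_direct.configure(3, 96); // narrow overload is selected

    ConvDispatcher<GenericConv, GenericConv> uses_generic{};
    uses_generic.configure(3, 96); // WeightsInfo-forwarding overload is selected
    return 0;
}

With ConvDispatcher<GenericConv, DirectConv3x3> the narrow overload is chosen; with ConvDispatcher<GenericConv, GenericConv> the WeightsInfo-forwarding overload is chosen, which mirrors how the removed AlexNetNetwork only passes reshaped-weights information to a true convolution layer.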
diff --git a/tests/networks/LeNet5Network.h b/tests/networks/LeNet5Network.h
deleted file mode 100644
index 9cfd59284c..0000000000
--- a/tests/networks/LeNet5Network.h
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_TEST_MODEL_OBJECTS_LENET5_H__
-#define __ARM_COMPUTE_TEST_MODEL_OBJECTS_LENET5_H__
-
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/Utils.h"
-
-#include <memory>
-
-using namespace arm_compute;
-using namespace arm_compute::test;
-
-namespace arm_compute
-{
-namespace test
-{
-namespace networks
-{
-/** LeNet5 model object */
-template <typename TensorType,
- typename Accessor,
- typename ActivationLayerFunction,
- typename ConvolutionLayerFunction,
- typename FullyConnectedLayerFunction,
- typename PoolingLayerFunction,
- typename SoftmaxLayerFunction>
-class LeNet5Network
-{
-public:
- /** Initialize the network.
- *
- * @param[in] batches Number of batches.
- */
- void init(int batches)
- {
- _batches = batches;
-
- // Initialize input, output, weights and biases
- input.allocator()->init(TensorInfo(TensorShape(28U, 28U, 1U, _batches), 1, DataType::F32));
- output.allocator()->init(TensorInfo(TensorShape(10U, _batches), 1, DataType::F32));
- w[0].allocator()->init(TensorInfo(TensorShape(5U, 5U, 1U, 20U), 1, DataType::F32));
- b[0].allocator()->init(TensorInfo(TensorShape(20U), 1, DataType::F32));
- w[1].allocator()->init(TensorInfo(TensorShape(5U, 5U, 20U, 50U), 1, DataType::F32));
- b[1].allocator()->init(TensorInfo(TensorShape(50U), 1, DataType::F32));
- w[2].allocator()->init(TensorInfo(TensorShape(800U, 500U), 1, DataType::F32));
- b[2].allocator()->init(TensorInfo(TensorShape(500U), 1, DataType::F32));
- w[3].allocator()->init(TensorInfo(TensorShape(500U, 10U), 1, DataType::F32));
- b[3].allocator()->init(TensorInfo(TensorShape(10U), 1, DataType::F32));
- }
-
- /** Build the model. */
- void build()
- {
- // Initialize intermediate tensors
- // Layer 1
- conv1_out.allocator()->init(TensorInfo(TensorShape(24U, 24U, 20U, _batches), 1, DataType::F32));
- pool1_out.allocator()->init(TensorInfo(TensorShape(12U, 12U, 20U, _batches), 1, DataType::F32));
- // Layer 2
- conv2_out.allocator()->init(TensorInfo(TensorShape(8U, 8U, 50U, _batches), 1, DataType::F32));
- pool2_out.allocator()->init(TensorInfo(TensorShape(4U, 4U, 50U, _batches), 1, DataType::F32));
- // Layer 3
- fc1_out.allocator()->init(TensorInfo(TensorShape(500U, _batches), 1, DataType::F32));
- act1_out.allocator()->init(TensorInfo(TensorShape(500U, _batches), 1, DataType::F32));
-        // Layer 4
- fc2_out.allocator()->init(TensorInfo(TensorShape(10U, _batches), 1, DataType::F32));
-
- // Configure Layers
- conv1.configure(&input, &w[0], &b[0], &conv1_out, PadStrideInfo(1, 1, 0, 0));
- pool1.configure(&conv1_out, &pool1_out, PoolingLayerInfo(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0)));
- conv2.configure(&pool1_out, &w[1], &b[1], &conv2_out, PadStrideInfo(1, 1, 0, 0));
- pool2.configure(&conv2_out, &pool2_out, PoolingLayerInfo(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0)));
- fc1.configure(&pool2_out, &w[2], &b[2], &fc1_out);
- act1.configure(&fc1_out, &act1_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- fc2.configure(&act1_out, &w[3], &b[3], &fc2_out);
- smx.configure(&fc2_out, &output);
- }
-
- /** Allocate the network */
- void allocate()
- {
- // Allocate tensors
- input.allocator()->allocate();
- output.allocator()->allocate();
- for(auto &wi : w)
- {
- wi.allocator()->allocate();
- }
- for(auto &bi : b)
- {
- bi.allocator()->allocate();
- }
- conv1_out.allocator()->allocate();
- pool1_out.allocator()->allocate();
- conv2_out.allocator()->allocate();
- pool2_out.allocator()->allocate();
- fc1_out.allocator()->allocate();
- act1_out.allocator()->allocate();
- fc2_out.allocator()->allocate();
- }
-
- /** Fills the trainable parameters and input with random data. */
- void fill_random()
- {
- std::uniform_real_distribution<> distribution(-1, 1);
- library->fill(Accessor(input), distribution, 0);
- for(unsigned int i = 0; i < w.size(); ++i)
- {
- library->fill(Accessor(w[i]), distribution, i + 1);
- library->fill(Accessor(b[i]), distribution, i + 10);
- }
- }
-
- /** Fills the trainable parameters from binary files
- *
-     * @param[in] weights File names containing the weights data
-     * @param[in] biases  File names containing the bias data
- */
- void fill(std::vector<std::string> weights, std::vector<std::string> biases)
- {
- ARM_COMPUTE_ERROR_ON(weights.size() != w.size());
- ARM_COMPUTE_ERROR_ON(biases.size() != b.size());
-
- for(unsigned int i = 0; i < weights.size(); ++i)
- {
- library->fill_layer_data(Accessor(w[i]), weights[i]);
- library->fill_layer_data(Accessor(b[i]), biases[i]);
- }
- }
-
- /** Feed input to network from file.
- *
-     * @param[in] name Name of the file containing the input data.
- */
- void feed(std::string name)
- {
- library->fill_layer_data(Accessor(input), name);
- }
-
- /** Get the classification results.
- *
- * @return Vector containing the classified labels
- */
- std::vector<unsigned int> get_classifications()
- {
- std::vector<unsigned int> classified_labels;
- Accessor output_accessor(output);
-
- Window window;
- window.set(Window::DimX, Window::Dimension(0, 1, 1));
- for(unsigned int d = 1; d < output_accessor.shape().num_dimensions(); ++d)
- {
- window.set(d, Window::Dimension(0, output_accessor.shape()[d], 1));
- }
-
- execute_window_loop(window, [&](const Coordinates & id)
- {
- int max_idx = 0;
- float val = 0;
- const void *const out_ptr = output_accessor(id);
- for(unsigned int l = 0; l < output_accessor.shape().x(); ++l)
- {
- float curr_val = reinterpret_cast<const float *>(out_ptr)[l];
- if(curr_val > val)
- {
- max_idx = l;
- val = curr_val;
- }
- }
- classified_labels.push_back(max_idx);
- });
- return classified_labels;
- }
-
- /** Clear all allocated memory from the tensor objects */
- void clear()
- {
- input.allocator()->free();
- output.allocator()->free();
- for(auto &wi : w)
- {
- wi.allocator()->free();
- }
- for(auto &bi : b)
- {
- bi.allocator()->free();
- }
-
- conv1_out.allocator()->free();
- pool1_out.allocator()->free();
- conv2_out.allocator()->free();
- pool2_out.allocator()->free();
- fc1_out.allocator()->free();
- act1_out.allocator()->free();
- fc2_out.allocator()->free();
- }
-
- /** Runs the model */
- void run()
- {
- // Layer 1
- conv1.run();
- pool1.run();
- // Layer 2
- conv2.run();
- pool2.run();
- // Layer 3
- fc1.run();
- act1.run();
- // Layer 4
- fc2.run();
- // Softmax
- smx.run();
- }
-
- /** Sync the results */
- void sync()
- {
- sync_if_necessary<TensorType>();
- sync_tensor_if_necessary<TensorType>(output);
- }
-
-private:
- unsigned int _batches{ 0 };
-
- ActivationLayerFunction act1{};
- ConvolutionLayerFunction conv1{}, conv2{};
- FullyConnectedLayerFunction fc1{}, fc2{};
- PoolingLayerFunction pool1{}, pool2{};
- SoftmaxLayerFunction smx{};
-
- TensorType input{}, output{};
- std::array<TensorType, 4> w{ {} }, b{ {} };
-
- TensorType conv1_out{}, pool1_out{};
- TensorType conv2_out{}, pool2_out{};
- TensorType fc1_out{}, act1_out{};
- TensorType fc2_out{};
-};
-} // namespace networks
-} // namespace test
-} // namespace arm_compute
-#endif //__ARM_COMPUTE_TEST_MODEL_OBJECTS_LENET5_H__
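Every network object removed by this patch follows the same lifecycle: init(), build(), allocate(), fill() or fill_random(), run(), and optionally sync(), get_classifications() and clear(). A minimal sketch of driving the LeNet5Network removed above, instantiated with the NEON function types that the NEON system test further below uses; it assumes it runs inside the ComputeLibrary test framework (where the global AssetsLibrary "library" is initialised), and the batch count of 10 matches that test.

// Sketch of the lifecycle of the removed LeNet5Network; assumes the ACL test framework
// is initialised so that fill_random()/feed() can use the global AssetsLibrary.
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
#include "arm_compute/runtime/NEON/functions/NEPoolingLayer.h"
#include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "tests/NEON/Accessor.h"
#include "tests/networks/LeNet5Network.h"

#include <vector>

using NELeNet5Model = arm_compute::test::networks::LeNet5Network<arm_compute::Tensor,
                                                                 arm_compute::test::Accessor,
                                                                 arm_compute::NEActivationLayer,
                                                                 arm_compute::NEConvolutionLayer,
                                                                 arm_compute::NEFullyConnectedLayer,
                                                                 arm_compute::NEPoolingLayer,
                                                                 arm_compute::NESoftmaxLayer>;

std::vector<unsigned int> run_lenet5_once()
{
    NELeNet5Model network{};
    network.init(10);      // batch count used by the NEON system test below
    network.build();       // configure every layer
    network.allocate();    // allocate inputs, outputs, weights and intermediates
    network.fill_random(); // or network.fill(weight_files, bias_files) with .npy assets
    network.run();
    network.sync();        // synchronise results where the backend requires it
    std::vector<unsigned int> labels = network.get_classifications();
    network.clear();       // release all tensor memory
    return labels;
}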
diff --git a/tests/networks/MobileNetNetwork.h b/tests/networks/MobileNetNetwork.h
deleted file mode 100644
index ec054b237e..0000000000
--- a/tests/networks/MobileNetNetwork.h
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_TEST_MODEL_OBJECTS_MOBILENET_H__
-#define __ARM_COMPUTE_TEST_MODEL_OBJECTS_MOBILENET_H__
-
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/Utils.h"
-
-#include "utils/Utils.h"
-
-#include <memory>
-
-using namespace arm_compute;
-using namespace arm_compute::test;
-
-namespace arm_compute
-{
-namespace test
-{
-namespace networks
-{
-/** MobileNet model object */
-template <typename TensorType,
- typename Accessor,
- typename ActivationLayerFunction,
- typename ConvolutionLayerFunction,
- typename DirectConvolutionLayerFunction,
- typename DepthwiseConvolutionLayerFunction,
- typename ReshapeFunction,
- typename PoolingLayerFunction>
-class MobileNetNetwork
-{
-public:
- /** Initialize the network.
- *
- * @param[in] batches Number of batches.
- */
- void init(int batches)
- {
- _batches = batches;
-
- // Initialize input, output
- input.allocator()->init(TensorInfo(TensorShape(224U, 224U, 3U, _batches), 1, DataType::F32));
- output.allocator()->init(TensorInfo(TensorShape(11U, _batches), 1, DataType::F32));
- // Initialize weights and biases
- w_conv3x3.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 16U), 1, DataType::F32));
- b_conv3x3.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));
- depthwise_conv_block_init(0, 16, 16);
- depthwise_conv_block_init(1, 16, 32);
- depthwise_conv_block_init(2, 32, 32);
- depthwise_conv_block_init(3, 32, 64);
- depthwise_conv_block_init(4, 64, 64);
- depthwise_conv_block_init(5, 64, 128);
- depthwise_conv_block_init(6, 128, 128);
- depthwise_conv_block_init(7, 128, 128);
- depthwise_conv_block_init(8, 128, 128);
- depthwise_conv_block_init(9, 128, 128);
- depthwise_conv_block_init(10, 128, 128);
- depthwise_conv_block_init(11, 128, 256);
- depthwise_conv_block_init(12, 256, 256);
- w_conv[13].allocator()->init(TensorInfo(TensorShape(1U, 1U, 256U, 11U), 1, DataType::F32));
- b_conv[13].allocator()->init(TensorInfo(TensorShape(11U), 1, DataType::F32));
- }
-
- /** Build the model. */
- void build()
- {
- // Configure Layers
- conv3x3.configure(&input, &w_conv3x3, &b_conv3x3, &conv_out[0], PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR));
- conv3x3_act.configure(&conv_out[0], nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));
- depthwise_conv_block_build(0, PadStrideInfo(1, 1, 1, 1), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(1, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(2, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(3, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(4, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(5, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(6, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(7, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(8, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(9, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(10, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(11, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(12, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- pool.configure(&conv_out[13], &pool_out, PoolingLayerInfo(PoolingType::AVG, 7, PadStrideInfo(2, 2, 0, 0)));
- conv1x1[13].configure(&pool_out, &w_conv[13], &b_conv[13], &conv_out[14], PadStrideInfo(1, 1, 0, 0));
- logistic.configure(&conv_out[14], nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
- reshape.configure(&conv_out[14], &output);
- }
-
- /** Allocate the network. */
- void allocate()
- {
- input.allocator()->allocate();
- output.allocator()->allocate();
-
- w_conv3x3.allocator()->allocate();
- b_conv3x3.allocator()->allocate();
- for(unsigned int i = 0; i < w_conv.size(); ++i)
- {
- w_conv[i].allocator()->allocate();
- b_conv[i].allocator()->allocate();
- }
- for(unsigned int i = 0; i < w_dwc.size(); ++i)
- {
- w_dwc[i].allocator()->allocate();
- b_dwc[i].allocator()->allocate();
- }
- for(auto &o : conv_out)
- {
- o.allocator()->allocate();
- }
- for(auto &o : dwc_out)
- {
- o.allocator()->allocate();
- }
- pool_out.allocator()->allocate();
- }
-
- /** Fills the trainable parameters and input with random data. */
- void fill_random()
- {
- unsigned int seed_idx = 0;
- std::uniform_real_distribution<> distribution(-1, 1);
- library->fill(Accessor(input), distribution, seed_idx++);
-
- library->fill(Accessor(w_conv3x3), distribution, seed_idx++);
- library->fill(Accessor(b_conv3x3), distribution, seed_idx++);
- for(unsigned int i = 0; i < w_conv.size(); ++i)
- {
- library->fill(Accessor(w_conv[i]), distribution, seed_idx++);
- library->fill(Accessor(b_conv[i]), distribution, seed_idx++);
- }
- for(unsigned int i = 0; i < w_dwc.size(); ++i)
- {
- library->fill(Accessor(w_dwc[i]), distribution, seed_idx++);
- library->fill(Accessor(b_dwc[i]), distribution, seed_idx++);
- }
- }
-
- /** Feed input to network from file.
- *
-     * @param[in] name Name of the file containing the input data.
- */
- void feed(std::string name)
- {
- library->fill_layer_data(Accessor(input), name);
- }
-
- /** Get the classification results.
- *
- * @return Vector containing the classified labels
- */
- std::vector<unsigned int> get_classifications()
- {
- std::vector<unsigned int> classified_labels;
- Accessor output_accessor(output);
-
- Window window;
- window.set(Window::DimX, Window::Dimension(0, 1, 1));
- for(unsigned int d = 1; d < output_accessor.shape().num_dimensions(); ++d)
- {
- window.set(d, Window::Dimension(0, output_accessor.shape()[d], 1));
- }
-
- execute_window_loop(window, [&](const Coordinates & id)
- {
- int max_idx = 0;
- float val = 0;
- const void *const out_ptr = output_accessor(id);
- for(unsigned int l = 0; l < output_accessor.shape().x(); ++l)
- {
- float curr_val = reinterpret_cast<const float *>(out_ptr)[l];
- if(curr_val > val)
- {
- max_idx = l;
- val = curr_val;
- }
- }
- classified_labels.push_back(max_idx);
- });
- return classified_labels;
- }
-
- /** Clear all allocated memory from the tensor objects */
- void clear()
- {
- input.allocator()->free();
- output.allocator()->free();
-
- w_conv3x3.allocator()->free();
- b_conv3x3.allocator()->free();
- for(unsigned int i = 0; i < w_conv.size(); ++i)
- {
- w_conv[i].allocator()->free();
- b_conv[i].allocator()->free();
- }
- for(unsigned int i = 0; i < w_dwc.size(); ++i)
- {
- w_dwc[i].allocator()->free();
- b_dwc[i].allocator()->free();
- }
- for(auto &o : conv_out)
- {
- o.allocator()->free();
- }
- for(auto &o : dwc_out)
- {
- o.allocator()->free();
- }
- pool_out.allocator()->free();
- }
-
- /** Runs the model */
- void run()
- {
- conv3x3.run();
- conv3x3_act.run();
- depthwise_conv_block_run(0);
- depthwise_conv_block_run(1);
- depthwise_conv_block_run(2);
- depthwise_conv_block_run(3);
- depthwise_conv_block_run(4);
- depthwise_conv_block_run(5);
- depthwise_conv_block_run(6);
- depthwise_conv_block_run(7);
- depthwise_conv_block_run(8);
- depthwise_conv_block_run(9);
- depthwise_conv_block_run(10);
- depthwise_conv_block_run(11);
- depthwise_conv_block_run(12);
- pool.run();
- conv1x1[13].run();
- logistic.run();
- reshape.run();
- }
-
- /** Sync the results */
- void sync()
- {
- sync_if_necessary<TensorType>();
- sync_tensor_if_necessary<TensorType>(output);
- }
-
-private:
- void depthwise_conv_block_init(unsigned int idx, unsigned int ifm, unsigned int ofm)
- {
- w_dwc[idx].allocator()->init(TensorInfo(TensorShape(3U, 3U, ifm), 1, DataType::F32));
- b_dwc[idx].allocator()->init(TensorInfo(TensorShape(ifm), 1, DataType::F32));
- w_conv[idx].allocator()->init(TensorInfo(TensorShape(1U, 1U, ifm, ofm), 1, DataType::F32));
- b_conv[idx].allocator()->init(TensorInfo(TensorShape(ofm), 1, DataType::F32));
- }
- void depthwise_conv_block_build(unsigned int idx, PadStrideInfo dwc_ps, PadStrideInfo conv_ps)
- {
- dwc3x3[idx].configure(&conv_out[idx], &w_dwc[idx], &b_dwc[idx], &dwc_out[idx], dwc_ps);
- act[2 * idx].configure(&dwc_out[idx], nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));
- conv1x1[idx].configure(&dwc_out[idx], &w_conv[idx], &b_conv[idx], &conv_out[idx + 1], conv_ps);
-        act[2 * idx + 1].configure(&conv_out[idx + 1], nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));
- }
- void depthwise_conv_block_run(unsigned int idx)
- {
- dwc3x3[idx].run();
- act[2 * idx].run();
- conv1x1[idx].run();
- act[2 * idx + 1].run();
- }
-
-private:
- unsigned int _batches{ 0 };
-
- ConvolutionLayerFunction conv3x3{};
- ActivationLayerFunction conv3x3_act{};
- std::array<ActivationLayerFunction, 26> act{ {} };
- std::array<DirectConvolutionLayerFunction, 14> conv1x1{ {} };
- std::array<DepthwiseConvolutionLayerFunction, 13> dwc3x3{ {} };
- PoolingLayerFunction pool{};
- ActivationLayerFunction logistic{};
- ReshapeFunction reshape{};
-
- TensorType w_conv3x3{}, b_conv3x3{};
- std::array<TensorType, 14> w_conv{ {} }, b_conv{ {} };
- std::array<TensorType, 13> w_dwc{ {} }, b_dwc{ {} };
-
- TensorType input{}, output{};
-
- std::array<TensorType, 15> conv_out{ {} };
- std::array<TensorType, 13> dwc_out{ {} };
- TensorType pool_out{};
-};
-} // namespace networks
-} // namespace test
-} // namespace arm_compute
-#endif //__ARM_COMPUTE_TEST_MODEL_OBJECTS_MOBILENET_H__
diff --git a/tests/networks/MobileNetV1Network.h b/tests/networks/MobileNetV1Network.h
deleted file mode 100644
index aea5c113e8..0000000000
--- a/tests/networks/MobileNetV1Network.h
+++ /dev/null
@@ -1,390 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_TEST_MODEL_OBJECTS_MOBILENETV1_H__
-#define __ARM_COMPUTE_TEST_MODEL_OBJECTS_MOBILENETV1_H__
-
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/Utils.h"
-
-#include "utils/Utils.h"
-
-#include <memory>
-
-using namespace arm_compute;
-using namespace arm_compute::test;
-
-namespace arm_compute
-{
-namespace test
-{
-namespace networks
-{
-/** MobileNetV1 model object */
-template <typename TensorType,
- typename Accessor,
- typename ActivationLayerFunction,
- typename BatchNormalizationLayerFunction,
- typename ConvolutionLayerFunction,
- typename DirectConvolutionLayerFunction,
- typename DepthwiseConvolutionFunction,
- typename ReshapeFunction,
- typename PoolingLayerFunction,
- typename SoftmaxLayerFunction>
-class MobileNetV1Network
-{
-public:
- /** Initialize the network.
- *
- * @param[in] input_spatial_size Size of the spatial input.
- * @param[in] batches Number of batches.
- */
- void init(unsigned int input_spatial_size, int batches)
- {
- _batches = batches;
- _input_spatial_size = input_spatial_size;
-
- // Currently supported sizes
- ARM_COMPUTE_ERROR_ON(input_spatial_size != 128 && input_spatial_size != 224);
-
- // Initialize input, output
- input.allocator()->init(TensorInfo(TensorShape(input_spatial_size, input_spatial_size, 3U, _batches), 1, DataType::F32));
- output.allocator()->init(TensorInfo(TensorShape(1001U, _batches), 1, DataType::F32));
- // Initialize weights and biases
- w_conv3x3.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 32U), 1, DataType::F32));
- mean_conv3x3.allocator()->init(TensorInfo(TensorShape(32U), 1, DataType::F32));
- var_conv3x3.allocator()->init(TensorInfo(TensorShape(32U), 1, DataType::F32));
- beta_conv3x3.allocator()->init(TensorInfo(TensorShape(32U), 1, DataType::F32));
- gamma_conv3x3.allocator()->init(TensorInfo(TensorShape(32U), 1, DataType::F32));
- depthwise_conv_block_init(0, 32, 32);
- depthwise_conv_block_init(1, 32, 64);
- depthwise_conv_block_init(2, 64, 64);
- depthwise_conv_block_init(3, 64, 128);
- depthwise_conv_block_init(4, 128, 256);
- depthwise_conv_block_init(5, 256, 512);
- depthwise_conv_block_init(6, 512, 512);
- depthwise_conv_block_init(7, 512, 512);
- depthwise_conv_block_init(8, 512, 512);
- depthwise_conv_block_init(9, 512, 512);
- depthwise_conv_block_init(10, 512, 512);
- depthwise_conv_block_init(11, 512, 1024);
- depthwise_conv_block_init(12, 1024, 1024);
- w_conv1c.allocator()->init(TensorInfo(TensorShape(1U, 1U, 1024U, 1001U), 1, DataType::F32));
- b_conv1c.allocator()->init(TensorInfo(TensorShape(1001U), 1, DataType::F32));
- // Init reshaped output
- reshape_out.allocator()->init(TensorInfo(TensorShape(1001U, _batches), 1, DataType::F32));
- }
-
- /** Build the model. */
- void build()
- {
- // Configure Layers
- conv3x3.configure(&input, &w_conv3x3, nullptr, &conv_out[0], PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR));
- conv3x3_bn.configure(&conv_out[0], nullptr, &mean_conv3x3, &var_conv3x3, &beta_conv3x3, &gamma_conv3x3, 0.001f);
- conv3x3_act.configure(&conv_out[0], nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));
- depthwise_conv_block_build(0, PadStrideInfo(1, 1, 1, 1), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(1, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(2, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(3, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(4, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(5, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(6, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(7, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(8, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(9, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(10, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(11, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- depthwise_conv_block_build(12, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
- pool.configure(&conv_out[13], &pool_out, PoolingLayerInfo(PoolingType::AVG));
- conv1c.configure(&pool_out, &w_conv1c, &b_conv1c, &conv_out[14], PadStrideInfo(1, 1, 0, 0));
- reshape.configure(&conv_out[14], &reshape_out);
- smx.configure(&reshape_out, &output);
- }
-
- /** Allocate the network. */
- void allocate()
- {
- input.allocator()->allocate();
- output.allocator()->allocate();
-
- w_conv3x3.allocator()->allocate();
- mean_conv3x3.allocator()->allocate();
- var_conv3x3.allocator()->allocate();
- beta_conv3x3.allocator()->allocate();
- gamma_conv3x3.allocator()->allocate();
-
- ARM_COMPUTE_ERROR_ON(w_conv.size() != w_dwc.size());
- for(unsigned int i = 0; i < w_conv.size(); ++i)
- {
- w_dwc[i].allocator()->allocate();
- bn_mean[2 * i].allocator()->allocate();
- bn_var[2 * i].allocator()->allocate();
- bn_beta[2 * i].allocator()->allocate();
- bn_gamma[2 * i].allocator()->allocate();
- w_conv[i].allocator()->allocate();
- bn_mean[2 * i + 1].allocator()->allocate();
- bn_var[2 * i + 1].allocator()->allocate();
- bn_beta[2 * i + 1].allocator()->allocate();
- bn_gamma[2 * i + 1].allocator()->allocate();
- }
- w_conv1c.allocator()->allocate();
- b_conv1c.allocator()->allocate();
-
- // Allocate intermediate buffers
- for(auto &o : conv_out)
- {
- o.allocator()->allocate();
- }
- for(auto &o : dwc_out)
- {
- o.allocator()->allocate();
- }
- pool_out.allocator()->allocate();
- reshape_out.allocator()->allocate();
- }
-
- /** Fills the trainable parameters and input with random data. */
- void fill_random()
- {
- unsigned int seed_idx = 0;
- std::uniform_real_distribution<> distribution(-1, 1);
- library->fill(Accessor(input), distribution, seed_idx++);
-
- library->fill(Accessor(w_conv3x3), distribution, seed_idx++);
- library->fill(Accessor(mean_conv3x3), distribution, seed_idx++);
- library->fill(Accessor(var_conv3x3), distribution, seed_idx++);
- library->fill(Accessor(beta_conv3x3), distribution, seed_idx++);
- library->fill(Accessor(gamma_conv3x3), distribution, seed_idx++);
-
- ARM_COMPUTE_ERROR_ON(w_conv.size() != w_dwc.size());
- for(unsigned int i = 0; i < w_conv.size(); ++i)
- {
- library->fill(Accessor(w_dwc[i]), distribution, seed_idx++);
- library->fill(Accessor(bn_mean[2 * i]), distribution, seed_idx++);
- library->fill(Accessor(bn_var[2 * i]), distribution, seed_idx++);
- library->fill(Accessor(bn_beta[2 * i]), distribution, seed_idx++);
- library->fill(Accessor(bn_gamma[2 * i]), distribution, seed_idx++);
- library->fill(Accessor(w_conv[i]), distribution, seed_idx++);
- library->fill(Accessor(bn_mean[2 * i + 1]), distribution, seed_idx++);
- library->fill(Accessor(bn_var[2 * i + 1]), distribution, seed_idx++);
- library->fill(Accessor(bn_beta[2 * i + 1]), distribution, seed_idx++);
- library->fill(Accessor(bn_gamma[2 * i + 1]), distribution, seed_idx++);
- }
- library->fill(Accessor(w_conv1c), distribution, seed_idx++);
- library->fill(Accessor(b_conv1c), distribution, seed_idx++);
- }
-
- /** Feed input to network from file.
- *
-     * @param[in] name Name of the file containing the input data.
- */
- void feed(std::string name)
- {
- library->fill_layer_data(Accessor(input), name);
- }
-
- /** Get the classification results.
- *
- * @return Vector containing the classified labels
- */
- std::vector<unsigned int> get_classifications()
- {
- std::vector<unsigned int> classified_labels;
- Accessor output_accessor(output);
-
- Window window;
- window.set(Window::DimX, Window::Dimension(0, 1, 1));
- for(unsigned int d = 1; d < output_accessor.shape().num_dimensions(); ++d)
- {
- window.set(d, Window::Dimension(0, output_accessor.shape()[d], 1));
- }
-
- execute_window_loop(window, [&](const Coordinates & id)
- {
- int max_idx = 0;
- float val = 0;
- const void *const out_ptr = output_accessor(id);
- for(unsigned int l = 0; l < output_accessor.shape().x(); ++l)
- {
- float curr_val = reinterpret_cast<const float *>(out_ptr)[l];
- if(curr_val > val)
- {
- max_idx = l;
- val = curr_val;
- }
- }
- classified_labels.push_back(max_idx);
- });
- return classified_labels;
- }
-
- /** Clear all allocated memory from the tensor objects */
- void clear()
- {
- input.allocator()->free();
- output.allocator()->free();
-
- w_conv3x3.allocator()->free();
- mean_conv3x3.allocator()->free();
- var_conv3x3.allocator()->free();
- beta_conv3x3.allocator()->free();
- gamma_conv3x3.allocator()->free();
-
- ARM_COMPUTE_ERROR_ON(w_conv.size() != w_dwc.size());
- for(unsigned int i = 0; i < w_conv.size(); ++i)
- {
- w_dwc[i].allocator()->free();
- bn_mean[2 * i].allocator()->free();
- bn_var[2 * i].allocator()->free();
- bn_beta[2 * i].allocator()->free();
- bn_gamma[2 * i].allocator()->free();
- w_conv[i].allocator()->free();
- bn_mean[2 * i + 1].allocator()->free();
- bn_var[2 * i + 1].allocator()->free();
- bn_beta[2 * i + 1].allocator()->free();
- bn_gamma[2 * i + 1].allocator()->free();
- }
- w_conv1c.allocator()->free();
- b_conv1c.allocator()->free();
-
- // Free intermediate buffers
- for(auto &o : conv_out)
- {
- o.allocator()->free();
- }
- for(auto &o : dwc_out)
- {
- o.allocator()->free();
- }
- pool_out.allocator()->free();
- reshape_out.allocator()->free();
- }
-
- /** Runs the model */
- void run()
- {
- conv3x3.run();
- conv3x3_bn.run();
- conv3x3_act.run();
- depthwise_conv_block_run(0);
- depthwise_conv_block_run(1);
- depthwise_conv_block_run(2);
- depthwise_conv_block_run(3);
- depthwise_conv_block_run(4);
- depthwise_conv_block_run(5);
- depthwise_conv_block_run(6);
- depthwise_conv_block_run(7);
- depthwise_conv_block_run(8);
- depthwise_conv_block_run(9);
- depthwise_conv_block_run(10);
- depthwise_conv_block_run(11);
- depthwise_conv_block_run(12);
- pool.run();
- conv1c.run();
- reshape.run();
- smx.run();
- }
-
- /** Sync the results */
- void sync()
- {
- sync_if_necessary<TensorType>();
- sync_tensor_if_necessary<TensorType>(output);
- }
-
-private:
- void depthwise_conv_block_init(unsigned int idx, unsigned int ifm, unsigned int ofm)
- {
- // Depthwise Convolution weights
- w_dwc[idx].allocator()->init(TensorInfo(TensorShape(3U, 3U, ifm), 1, DataType::F32));
- // Batch normalization parameters
- bn_mean[2 * idx].allocator()->init(TensorInfo(TensorShape(ifm), 1, DataType::F32));
- bn_var[2 * idx].allocator()->init(TensorInfo(TensorShape(ifm), 1, DataType::F32));
- bn_beta[2 * idx].allocator()->init(TensorInfo(TensorShape(ifm), 1, DataType::F32));
- bn_gamma[2 * idx].allocator()->init(TensorInfo(TensorShape(ifm), 1, DataType::F32));
- // Convolution weights
- w_conv[idx].allocator()->init(TensorInfo(TensorShape(1U, 1U, ifm, ofm), 1, DataType::F32));
- // Batch normalization parameters
- bn_mean[2 * idx + 1].allocator()->init(TensorInfo(TensorShape(ofm), 1, DataType::F32));
- bn_var[2 * idx + 1].allocator()->init(TensorInfo(TensorShape(ofm), 1, DataType::F32));
- bn_beta[2 * idx + 1].allocator()->init(TensorInfo(TensorShape(ofm), 1, DataType::F32));
- bn_gamma[2 * idx + 1].allocator()->init(TensorInfo(TensorShape(ofm), 1, DataType::F32));
- }
- void depthwise_conv_block_build(unsigned int idx, PadStrideInfo dwc_ps, PadStrideInfo conv_ps)
- {
- // Configure depthwise convolution block
- dwc3x3[idx].configure(&conv_out[idx], &w_dwc[idx], nullptr, &dwc_out[idx], dwc_ps);
- bn[2 * idx].configure(&dwc_out[idx], nullptr, &bn_mean[2 * idx], &bn_var[2 * idx], &bn_beta[2 * idx], &bn_gamma[2 * idx], 0.001f);
- act[2 * idx].configure(&dwc_out[idx], nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));
- // Configure pointwise convolution block
- conv1x1[idx].configure(&dwc_out[idx], &w_conv[idx], nullptr, &conv_out[idx + 1], conv_ps);
- bn[2 * idx + 1].configure(&conv_out[idx + 1], nullptr, &bn_mean[2 * idx + 1], &bn_var[2 * idx + 1], &bn_beta[2 * idx + 1], &bn_gamma[2 * idx + 1], 0.001f);
-        act[2 * idx + 1].configure(&conv_out[idx + 1], nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));
- }
- void depthwise_conv_block_run(unsigned int idx)
- {
- dwc3x3[idx].run();
- bn[2 * idx].run();
- act[2 * idx].run();
- conv1x1[idx].run();
- bn[2 * idx + 1].run();
- act[2 * idx + 1].run();
- }
-
-private:
- unsigned int _batches{ 0 };
- unsigned int _input_spatial_size{ 0 };
-
- ConvolutionLayerFunction conv3x3{};
- BatchNormalizationLayerFunction conv3x3_bn{};
- ActivationLayerFunction conv3x3_act{};
- std::array<ActivationLayerFunction, 26> act{ {} };
- std::array<BatchNormalizationLayerFunction, 26> bn{ {} };
- std::array<DepthwiseConvolutionFunction, 13> dwc3x3{ {} };
- std::array<DirectConvolutionLayerFunction, 13> conv1x1{ {} };
- DirectConvolutionLayerFunction conv1c{};
- PoolingLayerFunction pool{};
- ReshapeFunction reshape{};
- SoftmaxLayerFunction smx{};
-
- TensorType w_conv3x3{}, mean_conv3x3{}, var_conv3x3{}, beta_conv3x3{}, gamma_conv3x3{};
- std::array<TensorType, 13> w_conv{ {} };
- std::array<TensorType, 13> w_dwc{ {} };
- std::array<TensorType, 26> bn_mean{ {} };
- std::array<TensorType, 26> bn_var{ {} };
- std::array<TensorType, 26> bn_beta{ {} };
- std::array<TensorType, 26> bn_gamma{ {} };
- TensorType w_conv1c{}, b_conv1c{};
-
- TensorType input{}, output{};
-
- std::array<TensorType, 15> conv_out{ {} };
- std::array<TensorType, 13> dwc_out{ {} };
- TensorType pool_out{};
- TensorType reshape_out{};
-};
-} // namespace networks
-} // namespace test
-} // namespace arm_compute
-#endif //__ARM_COMPUTE_TEST_MODEL_OBJECTS_MOBILENETV1_H__
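No instantiation of MobileNetV1Network appears in this hunk, so the NEON type list below is an assumption for illustration rather than a quote of any removed code; it simply pairs each template parameter of the class above with the corresponding NEON runtime function.

// Sketch only: an assumed NEON instantiation of the removed MobileNetV1Network,
// matching the template parameter list declared above.
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h"
#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEPoolingLayer.h"
#include "arm_compute/runtime/NEON/functions/NEReshapeLayer.h"
#include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "tests/NEON/Accessor.h"
#include "tests/networks/MobileNetV1Network.h"

using NEMobileNetV1Model = arm_compute::test::networks::MobileNetV1Network<arm_compute::Tensor,
                                                                           arm_compute::test::Accessor,
                                                                           arm_compute::NEActivationLayer,
                                                                           arm_compute::NEBatchNormalizationLayer,
                                                                           arm_compute::NEConvolutionLayer,
                                                                           arm_compute::NEDirectConvolutionLayer,
                                                                           arm_compute::NEDepthwiseConvolutionLayer,
                                                                           arm_compute::NEReshapeLayer,
                                                                           arm_compute::NEPoolingLayer,
                                                                           arm_compute::NESoftmaxLayer>;

void run_mobilenet_v1_once()
{
    NEMobileNetV1Model network{};
    network.init(224, 1);  // 224x224 input, single batch (init also accepts 128)
    network.build();
    network.allocate();
    network.fill_random(); // parameters and input filled from the test AssetsLibrary
    network.run();
    network.sync();
}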
diff --git a/tests/validation/CL/SYSTEM/AlexNet.cpp b/tests/validation/CL/SYSTEM/AlexNet.cpp
deleted file mode 100644
index 9be6f2cf53..0000000000
--- a/tests/validation/CL/SYSTEM/AlexNet.cpp
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/runtime/CL/CLSubTensor.h"
-#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
-#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"
-#include "arm_compute/runtime/CL/functions/CLNormalizationLayer.h"
-#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"
-#include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"
-#include "tests/CL/CLAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Macros.h"
-#include "tests/networks/AlexNetNetwork.h"
-#include "tests/validation/Validation.h"
-
-#include <string>
-#include <vector>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-namespace
-{
-using CLAlexNetModel = networks::AlexNetNetwork<ICLTensor,
- CLTensor,
- CLSubTensor,
- CLAccessor,
- CLActivationLayer,
- CLConvolutionLayer,
- CLDirectConvolutionLayer,
- CLFullyConnectedLayer,
- CLNormalizationLayer,
- CLPoolingLayer,
- CLSoftmaxLayer>;
-std::vector<unsigned int> compute_alexnet(DataType dt, unsigned int batches, std::string input_file)
-{
- std::vector<std::string> weight_files = { "cnn_data/alexnet_model/conv1_w.npy",
- "cnn_data/alexnet_model/conv2_w.npy",
- "cnn_data/alexnet_model/conv3_w.npy",
- "cnn_data/alexnet_model/conv4_w.npy",
- "cnn_data/alexnet_model/conv5_w.npy",
- "cnn_data/alexnet_model/fc6_w.npy",
- "cnn_data/alexnet_model/fc7_w.npy",
- "cnn_data/alexnet_model/fc8_w.npy"
- };
-
- std::vector<std::string> bias_files = { "cnn_data/alexnet_model/conv1_b.npy",
- "cnn_data/alexnet_model/conv2_b.npy",
- "cnn_data/alexnet_model/conv3_b.npy",
- "cnn_data/alexnet_model/conv4_b.npy",
- "cnn_data/alexnet_model/conv5_b.npy",
- "cnn_data/alexnet_model/fc6_b.npy",
- "cnn_data/alexnet_model/fc7_b.npy",
- "cnn_data/alexnet_model/fc8_b.npy"
- };
- CLAlexNetModel network{};
- network.init(dt, batches);
- network.build();
- network.allocate();
- network.fill(weight_files, bias_files);
- network.feed(std::move(input_file));
- network.run();
-
- return network.get_classifications();
-}
-} // namespace
-
-TEST_SUITE(CL)
-TEST_SUITE(SYSTEM_TESTS)
-
-TEST_CASE(AlexNet, framework::DatasetMode::PRECOMMIT)
-{
- // Compute alexnet
-    // Compute AlexNet
-
- // Expected labels
- std::vector<unsigned int> expected_labels = { 281 };
-
- // Validate labels
- validate(classified_labels, expected_labels);
-}
-
-TEST_SUITE_END()
-TEST_SUITE_END()
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
diff --git a/tests/validation/CL/SYSTEM/LeNet5.cpp b/tests/validation/CL/SYSTEM/LeNet5.cpp
deleted file mode 100644
index 92dcdea500..0000000000
--- a/tests/validation/CL/SYSTEM/LeNet5.cpp
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
-#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"
-#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"
-#include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"
-#include "tests/CL/CLAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Macros.h"
-#include "tests/networks/LeNet5Network.h"
-#include "tests/validation/Validation.h"
-
-#include <string>
-#include <vector>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-namespace
-{
-using CLLeNet5Model = networks::LeNet5Network<CLTensor,
- CLAccessor,
- CLActivationLayer,
- CLConvolutionLayer,
- CLFullyConnectedLayer,
- CLPoolingLayer,
- CLSoftmaxLayer>;
-std::vector<unsigned int> compute_lenet5(unsigned int batches, std::string input_file)
-{
- std::vector<std::string> weight_files = { "cnn_data/lenet_model/conv1_w.npy",
- "cnn_data/lenet_model/conv2_w.npy",
- "cnn_data/lenet_model/ip1_w.npy",
- "cnn_data/lenet_model/ip2_w.npy"
- };
-
- std::vector<std::string> bias_files = { "cnn_data/lenet_model/conv1_b.npy",
- "cnn_data/lenet_model/conv2_b.npy",
- "cnn_data/lenet_model/ip1_b.npy",
- "cnn_data/lenet_model/ip2_b.npy"
- };
- CLLeNet5Model network{};
- network.init(batches);
- network.build();
- network.allocate();
- network.fill(weight_files, bias_files);
- network.feed(std::move(input_file));
- network.run();
-
- return network.get_classifications();
-}
-} // namespace
-
-TEST_SUITE(CL)
-TEST_SUITE(SYSTEM_TESTS)
-
-TEST_CASE(LeNet5, framework::DatasetMode::PRECOMMIT)
-{
-    // Compute LeNet5
- std::vector<unsigned int> classified_labels = compute_lenet5(10, "cnn_data/mnist_data/input10.npy");
-
- // Expected labels
- std::vector<unsigned int> expected_labels = { 7, 2, 1, 0, 4, 1, 4, 9, 5, 9 };
-
- // Validate labels
- validate(classified_labels, expected_labels);
-}
-
-TEST_SUITE_END()
-TEST_SUITE_END()
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
diff --git a/tests/validation/NEON/SYSTEM/AlexNet.cpp b/tests/validation/NEON/SYSTEM/AlexNet.cpp
deleted file mode 100644
index adcfe72eaa..0000000000
--- a/tests/validation/NEON/SYSTEM/AlexNet.cpp
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
-#include "arm_compute/runtime/NEON/functions/NENormalizationLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEPoolingLayer.h"
-#include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"
-#include "arm_compute/runtime/SubTensor.h"
-#include "tests/NEON/Accessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Macros.h"
-#include "tests/networks/AlexNetNetwork.h"
-#include "tests/validation/Validation.h"
-
-#include <string>
-#include <vector>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-namespace
-{
-using NEAlexNetModel = networks::AlexNetNetwork<ITensor,
- Tensor,
- SubTensor,
- Accessor,
- NEActivationLayer,
- NEConvolutionLayer,
- NEDirectConvolutionLayer,
- NEFullyConnectedLayer,
- NENormalizationLayer,
- NEPoolingLayer,
- NESoftmaxLayer>;
-std::vector<unsigned int> compute_alexnet(DataType dt, unsigned int batches, std::string input_file)
-{
- std::vector<std::string> weight_files = { "cnn_data/alexnet_model/conv1_w.npy",
- "cnn_data/alexnet_model/conv2_w.npy",
- "cnn_data/alexnet_model/conv3_w.npy",
- "cnn_data/alexnet_model/conv4_w.npy",
- "cnn_data/alexnet_model/conv5_w.npy",
- "cnn_data/alexnet_model/fc6_w.npy",
- "cnn_data/alexnet_model/fc7_w.npy",
- "cnn_data/alexnet_model/fc8_w.npy"
- };
-
- std::vector<std::string> bias_files = { "cnn_data/alexnet_model/conv1_b.npy",
- "cnn_data/alexnet_model/conv2_b.npy",
- "cnn_data/alexnet_model/conv3_b.npy",
- "cnn_data/alexnet_model/conv4_b.npy",
- "cnn_data/alexnet_model/conv5_b.npy",
- "cnn_data/alexnet_model/fc6_b.npy",
- "cnn_data/alexnet_model/fc7_b.npy",
- "cnn_data/alexnet_model/fc8_b.npy"
- };
- NEAlexNetModel network{};
-
- network.init(dt, batches);
- network.build();
- network.allocate();
- network.fill(weight_files, bias_files);
- network.feed(std::move(input_file));
- network.run();
-
- return network.get_classifications();
-}
-} // namespace
-
-TEST_SUITE(NEON)
-TEST_SUITE(SYSTEM_TESTS)
-
-TEST_CASE(AlexNet, framework::DatasetMode::PRECOMMIT)
-{
- // Compute alexnet
-    // Compute AlexNet
-
- // Expected labels
- std::vector<unsigned int> expected_labels = { 281 };
-
- // Validate labels
- validate(classified_labels, expected_labels);
-}
-
-TEST_SUITE_END()
-TEST_SUITE_END()
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
diff --git a/tests/validation/NEON/SYSTEM/LeNet5.cpp b/tests/validation/NEON/SYSTEM/LeNet5.cpp
deleted file mode 100644
index 95a82a6643..0000000000
--- a/tests/validation/NEON/SYSTEM/LeNet5.cpp
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEPoolingLayer.h"
-#include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"
-#include "tests/NEON/Accessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Macros.h"
-#include "tests/networks/LeNet5Network.h"
-#include "tests/validation/Validation.h"
-
-#include <string>
-#include <vector>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-namespace
-{
-using NELeNet5Model = networks::LeNet5Network<Tensor,
- Accessor,
- NEActivationLayer,
- NEConvolutionLayer,
- NEFullyConnectedLayer,
- NEPoolingLayer,
- NESoftmaxLayer>;
-std::vector<unsigned int> compute_lenet5(unsigned int batches, std::string input_file)
-{
- std::vector<std::string> weight_files = { "cnn_data/lenet_model/conv1_w.npy",
- "cnn_data/lenet_model/conv2_w.npy",
- "cnn_data/lenet_model/ip1_w.npy",
- "cnn_data/lenet_model/ip2_w.npy"
- };
-
- std::vector<std::string> bias_files = { "cnn_data/lenet_model/conv1_b.npy",
- "cnn_data/lenet_model/conv2_b.npy",
- "cnn_data/lenet_model/ip1_b.npy",
- "cnn_data/lenet_model/ip2_b.npy"
- };
- NELeNet5Model network{};
- network.init(batches);
- network.build();
- network.allocate();
- network.fill(weight_files, bias_files);
- network.feed(std::move(input_file));
- network.run();
-
- return network.get_classifications();
-}
-} // namespace
-
-TEST_SUITE(NEON)
-TEST_SUITE(SYSTEM_TESTS)
-
-TEST_CASE(LeNet5, framework::DatasetMode::PRECOMMIT)
-{
-    // Compute LeNet5
- std::vector<unsigned int> classified_labels = compute_lenet5(10, "cnn_data/mnist_data/input10.npy");
-
- // Expected labels
- std::vector<unsigned int> expected_labels = { 7, 2, 1, 0, 4, 1, 4, 9, 5, 9 };
-
- // Validate labels
- validate(classified_labels, expected_labels);
-}
-
-TEST_SUITE_END()
-TEST_SUITE_END()
-} // namespace validation
-} // namespace test
-} // namespace arm_compute