Diffstat (limited to 'tests')

 -rw-r--r--               tests/benchmark/CL/BatchNormalizationLayer.cpp                                     26
 -rw-r--r--[-rwxr-xr-x]   tests/benchmark/GLES_COMPUTE/BatchNormalizationLayer.cpp                           28
 -rw-r--r--               tests/benchmark/NEON/BatchNormalizationLayer.cpp                                   27
 -rw-r--r--               tests/benchmark/fixtures/BatchNormalizationLayerFixture.h                           4
 -rw-r--r--               tests/datasets/system_tests/mobilenet/MobileNetBatchNormalizationLayerDataset.h    68
 -rw-r--r--               tests/validation/CL/BatchNormalizationLayer.cpp                                    49
 -rw-r--r--               tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp                          14
 -rw-r--r--               tests/validation/NEON/BatchNormalizationLayer.cpp                                  45
 -rw-r--r--               tests/validation/fixtures/BatchNormalizationLayerFixture.h                         20
 -rw-r--r--               tests/validation/reference/BatchNormalizationLayer.cpp                             24
 -rw-r--r--               tests/validation/reference/BatchNormalizationLayer.h                                8

11 files changed, 257 insertions, 56 deletions
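
The patch below threads an ActivationLayerInfo argument through the batch normalization benchmark fixtures, validation fixtures, and reference implementation, so an activation function can be fused into the layer under test; it also adds a MobileNet batch normalization dataset whose test cases fuse BOUNDED_RELU with an upper bound of 6. Sketched from the reference code in the last two files, the fused computation per channel i is:

    x_bar = (src - mean[i]) / sqrt(var[i] + epsilon)
    dst   = f_act(beta[i] + x_bar * gamma[i])

where f_act is the identity when act_info is disabled, and BOUNDED_RELU clamps to [0, 6] in the MobileNet cases.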
diff --git a/tests/benchmark/CL/BatchNormalizationLayer.cpp b/tests/benchmark/CL/BatchNormalizationLayer.cpp
index 0fc8727241..a61e7cc745 100644
--- a/tests/benchmark/CL/BatchNormalizationLayer.cpp
+++ b/tests/benchmark/CL/BatchNormalizationLayer.cpp
@@ -29,6 +29,7 @@
 #include "tests/CL/CLAccessor.h"
 #include "tests/benchmark/fixtures/BatchNormalizationLayerFixture.h"
 #include "tests/datasets/system_tests/googlenet/inceptionv4/GoogLeNetInceptionV4BatchNormalizationLayerDataset.h"
+#include "tests/datasets/system_tests/mobilenet/MobileNetBatchNormalizationLayerDataset.h"
 #include "tests/datasets/system_tests/yolo/v2/YOLOV2BatchNormalizationLayerDataset.h"
 #include "tests/framework/Macros.h"
 #include "tests/framework/datasets/Datasets.h"
@@ -47,24 +48,41 @@ using CLBatchNormalizationLayerFixture = BatchNormalizationLayerFixture<CLTensor
 TEST_SUITE(CL)

+REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetBatchNormalizationLayer, CLBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
+    framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+        framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
+        data_types),
+        framework::dataset::make("Batches", 1)));
+
 REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, CLBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
-    framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+    framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+        framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
     data_types),
     framework::dataset::make("Batches", 1)));

 REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, CLBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
-    framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+    framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+        framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
     data_types),
     framework::dataset::make("Batches", 1)));

 TEST_SUITE(NIGHTLY)
+
+REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetBatchNormalizationLayer, CLBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
+    framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+        framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
+        data_types),
+        framework::dataset::make("Batches", { 4, 8 })));
+
 REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, CLBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
-    framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+    framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+        framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
     data_types),
     framework::dataset::make("Batches", { 4, 8 })));

 REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, CLBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
-    framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+    framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+        framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
     data_types),
     framework::dataset::make("Batches", { 4, 8 })));

 TEST_SUITE_END()
diff --git a/tests/benchmark/GLES_COMPUTE/BatchNormalizationLayer.cpp b/tests/benchmark/GLES_COMPUTE/BatchNormalizationLayer.cpp
index 4464ea2401..e615860ec8 100755..100644
--- a/tests/benchmark/GLES_COMPUTE/BatchNormalizationLayer.cpp
+++ b/tests/benchmark/GLES_COMPUTE/BatchNormalizationLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -29,6 +29,7 @@
 #include "tests/GLES_COMPUTE/GCAccessor.h"
 #include "tests/benchmark/fixtures/BatchNormalizationLayerFixture.h"
 #include "tests/datasets/system_tests/googlenet/inceptionv4/GoogLeNetInceptionV4BatchNormalizationLayerDataset.h"
+#include "tests/datasets/system_tests/mobilenet/MobileNetBatchNormalizationLayerDataset.h"
 #include "tests/datasets/system_tests/yolo/v2/YOLOV2BatchNormalizationLayerDataset.h"
 #include "tests/framework/Macros.h"
 #include "tests/framework/datasets/Datasets.h"
@@ -47,24 +48,41 @@ using GCBatchNormalizationLayerFixture = BatchNormalizationLayerFixture<GCTensor
 TEST_SUITE(GC)

+REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetBatchNormalizationLayer, GCBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
+    framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+        framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
+        data_types),
+        framework::dataset::make("Batches", 1)));
+
 REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, GCBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
-    framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+    framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(), framework::dataset::make("ActivationInfo",
+        ActivationLayerInfo())),
     data_types),
     framework::dataset::make("Batches", 1)));

 REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, GCBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
-    framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+    framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+        framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
     data_types),
     framework::dataset::make("Batches", 1)));

 TEST_SUITE(NIGHTLY)
+
+REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetBatchNormalizationLayer, GCBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
+    framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+        framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
+        data_types),
+        framework::dataset::make("Batches", { 4, 8 })));
+
 REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, GCBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
-    framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+    framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(), framework::dataset::make("ActivationInfo",
+        ActivationLayerInfo())),
     data_types),
     framework::dataset::make("Batches", { 4, 8 })));

 REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, GCBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
-    framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+    framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+        framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
     data_types),
     framework::dataset::make("Batches", { 4, 8 })));

 TEST_SUITE_END()
diff --git a/tests/benchmark/NEON/BatchNormalizationLayer.cpp b/tests/benchmark/NEON/BatchNormalizationLayer.cpp
index 3a7f2c6b63..2aae3a480b 100644
--- a/tests/benchmark/NEON/BatchNormalizationLayer.cpp
+++ b/tests/benchmark/NEON/BatchNormalizationLayer.cpp
@@ -33,6 +33,7 @@
 #include "tests/benchmark/fixtures/BatchNormalizationLayerFixture.h"
 #include "tests/datasets/system_tests/googlenet/inceptionv4/GoogLeNetInceptionV4BatchNormalizationLayerDataset.h"
+#include "tests/datasets/system_tests/mobilenet/MobileNetBatchNormalizationLayerDataset.h"
 #include "tests/datasets/system_tests/yolo/v2/YOLOV2BatchNormalizationLayerDataset.h"

 namespace arm_compute
@@ -52,21 +53,39 @@ using NEBatchNormalizationLayerFixture = BatchNormalizationLayerFixture<Tensor,
 TEST_SUITE(NEON)

+REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetBatchNormalizationLayer, NEBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
+    framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+        framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
+        data_types),
+        framework::dataset::make("Batches", 1)));
 REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, NEBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
-    framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(), data_types),
+    framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+        framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+        data_types),
     framework::dataset::make("Batches", 1)));

 REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, NEBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
-    framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(), data_types),
+    framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+        framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+        data_types),
     framework::dataset::make("Batches", 1)));

 TEST_SUITE(NIGHTLY)
+REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetBatchNormalizationLayer, NEBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
+    framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+        framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
+        data_types),
+        framework::dataset::make("Batches", { 4, 8 })));
 REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, NEBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
-    framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(), data_types),
+    framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+        framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+        data_types),
     framework::dataset::make("Batches", { 4, 8 })));

 REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, NEBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
-    framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(), data_types),
+    framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+        framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+        data_types),
     framework::dataset::make("Batches", { 4, 8 })));

 TEST_SUITE_END()
 TEST_SUITE_END()
diff --git a/tests/benchmark/fixtures/BatchNormalizationLayerFixture.h b/tests/benchmark/fixtures/BatchNormalizationLayerFixture.h
index 38a3263203..a031ec6d96 100644
--- a/tests/benchmark/fixtures/BatchNormalizationLayerFixture.h
+++ b/tests/benchmark/fixtures/BatchNormalizationLayerFixture.h
@@ -40,7 +40,7 @@ class BatchNormalizationLayerFixture : public framework::Fixture
 {
 public:
     template <typename...>
-    void setup(TensorShape tensor_shape, TensorShape param_shape, float epsilon, DataType data_type, int batches)
+    void setup(TensorShape tensor_shape, TensorShape param_shape, float epsilon, ActivationLayerInfo act_info, DataType data_type, int batches)
     {
         // Set batched in source and destination shapes
         const unsigned int fixed_point_position = 4;
@@ -55,7 +55,7 @@ public:
         gamma = create_tensor<TensorType>(param_shape, data_type, 1, fixed_point_position);

         // Create and configure function
-        batch_norm_layer.configure(&src, &dst, &mean, &variance, &beta, &gamma, epsilon);
+        batch_norm_layer.configure(&src, &dst, &mean, &variance, &beta, &gamma, epsilon, act_info);

         // Allocate tensors
         src.allocator()->allocate();
diff --git a/tests/datasets/system_tests/mobilenet/MobileNetBatchNormalizationLayerDataset.h b/tests/datasets/system_tests/mobilenet/MobileNetBatchNormalizationLayerDataset.h
new file mode 100644
index 0000000000..d09ff02ef9
--- /dev/null
+++ b/tests/datasets/system_tests/mobilenet/MobileNetBatchNormalizationLayerDataset.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_MOBILENET_BATCHNORMALIZATION_LAYER_DATASET
+#define ARM_COMPUTE_TEST_MOBILENET_BATCHNORMALIZATION_LAYER_DATASET
+
+#include "tests/datasets/BatchNormalizationLayerDataset.h"
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace datasets
+{
+class MobileNetBatchNormalizationLayerDataset final : public BatchNormalizationLayerDataset
+{
+public:
+    MobileNetBatchNormalizationLayerDataset()
+    {
+        // conv1_bn, dwc0_bn
+        add_config(TensorShape(112U, 112U, 32U), TensorShape(32U), 0.001f);
+        // pwc0_bn
+        add_config(TensorShape(112U, 112U, 64U), TensorShape(64U), 0.001f);
+        // dwc1_bn
+        add_config(TensorShape(56U, 56U, 64U), TensorShape(64U), 0.001f);
+        // dwc2_bn, pwc1_bn, pwc2_bn
+        add_config(TensorShape(56U, 56U, 128U), TensorShape(128U), 0.001f);
+        // dwc3_bn
+        add_config(TensorShape(28U, 28U, 128U), TensorShape(128U), 0.001f);
+        // dwc4_bn, pwc3_bn, pwc4_bn
+        add_config(TensorShape(28U, 28U, 256U), TensorShape(256U), 0.001f);
+        // dwc5_bn
+        add_config(TensorShape(14U, 14U, 256U), TensorShape(256U), 0.001f);
+        // dwc6_bn, dwc7_bn, dwc8_bn, dwc9_bn, dwc10_bn, pwc5_bn, pwc6_bn, pwc7_bn, pwc8_bn, pwc9_bn, pwc10_bn
+        add_config(TensorShape(14U, 14U, 512U), TensorShape(512U), 0.001f);
+        // dwc11_bn
+        add_config(TensorShape(7U, 7U, 512U), TensorShape(512U), 0.001f);
+        // dwc12_bn, pwc11_bn, pwc12_bn
+        add_config(TensorShape(7U, 7U, 1024U), TensorShape(1024U), 0.001f);
+    }
+};
+} // namespace datasets
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_MOBILENET_BATCHNORMALIZATION_LAYER_DATASET */
diff --git a/tests/validation/CL/BatchNormalizationLayer.cpp b/tests/validation/CL/BatchNormalizationLayer.cpp
index 30dd70a66a..ef535153f2 100644
--- a/tests/validation/CL/BatchNormalizationLayer.cpp
+++ b/tests/validation/CL/BatchNormalizationLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -47,6 +47,12 @@ constexpr AbsoluteTolerance<float> tolerance_f32(0.00001f); /**< Tolerance value
 constexpr AbsoluteTolerance<float> tolerance_f16(0.01f);  /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
 constexpr AbsoluteTolerance<float> tolerance_qs8(3.0f);   /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS8 */
 constexpr AbsoluteTolerance<float> tolerance_qs16(6.0f);  /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS16 */
+const auto act_infos = framework::dataset::make("ActivationInfo",
+{
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f),
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 8.f, 2.f),
+});
 } // namespace

 TEST_SUITE(CL)
@@ -80,13 +86,16 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::Ran
 // *INDENT-OFF*
 // clang-format off
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
     framework::dataset::make("InputInfo", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
         TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),    // Window shrink
         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),    // Mismatching data types
         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),    // Mismatching data types
         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),    // Invalid mean/var/beta/gamma shape
         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point position
+        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Fused activation with fixed point not supported
+        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),    // Unsupported fused activation
+        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),    // Fused activation's a < b
         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
     }),
@@ -96,6 +105,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
     TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F16),
     TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
     TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
+    TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
+    TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
+    TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
     TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
     TensorInfo(),
 })),
@@ -106,16 +118,31 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
     TensorInfo(TensorShape(5U), 1, DataType::F32),
     TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
     TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
+    TensorInfo(TensorShape(2U), 1, DataType::F32),
+    TensorInfo(TensorShape(2U), 1, DataType::F32),
     TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
+    TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
+    })),
+    framework::dataset::make("ActivationLayerInfo",{ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f),
+        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f),
+        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f),
+        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 2.f),
+        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 2.f),
+        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH),
+        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 2.f, 6.f),
+        ActivationLayerInfo(),
+        ActivationLayerInfo(),
+    })),
-    framework::dataset::make("Expected", { true, false, false, false, false, false, true, true})),
-    input_info, output_info, mvbg_info, expected)
+    framework::dataset::make("Expected", { true, false, false, false, false, false, false, false, false, true, true})),
+    input_info, output_info, mvbg_info, act_info, expected)
 {
     const auto &mean_info  = mvbg_info;
     const auto &var_info   = mvbg_info;
     const auto &beta_info  = mvbg_info;
     const auto &gamma_info = mvbg_info;
-    bool has_error = bool(CLBatchNormalizationLayer::validate(&input_info.clone()->set_is_resizable(false), (output_info.total_size() == 0) ? nullptr : &output_info.clone()->set_is_resizable(false), &mean_info.clone()->set_is_resizable(false), &var_info.clone()->set_is_resizable(false), &beta_info.clone()->set_is_resizable(false), &gamma_info.clone()->set_is_resizable(false), 1.f));
+    bool has_error = bool(CLBatchNormalizationLayer::validate(&input_info.clone()->set_is_resizable(false), (output_info.total_size() == 0) ? nullptr : &output_info.clone()->set_is_resizable(false), &mean_info.clone()->set_is_resizable(false), &var_info.clone()->set_is_resizable(false), &beta_info.clone()->set_is_resizable(false), &gamma_info.clone()->set_is_resizable(false), 1.f, act_info));
     ARM_COMPUTE_EXPECT(has_error == expected, framework::LogLevel::ERRORS);
 }
 // clang-format on
@@ -123,7 +150,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
 TEST_SUITE(Float)
 TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+    act_infos),
     framework::dataset::make("DataType", DataType::F32)))
 {
     // Validate output
@@ -132,7 +160,8 @@ FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<float>, framewor
 TEST_SUITE_END()

 TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+    framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
     framework::dataset::make("DataType", DataType::F16)))
 {
     // Validate output
@@ -146,7 +175,8 @@ template <typename T>
 using CLBatchNormalizationLayerFixedPointFixture = BatchNormalizationLayerValidationFixedPointFixture<CLTensor, CLAccessor, CLBatchNormalizationLayer, T>;

 TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+    framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
     framework::dataset::make("DataType", DataType::QS8)),
     framework::dataset::make("FractionalBits", 1, 6)))
 {
@@ -156,7 +186,8 @@ FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int8_t
 TEST_SUITE_END()

 TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+    framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
     framework::dataset::make("DataType", DataType::QS16)),
     framework::dataset::make("FractionalBits", 1, 14)))
 {
diff --git a/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp b/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp
index a82149bdcc..d817fc0e67 100644
--- a/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp
+++ b/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -45,6 +45,12 @@ namespace
 {
 constexpr AbsoluteTolerance<float> tolerance_f(0.00001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
 constexpr AbsoluteTolerance<float> tolerance_f16(0.01f);  /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
+const auto act_infos = framework::dataset::make("ActivationInfo",
+{
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f),
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 8.f, 2.f),
+});
 } // namespace

 TEST_SUITE(GC)
@@ -78,7 +84,8 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::Ran
 TEST_SUITE(Float)
 TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(Random, GCBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, GCBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+    act_infos),
     framework::dataset::make("DataType", DataType::F16)))
 {
     // Validate output
@@ -87,7 +94,8 @@ FIXTURE_DATA_TEST_CASE(Random, GCBatchNormalizationLayerFixture<half>, framework
 TEST_SUITE_END()

 TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(Random, GCBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, GCBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+    act_infos),
     framework::dataset::make("DataType", DataType::F32)))
 {
     // Validate output
diff --git a/tests/validation/NEON/BatchNormalizationLayer.cpp b/tests/validation/NEON/BatchNormalizationLayer.cpp
index dfa32bbb07..3501c359db 100644
--- a/tests/validation/NEON/BatchNormalizationLayer.cpp
+++ b/tests/validation/NEON/BatchNormalizationLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -49,6 +49,12 @@ constexpr AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value fo
 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
 constexpr AbsoluteTolerance<float> tolerance_qs8(3.0f);  /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS8 */
 constexpr AbsoluteTolerance<float> tolerance_qs16(6.0f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS16 */
+const auto act_infos = framework::dataset::make("ActivationInfo",
+{
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f),
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 8.f, 2.f),
+});
 } // namespace

 TEST_SUITE(NEON)
@@ -82,13 +88,15 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::Ran
 // *INDENT-OFF*
 // clang-format off
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
     framework::dataset::make("InputInfo", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
         TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),    // Window shrink
         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),    // Mismatching data types
         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),    // Mismatching data types
         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),    // Invalid mean/var/beta/gamma shape
         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point position
+        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Fused activation with fixed point not supported
+        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),    // Fused activation's a < b
         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
     }),
@@ -98,6 +106,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
     TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F16),
     TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
     TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
+    TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
+    TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
     TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
     TensorInfo(),
 })),
@@ -108,10 +118,23 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
     TensorInfo(TensorShape(5U), 1, DataType::F32),
     TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
     TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
+    TensorInfo(TensorShape(2U), 1, DataType::F32),
+    TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
     TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
 })),
-    framework::dataset::make("Expected", { true, false, false, false, false, false, true, true})),
-    input_info, output_info, mvbg_info, expected)
+    framework::dataset::make("ActivationLayerInfo",{ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f),
+        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f),
+        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f),
+        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 2.f),
+        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 2.f),
+        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 2.f, 6.f),
+        ActivationLayerInfo(),
+        ActivationLayerInfo(),
+    })),
+    framework::dataset::make("Expected", { true, false, false, false, false, false, false, false, true, true})),
+    input_info, output_info, mvbg_info, act_info, expected)
 {
     const auto &mean_info = mvbg_info;
     const auto &var_info  = mvbg_info;
@@ -120,14 +143,15 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
     bool has_error = bool(NEBatchNormalizationLayer::validate(
         &input_info.clone()->set_is_resizable(false), output_info.total_size() ? &output_info.clone()->set_is_resizable(false) : nullptr,
         &mean_info.clone()->set_is_resizable(false), &var_info.clone()->set_is_resizable(false),
-        &beta_info.clone()->set_is_resizable(false), &gamma_info.clone()->set_is_resizable(false), 1.f));
+        &beta_info.clone()->set_is_resizable(false), &gamma_info.clone()->set_is_resizable(false), 1.f, act_info));
     ARM_COMPUTE_EXPECT(has_error == expected, framework::LogLevel::ERRORS);
 }
 // clang-format on
 // *INDENT-ON*

 TEST_SUITE(Float)
-FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+    act_infos),
     framework::dataset::make("DataType", DataType::F32)))
 {
     // Validate output
@@ -137,7 +161,8 @@ TEST_SUITE_END()

 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 TEST_SUITE(Float16)
-FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+    act_infos),
     framework::dataset::make("DataType", DataType::F16)))
 {
     // Validate output
@@ -151,7 +176,8 @@ template <typename T>
 using NEBatchNormalizationLayerFixedPointFixture = BatchNormalizationLayerValidationFixedPointFixture<Tensor, Accessor, NEBatchNormalizationLayer, T>;

 TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+    framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
     framework::dataset::make("DataType", DataType::QS8)),
     framework::dataset::make("FractionalBits", 1, 6)))
 {
@@ -161,7 +187,8 @@ FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int8_t
 TEST_SUITE_END()

 TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+    framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
     framework::dataset::make("DataType", DataType::QS16)),
     framework::dataset::make("FractionalBits", 1, 14)))
 {
diff --git a/tests/validation/fixtures/BatchNormalizationLayerFixture.h b/tests/validation/fixtures/BatchNormalizationLayerFixture.h
index 298c9ca411..e02c619249 100644
--- a/tests/validation/fixtures/BatchNormalizationLayerFixture.h
+++ b/tests/validation/fixtures/BatchNormalizationLayerFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -45,12 +45,12 @@ class BatchNormalizationLayerValidationFixedPointFixture : public framework::Fix
 {
 public:
     template <typename...>
-    void setup(TensorShape shape0, TensorShape shape1, float epsilon, DataType dt, int fractional_bits)
+    void setup(TensorShape shape0, TensorShape shape1, float epsilon, ActivationLayerInfo act_info, DataType dt, int fractional_bits)
     {
         _fractional_bits = fractional_bits;
         _data_type       = dt;
-        _target    = compute_target(shape0, shape1, epsilon, dt, fractional_bits);
-        _reference = compute_reference(shape0, shape1, epsilon, dt, fractional_bits);
+        _target    = compute_target(shape0, shape1, epsilon, act_info, dt, fractional_bits);
+        _reference = compute_reference(shape0, shape1, epsilon, act_info, dt, fractional_bits);
     }

 protected:
@@ -85,7 +85,7 @@ protected:
         }
     }

-    TensorType compute_target(const TensorShape &shape0, const TensorShape &shape1, float epsilon, DataType dt, int fixed_point_position)
+    TensorType compute_target(const TensorShape &shape0, const TensorShape &shape1, float epsilon, ActivationLayerInfo act_info, DataType dt, int fixed_point_position)
     {
         // Create tensors
         TensorType src = create_tensor<TensorType>(shape0, dt, 1, fixed_point_position);
@@ -97,7 +97,7 @@ protected:
         // Create and configure function
         FunctionType norm;
-        norm.configure(&src, &dst, &mean, &var, &beta, &gamma, epsilon);
+        norm.configure(&src, &dst, &mean, &var, &beta, &gamma, epsilon, act_info);

         ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -130,7 +130,7 @@ protected:
         return dst;
     }

-    SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, float epsilon, DataType dt, int fixed_point_position)
+    SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, float epsilon, ActivationLayerInfo act_info, DataType dt, int fixed_point_position)
     {
         // Create reference
         SimpleTensor<T> ref_src{ shape0, dt, 1, fixed_point_position };
@@ -142,7 +142,7 @@ protected:
         // Fill reference
         fill(ref_src, ref_mean, ref_var, ref_beta, ref_gamma);

-        return reference::batch_normalization_layer(ref_src, ref_mean, ref_var, ref_beta, ref_gamma, epsilon, fixed_point_position);
+        return reference::batch_normalization_layer(ref_src, ref_mean, ref_var, ref_beta, ref_gamma, epsilon, act_info, fixed_point_position);
     }

     TensorType _target{};
@@ -156,9 +156,9 @@ class BatchNormalizationLayerValidationFixture : public BatchNormalizationLayerV
 {
 public:
     template <typename...>
-    void setup(TensorShape shape0, TensorShape shape1, float epsilon, DataType dt)
+    void setup(TensorShape shape0, TensorShape shape1, float epsilon, ActivationLayerInfo act_info, DataType dt)
     {
-        BatchNormalizationLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, epsilon, dt, 0);
+        BatchNormalizationLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, epsilon, act_info, dt, 0);
     }
 };
 } // namespace validation
diff --git a/tests/validation/reference/BatchNormalizationLayer.cpp b/tests/validation/reference/BatchNormalizationLayer.cpp
index e4446d1694..a9d9f0320d 100644
--- a/tests/validation/reference/BatchNormalizationLayer.cpp
+++ b/tests/validation/reference/BatchNormalizationLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,6 +23,8 @@
  */
 #include "BatchNormalizationLayer.h"
+#include "ActivationLayer.h"
+
 #include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
@@ -37,8 +39,9 @@ namespace reference
 // Batch Normalization Layer for fixed point type
 template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type *>
 SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
-                                          int fixed_point_position)
+                                          ActivationLayerInfo act_info, int fixed_point_position)
 {
+    ARM_COMPUTE_UNUSED(act_info);
     SimpleTensor<T> result(src.shape(), src.data_type());

     const auto cols = static_cast<int>(src.shape()[0]);
@@ -79,7 +82,7 @@ SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const Simp
 // Batch Normalization Layer for floating point type
 template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type *>
 SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
-                                          int fixed_point_position)
+                                          ActivationLayerInfo act_info, int fixed_point_position)
 {
     ARM_COMPUTE_UNUSED(fixed_point_position);

@@ -103,21 +106,28 @@ SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const Simp
                     const float numerator   = src[pos] - mean[i];
                     const float x_bar       = numerator / denominator;
                     result[pos]             = beta[i] + x_bar * gamma[i];
+                    ;
                 }
             }
         }
     }
+
+    if(act_info.enabled())
+    {
+        result = activation_layer(result, act_info);
+    }
+
     return result;
 }

 template SimpleTensor<float> batch_normalization_layer(const SimpleTensor<float> &src, const SimpleTensor<float> &mean, const SimpleTensor<float> &var, const SimpleTensor<float> &beta,
-                                                       const SimpleTensor<float> &gamma, float epsilon, int fixed_point_position);
+                                                       const SimpleTensor<float> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
 template SimpleTensor<int8_t> batch_normalization_layer(const SimpleTensor<int8_t> &src, const SimpleTensor<int8_t> &mean, const SimpleTensor<int8_t> &var, const SimpleTensor<int8_t> &beta,
-                                                        const SimpleTensor<int8_t> &gamma, float epsilon, int fixed_point_position);
+                                                        const SimpleTensor<int8_t> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
 template SimpleTensor<int16_t> batch_normalization_layer(const SimpleTensor<int16_t> &src, const SimpleTensor<int16_t> &mean, const SimpleTensor<int16_t> &var, const SimpleTensor<int16_t> &beta,
-                                                         const SimpleTensor<int16_t> &gamma, float epsilon, int fixed_point_position);
+                                                         const SimpleTensor<int16_t> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
 template SimpleTensor<half> batch_normalization_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &mean, const SimpleTensor<half> &var, const SimpleTensor<half> &beta,
-                                                      const SimpleTensor<half> &gamma, float epsilon, int fixed_point_position);
+                                                      const SimpleTensor<half> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
 } // namespace reference
 } // namespace validation
diff --git a/tests/validation/reference/BatchNormalizationLayer.h b/tests/validation/reference/BatchNormalizationLayer.h
index 1a554adf7e..329909dab4 100644
--- a/tests/validation/reference/BatchNormalizationLayer.h
+++ b/tests/validation/reference/BatchNormalizationLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -37,11 +37,13 @@ namespace reference
 {
 template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
 SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
-                                          int fixed_point_position);
+                                          ActivationLayerInfo act_info,
+                                          int fixed_point_position);

 template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
 SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
-                                          int fixed_point_position);
+                                          ActivationLayerInfo act_info,
+                                          int fixed_point_position);
 } // namespace reference
 } // namespace validation
 } // namespace test
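
For illustration only, a minimal standalone sketch (not part of the patch; the function name and flat per-channel layout are hypothetical simplifications of the SimpleTensor-based reference above) of what the fused float path computes with the BOUNDED_RELU(6.f) activation selected by the MobileNet test cases:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

// Fused batch normalization + BOUNDED_RELU with upper bound a = 6, applied to
// one channel's values. Mirrors the reference computation in the patch:
//   x_bar = (src - mean) / sqrt(var + epsilon);  dst = act(beta + x_bar * gamma)
std::vector<float> batch_norm_bounded_relu6(const std::vector<float> &x,
                                            float mean, float var, float beta,
                                            float gamma, float epsilon = 0.001f)
{
    std::vector<float> y(x.size());
    const float denominator = std::sqrt(var + epsilon); // per-channel denominator, as in the reference
    for(std::size_t i = 0; i < x.size(); ++i)
    {
        const float x_bar = (x[i] - mean) / denominator; // normalize
        const float bn    = beta + x_bar * gamma;        // scale and shift
        y[i] = std::min(std::max(bn, 0.f), 6.f);         // fused BOUNDED_RELU, a = 6
    }
    return y;
}

int main()
{
    const std::vector<float> out = batch_norm_bounded_relu6({ -1.f, 0.5f, 4.f }, 0.f, 1.f, 0.f, 2.f);
    for(float v : out)
    {
        std::printf("%f\n", v); // values are clamped to [0, 6]
    }
    return 0;
}

Note that the reference applies the activation as a separate pass over the whole result tensor (result = activation_layer(result, act_info)); the sketch folds it into the same loop, which is the behavior the fused device kernels are validated against.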