From 3e84bb662f9c6c3f77837640b44c41b7e3403ed4 Mon Sep 17 00:00:00 2001
From: Manuel Bottini
Date: Wed, 13 Nov 2019 17:24:43 +0000
Subject: COMPMID-2920: NEInstanceNormalization fails on NHWC validations

Improved TensorInfo to accept DataLayout, useful for testing the validate functions

Removing nightly tests

Moving all vpadd instructions into add.h

Change-Id: I96290a6f26272eae865dba48bbc3c6aee4bc0214
Signed-off-by: Manuel Bottini
Reviewed-on: https://review.mlplatform.org/c/2287
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Reviewed-by: Georgios Pinitas
---
 .../kernels/CLInstanceNormalizationLayerKernel.h   |  6 +--
 .../kernels/NEInstanceNormalizationLayerKernel.h   |  4 +-
 arm_compute/core/NEON/wrapper/intrinsics/add.h     | 20 ++++
 .../core/NEON/wrapper/intrinsics/intrinsics.h      |  1 -
 arm_compute/core/NEON/wrapper/intrinsics/padd.h    | 53 ----------------------
 arm_compute/core/TensorInfo.h                      |  9 ++++
 .../kernels/CLInstanceNormalizationLayerKernel.cpp |  3 +-
 .../kernels/NEInstanceNormalizationLayerKernel.cpp |  4 +-
 src/core/TensorInfo.cpp                            |  7 +++
 .../functions/NEInstanceNormalizationLayer.cpp     |  2 +-
 tests/validation/CL/InstanceNormalizationLayer.cpp | 28 +++---
 .../validation/NEON/InstanceNormalizationLayer.cpp | 27 +----
 12 files changed, 59 insertions(+), 105 deletions(-)
 delete mode 100644 arm_compute/core/NEON/wrapper/intrinsics/padd.h

diff --git a/arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h b/arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
index bc016d1ceb..00a8a346d9 100644
--- a/arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
@@ -49,7 +49,8 @@ public:
 
     /** Set the input and output tensors.
      *
-     * @param[in, out] input  Source tensor. Data types supported: F16/F32. Data layout supported: NCHW
+     * @param[in, out] input  Source tensor. Data types supported: F16/F32. Data layout supported: NCHW, NHWC
+     *                        In case of @p output tensor = nullptr this tensor will store the result of the normalization.
      * @param[out]     output Destination tensor. Data types and data layouts supported: same as @p input.
      * @param[in]      gamma  (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
      * @param[in]      beta   (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
@@ -59,8 +60,7 @@ public:
 
     /** Static function to check if given info will lead to a valid configuration of @ref CLInstanceNormalizationLayer.
      *
-     * @param[in] input  Source tensor info. In case of @p output tensor = nullptr this tensor will store the result of the normalization.
-     *                   Data types supported: F16/F32. Data layout supported: NHWC, NCHW
+     * @param[in] input  Source tensor info. Data types supported: F16/F32. Data layout supported: NHWC, NCHW
     * @param[in] output Destination tensor info. Data types and data layouts supported: same as @p input.
     * @param[in] gamma  (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
     * @param[in] beta   (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
diff --git a/arm_compute/core/NEON/kernels/NEInstanceNormalizationLayerKernel.h b/arm_compute/core/NEON/kernels/NEInstanceNormalizationLayerKernel.h
index 9745d266b8..c34119796d 100644
--- a/arm_compute/core/NEON/kernels/NEInstanceNormalizationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEInstanceNormalizationLayerKernel.h
@@ -53,6 +53,7 @@ public:
     /** Set the input and output tensors.
      *
      * @param[in, out] input  Source tensor. Data types supported: F16/F32. Data layout supported: NCHW
+     *                        In case of @p output tensor = nullptr this tensor will store the result of the normalization.
      * @param[out]     output Destination tensor. Data types and data layouts supported: same as @p input.
      * @param[in]      gamma  (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
      * @param[in]      beta   (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
@@ -62,8 +63,7 @@ public:
 
     /** Static function to check if given info will lead to a valid configuration of @ref NEInstanceNormalizationLayer.
      *
-     * @param[in] input  Source tensor info. In case of @p output tensor = nullptr this tensor will store the result of the normalization.
-     *                   Data types supported: F16/F32. Data layout supported: NCHW
+     * @param[in] input  Source tensor info. Data types supported: F16/F32. Data layout supported: NCHW
     * @param[in] output Destination tensor info. Data types and data layouts supported: same as @p input.
     * @param[in] gamma  (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
     * @param[in] beta   (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/add.h b/arm_compute/core/NEON/wrapper/intrinsics/add.h
index 1839170485..f0823463af 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/add.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/add.h
@@ -176,6 +176,26 @@ VPADDL_IMPL(int32x4_t, int16x8_t, vpaddlq, s16)
 VPADDL_IMPL(uint64x2_t, uint32x4_t, vpaddlq, u32)
 VPADDL_IMPL(int64x2_t, int32x4_t, vpaddlq, s32)
 #undef VPADDL_IMPL
+
+// VPADD: Add pairwise
+#define VPADD_IMPL(stype, vtype, prefix, postfix)      \
+    inline vtype vpadd(const vtype &a, const vtype &b) \
+    {                                                  \
+        return prefix##_##postfix(a, b);               \
+    }
+
+VPADD_IMPL(uint8x8_t, uint8x8_t, vpadd, u8)
+VPADD_IMPL(int8x8_t, int8x8_t, vpadd, s8)
+VPADD_IMPL(uint16x4_t, uint16x4_t, vpadd, u16)
+VPADD_IMPL(int16x4_t, int16x4_t, vpadd, s16)
+VPADD_IMPL(uint32x2_t, uint32x2_t, vpadd, u32)
+VPADD_IMPL(int32x2_t, int32x2_t, vpadd, s32)
+VPADD_IMPL(float32x2_t, float32x2_t, vpadd, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VPADD_IMPL(float16x4_t, float16x4_t, vpadd, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VPADD_IMPL
 } // namespace wrapper
 } // namespace arm_compute
 #endif /* __ARM_COMPUTE_WRAPPER_ADD_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
index 6eae1cf801..d9b8297cb9 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -53,7 +53,6 @@
 #include "arm_compute/core/NEON/wrapper/intrinsics/neg.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/not.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/orr.h"
-#include "arm_compute/core/NEON/wrapper/intrinsics/padd.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/pmax.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/pmin.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/pow.h"
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/padd.h b/arm_compute/core/NEON/wrapper/intrinsics/padd.h
deleted file mode 100644
index 5ee2173df8..0000000000
--- a/arm_compute/core/NEON/wrapper/intrinsics/padd.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_WRAPPER_PADD_H__
-#define __ARM_COMPUTE_WRAPPER_PADD_H__
-
-#include <arm_neon.h>
-
-namespace arm_compute
-{
-namespace wrapper
-{
-#define VPADD_IMPL(stype, vtype, prefix, postfix)      \
-    inline vtype vpadd(const vtype &a, const vtype &b) \
-    {                                                  \
-        return prefix##_##postfix(a, b);               \
-    }
-
-VPADD_IMPL(uint8x8_t, uint8x8_t, vpadd, u8)
-VPADD_IMPL(int8x8_t, int8x8_t, vpadd, s8)
-VPADD_IMPL(uint16x4_t, uint16x4_t, vpadd, u16)
-VPADD_IMPL(int16x4_t, int16x4_t, vpadd, s16)
-VPADD_IMPL(uint32x2_t, uint32x2_t, vpadd, u32)
-VPADD_IMPL(int32x2_t, int32x2_t, vpadd, s32)
-VPADD_IMPL(float32x2_t, float32x2_t, vpadd, f32)
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-VPADD_IMPL(float16x4_t, float16x4_t, vpadd, f16)
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-
-#undef VPADD_IMPL
-} // namespace wrapper
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_WRAPPER_PADD_H__ */
diff --git a/arm_compute/core/TensorInfo.h b/arm_compute/core/TensorInfo.h
index d1a64f59ef..a68f769c51 100644
--- a/arm_compute/core/TensorInfo.h
+++ b/arm_compute/core/TensorInfo.h
@@ -99,6 +99,15 @@ public:
      */
     TensorInfo(const TensorShape &tensor_shape, size_t num_channels, DataType data_type);
 
+    /** Constructor
+     *
+     * @param[in] tensor_shape It specifies the size for each dimension of the tensor in number of elements.
+     * @param[in] num_channels It indicates the number of channels for each tensor element
+     * @param[in] data_type    Data type to use for each tensor element
+     * @param[in] data_layout  The data layout setting for the tensor data.
+     */
+    TensorInfo(const TensorShape &tensor_shape, size_t num_channels, DataType data_type, DataLayout data_layout);
+
     /** Constructor
      *
      * @param[in] tensor_shape It specifies the size for each dimension of the tensor in number of elements.
diff --git a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp
index a03322b61d..0f208573a1 100644
--- a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp
@@ -44,13 +44,14 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, f
     ARM_COMPUTE_UNUSED(beta);
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(epsilon == 0.f, "Epsilon must be different than 0");
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F16, DataType::F32);
 
     if(output != nullptr && output->total_size() != 0)
     {
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
+        ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_channels() != output->num_channels(), "Input and output have different number of channels");
     }
 
     return Status{};
diff --git a/src/core/NEON/kernels/NEInstanceNormalizationLayerKernel.cpp b/src/core/NEON/kernels/NEInstanceNormalizationLayerKernel.cpp
index 31d982c4e3..7fc93617b2 100644
--- a/src/core/NEON/kernels/NEInstanceNormalizationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEInstanceNormalizationLayerKernel.cpp
@@ -145,7 +145,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, f
     ARM_COMPUTE_UNUSED(beta);
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(epsilon == 0.f, "Epsilon must be different than 0");
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F16, DataType::F32);
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_layout() == DataLayout::NHWC, "NHWC data layout is not supported by the kernel directly");
 
     if(output != nullptr && output->total_size() != 0)
@@ -153,8 +153,8 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, f
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
+        ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_channels() != output->num_channels(), "Input and output have different number of channels");
     }
-
     return Status{};
 }
 
diff --git a/src/core/TensorInfo.cpp b/src/core/TensorInfo.cpp
index cd36e8be2c..cc13b39a24 100644
--- a/src/core/TensorInfo.cpp
+++ b/src/core/TensorInfo.cpp
@@ -91,6 +91,13 @@ TensorInfo::TensorInfo(const TensorShape &tensor_shape, size_t num_channels, Dat
     _quantization_info = std::move(quantization_info);
 }
 
+TensorInfo::TensorInfo(const TensorShape &tensor_shape, size_t num_channels, DataType data_type, DataLayout data_layout)
+    : TensorInfo()
+{
+    init(tensor_shape, num_channels, data_type);
+    _data_layout = data_layout;
+}
+
 TensorInfo::TensorInfo(const HOGInfo &hog_info, unsigned int width, unsigned int height)
     : TensorInfo()
 {
diff --git a/src/runtime/NEON/functions/NEInstanceNormalizationLayer.cpp b/src/runtime/NEON/functions/NEInstanceNormalizationLayer.cpp
index 295f80af95..d7cb7de627 100644
--- a/src/runtime/NEON/functions/NEInstanceNormalizationLayer.cpp
+++ b/src/runtime/NEON/functions/NEInstanceNormalizationLayer.cpp
@@ -64,7 +64,7 @@ void NEInstanceNormalizationLayer::configure(ITensor *input, ITensor *output, fl
 
 Status NEInstanceNormalizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output, float gamma, float beta, float epsilon)
 {
-    return NEInstanceNormalizationLayerKernel::validate(input, &output->clone()->set_data_layout(DataLayout::NCHW), gamma, beta, epsilon);
+    return NEInstanceNormalizationLayerKernel::validate(&input->clone()->set_data_layout(DataLayout::NCHW), &output->clone()->set_data_layout(DataLayout::NCHW), gamma, beta, epsilon);
 }
 
 void NEInstanceNormalizationLayer::run()
diff --git a/tests/validation/CL/InstanceNormalizationLayer.cpp b/tests/validation/CL/InstanceNormalizationLayer.cpp
index 165ab1fa9c..06de9e5303 100644
--- a/tests/validation/CL/InstanceNormalizationLayer.cpp
+++ b/tests/validation/CL/InstanceNormalizationLayer.cpp
@@ -43,8 +43,8 @@ namespace validation
 namespace
 {
 /** Tolerance for float operations */
-AbsoluteTolerance<float> tolerance_f32(0.001f);
-AbsoluteTolerance<float> tolerance_f16(2.f);
+AbsoluteTolerance<float> tolerance_f32(0.0015f);
+AbsoluteTolerance<float> tolerance_f16(0.5f);
 } // namespace
 
 TEST_SUITE(CL)
@@ -57,6 +57,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32), // Mismatching shape input/output
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 2, DataType::F32), // Number of Input channels != 1
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::S16), // DataType != F32
+            TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NCHW),
+            TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NHWC),
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
@@ -66,12 +68,14 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
             TensorInfo(TensorShape(256U, 64U, 32U, 4U), 1, DataType::F32),
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::S16),
+            TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NCHW),
+            TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NHWC),
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32) })),
-    framework::dataset::make("Expected", { false, false, false, false, true, true, true, true })),
+    framework::dataset::make("Expected", { false, false, false, false, true, true, true, true, true, true })),
     input_info, output_info, expected)
 {
     bool is_valid = bool(CLInstanceNormalizationLayer::validate(&input_info.clone()->set_is_resizable(false),
@@ -96,15 +100,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLInstanceNormalizationLayerFixture, fra
     validate(CLAccessor(_target), _reference, tolerance_f32);
 }
 
-FIXTURE_DATA_TEST_CASE(RunLarge, CLInstanceNormalizationLayerFixture<float>, framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(datasets::Large4DShapes(),
-                                               framework::dataset::make("DataType", DataType::F32)),
-                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
-                               framework::dataset::make("InPlace", { false, true })))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, tolerance_f32);
-}
 TEST_SUITE_END() // FP32
 TEST_SUITE(FP16)
 FIXTURE_DATA_TEST_CASE(RunSmall, CLInstanceNormalizationLayerFixture, fram
     validate(CLAccessor(_target), _reference, tolerance_f16);
 }
 
-FIXTURE_DATA_TEST_CASE(RunLarge, CLInstanceNormalizationLayerFixture<half>, framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(datasets::LargeShapes(),
-                                               framework::dataset::make("DataType", DataType::F16)),
-                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
-                               framework::dataset::make("InPlace", { false, true })))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, tolerance_f16);
-}
 TEST_SUITE_END() // FP16
 TEST_SUITE_END() // InstanceNormalizationLayer
 TEST_SUITE_END() // CL
diff --git a/tests/validation/NEON/InstanceNormalizationLayer.cpp b/tests/validation/NEON/InstanceNormalizationLayer.cpp
index b4be6ba109..d2a80f24f7 100644
--- a/tests/validation/NEON/InstanceNormalizationLayer.cpp
+++ b/tests/validation/NEON/InstanceNormalizationLayer.cpp
@@ -45,7 +45,7 @@ namespace
 /** Tolerance for float operations */
 AbsoluteTolerance<float> tolerance_f32(0.0015f);
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-AbsoluteTolerance<float> tolerance_f16(0.2f);
+AbsoluteTolerance<float> tolerance_f16(0.5f);
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 } // namespace
 
@@ -59,6 +59,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32), // Mismatching shape input/output
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 2, DataType::F32), // Number of Input channels != 1
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::S16), // DataType != F32
+            TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NCHW),
+            TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NHWC),
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
@@ -68,12 +70,14 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
             TensorInfo(TensorShape(256U, 64U, 32U, 4U), 1, DataType::F32),
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::S16),
+            TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NCHW),
+            TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NHWC),
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32) })),
-    framework::dataset::make("Expected", { false, false, false, false, true, true, true, true })),
+    framework::dataset::make("Expected", { false, false, false, false, true, true, true, true, true, true })),
     input_info, output_info, expected)
 {
     bool is_valid = bool(NEInstanceNormalizationLayer::validate(&input_info.clone()->set_is_resizable(false),
@@ -98,15 +102,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEInstanceNormalizationLayerFixture, fra
     validate(Accessor(_target), _reference, tolerance_f32);
 }
 
-FIXTURE_DATA_TEST_CASE(RunLarge, NEInstanceNormalizationLayerFixture<float>, framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(datasets::Large4DShapes(),
-                                               framework::dataset::make("DataType", DataType::F32)),
-                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
-                               framework::dataset::make("InPlace", { false, true })))
-{
-    // Validate output
-    validate(Accessor(_target), _reference, tolerance_f32);
-}
 TEST_SUITE_END() // FP32
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
@@ -120,16 +115,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEInstanceNormalizationLayerFixture, fram
     // Validate output
     validate(Accessor(_target), _reference, tolerance_f16);
 }
-
-FIXTURE_DATA_TEST_CASE(RunLarge, NEInstanceNormalizationLayerFixture<half>, framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(datasets::LargeShapes(),
-                                               framework::dataset::make("DataType", DataType::F16)),
-                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
-                               framework::dataset::make("InPlace", { false, true })))
-{
-    // Validate output
-    validate(Accessor(_target), _reference, tolerance_f16);
-}
 TEST_SUITE_END() // FP16
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 
-- 
cgit v1.2.1
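
Usage sketch (not part of the patch): the new four-argument TensorInfo constructor above is what lets the Validate test cases build tensor infos with an explicit NHWC/NCHW layout. The following minimal, self-contained example shows how it might be exercised against the public validate() entry point; the shape, gamma/beta/epsilon values and the main() wrapper are illustrative assumptions, not code from this change.

    // usage_sketch.cpp -- illustrative example only
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEInstanceNormalizationLayer.h"

    using namespace arm_compute;

    int main()
    {
        // Build tensor infos with an explicit data layout via the constructor added in this patch.
        const TensorInfo input(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NHWC);
        const TensorInfo output(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NHWC);

        // The function-level validate() is expected to accept NHWC, since (as the
        // NEInstanceNormalizationLayer::validate change above suggests) the runtime
        // function maps to an NCHW kernel internally.
        const Status status = NEInstanceNormalizationLayer::validate(&input, &output, 1.f, 0.f, 1e-12f);

        return bool(status) ? 0 : 1; // 0 when the configuration is reported as valid
    }

The bool(status) conversion mirrors the pattern already used by the DATA_TEST_CASE blocks in the validation files touched by this patch.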