about summary refs log tree commit diff
diff options
context:
space:
mode:
authorMichalis Spyrou <michalis.spyrou@arm.com>2018-11-22 11:22:18 +0000
committerGeorgios Pinitas <georgios.pinitas@arm.com>2018-11-23 17:02:27 +0000
commit0c71d0ba75a11720e39e2a7163e993d51350683d (patch)
tree089f7b293802944a7672c85f637141aad0b55c75
parentaaa27189e0e75c3ebad57854ac8901d0140677ac (diff)
downloadComputeLibrary-0c71d0ba75a11720e39e2a7163e993d51350683d.tar.gz
COMPMID-1647 NENormalizationLayer IN_MAP_2D support for NHWC for FP32/FP16
Change-Id: Id74cc7ba8e5cabee6acd3798d4779f88b1f00a9b
-rw-r--r--arm_compute/core/NEON/kernels/NENormalizationLayerKernel.h14
-rw-r--r--arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h1
-rw-r--r--arm_compute/core/NEON/wrapper/intrinsics/mla.h13
-rw-r--r--arm_compute/core/NEON/wrapper/intrinsics/pow.h48
-rw-r--r--arm_compute/runtime/NEON/functions/NENormalizationLayer.h8
-rw-r--r--src/core/NEON/kernels/NENormalizationLayerKernel.cpp141
-rw-r--r--tests/validation/NEON/NormalizationLayer.cpp8
7 files changed, 137 insertions, 96 deletions
diff --git a/arm_compute/core/NEON/kernels/NENormalizationLayerKernel.h b/arm_compute/core/NEON/kernels/NENormalizationLayerKernel.h
index 92086437a6..533335f9af 100644
--- a/arm_compute/core/NEON/kernels/NENormalizationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NENormalizationLayerKernel.h
@@ -54,20 +54,20 @@ public:
/** Set the input and output tensors.
*
* @param[in] input Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
- * and an optional 4th dimension for batch of inputs. Data types supported: FP16/F32.
+ * and an optional 4th dimension for batch of inputs. Data types supported: FP16/F32. Data layouts supported: NCHW/NHWC.
* @param[in] input_squared Source with each element has been squared. 3 lower dims represent a single input with dimensions [width, height, IFM],
- * Data type supported: same as @p input
- * @param[out] output Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
+ * Data type and layout supported: same as @p input.
+ * @param[out] output Destination tensor. Output will have the same number of dimensions as input. Data type and layout supported: same as @p input.
* @param[in] norm_info Normalization layer information like the normalization type, normalization size and other parameters.
*/
void configure(const ITensor *input, const ITensor *input_squared, ITensor *output, NormalizationLayerInfo norm_info);
/** Static function to check if given info will lead to a valid configuration of @ref NENormalizationLayerKernel
*
* @param[in] input Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
- * and an optional 4th dimension for batch of inputs. Data types supported: FP16/F32.
+ * and an optional 4th dimension for batch of inputs. Data types supported: FP16/F32. Data layouts supported: NCHW/NHWC.
* @param[in] input_squared Source with each element has been squared. 3 lower dims represent a single input with dimensions [width, height, IFM],
- * Data type supported: same as @p input
- * @param[in] output Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
+ * Data type and layout supported: same as @p input.
+ * @param[in] output Destination tensor. Output will have the same number of dimensions as input. Data type and layout supported: same as @p input.
* @param[in] norm_info Normalization layer information like the normalization type, normalization size and other parameters.
*
* @return a status
@@ -89,7 +89,7 @@ private:
*
* @param[in] window Region on which to execute the kernel.
*/
- template <DataType dt, unsigned int dim, bool do_2D_norm>
+ template <typename T, unsigned int S, unsigned int dim, bool do_2D_norm>
void normalize_float(const Window &window);
/** Common signature for all the specialised normalization functions
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
index 7ea0aba565..77787afcf4 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -42,6 +42,7 @@
#include "arm_compute/core/NEON/wrapper/intrinsics/mul.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/neg.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/padd.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/pow.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/store.h"
#endif /* __ARM_COMPUTE_WRAPPER_INTRINSICS_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/mla.h b/arm_compute/core/NEON/wrapper/intrinsics/mla.h
index 32a650b57f..db6d7b957a 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/mla.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/mla.h
@@ -35,6 +35,13 @@ namespace wrapper
{ \
return prefix##_##postfix(a, b, c); \
}
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#define VMLA_IMPL2(stype, vtype, prefix1, prefix2, postfix) \
+ inline vtype vmla(const vtype &a, const vtype &b, const vtype &c) \
+ { \
+ return prefix1##_##postfix(a, prefix2##_##postfix(b, c)); \
+ }
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
VMLA_IMPL(uint8x8_t, uint8x8_t, vmla, u8)
VMLA_IMPL(int8x8_t, int8x8_t, vmla, s8)
@@ -43,6 +50,9 @@ VMLA_IMPL(int16x4_t, int16x4_t, vmla, s16)
VMLA_IMPL(uint32x2_t, uint32x2_t, vmla, u32)
VMLA_IMPL(int32x2_t, int32x2_t, vmla, s32)
VMLA_IMPL(float32x2_t, float32x2_t, vmla, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMLA_IMPL2(float16x4_t, float16x4_t, vadd, vmul, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
VMLA_IMPL(uint8x16_t, uint8x16_t, vmlaq, u8)
VMLA_IMPL(int8x16_t, int8x16_t, vmlaq, s8)
@@ -51,6 +61,9 @@ VMLA_IMPL(int16x8_t, int16x8_t, vmlaq, s16)
VMLA_IMPL(uint32x4_t, uint32x4_t, vmlaq, u32)
VMLA_IMPL(int32x4_t, int32x4_t, vmlaq, s32)
VMLA_IMPL(float32x4_t, float32x4_t, vmlaq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMLA_IMPL2(float16x8_t, float16x8_t, vaddq, vmulq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
#undef VMLA_IMPL
} // namespace wrapper
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/pow.h b/arm_compute/core/NEON/wrapper/intrinsics/pow.h
new file mode 100644
index 0000000000..865df416ee
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/pow.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_POW_H__
+#define __ARM_COMPUTE_WRAPPER_POW_H__
+
+#include "arm_compute/core/NEON/NEMath.h"
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VPOW_IMPL(vtype, prefix, postfix) \
+ inline vtype vpow(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VPOW_IMPL(float32x4_t, vpowq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VPOW_IMPL(float16x8_t, vpowq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VPOW_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_WRAPPER_POW_H__ */
diff --git a/arm_compute/runtime/NEON/functions/NENormalizationLayer.h b/arm_compute/runtime/NEON/functions/NENormalizationLayer.h
index 4f1f32fba5..d994093e1d 100644
--- a/arm_compute/runtime/NEON/functions/NENormalizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NENormalizationLayer.h
@@ -55,16 +55,16 @@ public:
/** Set the input and output tensors.
*
* @param[in] input Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
- * and an optional 4th dimension for batch of inputs. Data type supported: F16/F32
- * @param[out] output Destination with the same dimensions, data type and number of channels of @p input
+ * and an optional 4th dimension for batch of inputs. Data type supported: F16/F32. Data layouts supported: NCHW/NHWC.
+ * @param[out] output Destination with the same dimensions, data type, data layout and number of channels of @p input
* @param[in] norm_info Normalization layer information like the normalization type, normalization size and other parameters.
*/
void configure(const ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info);
/** Static function to check if given info will lead to a valid configuration of @ref NENormalizationLayer
*
* @param[in] input Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
- * and an optional 4th dimension for batch of inputs. Data type supported: F16/F32
- * @param[in] output Destination with the same dimensions, data type and number of channels of @p input
+ * and an optional 4th dimension for batch of inputs. Data type supported: F16/F32. Data layouts supported: NCHW/NHWC.
+ * @param[in] output Destination with the same dimensions, data type, data layout and number of channels of @p input
* @param[in] norm_info Normalization layer information like the normalization type, normalization size and other parameters.
*
* @return a status
diff --git a/src/core/NEON/kernels/NENormalizationLayerKernel.cpp b/src/core/NEON/kernels/NENormalizationLayerKernel.cpp
index 27af121ce5..e5f6e4f41a 100644
--- a/src/core/NEON/kernels/NENormalizationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NENormalizationLayerKernel.cpp
@@ -29,6 +29,7 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/NEON/NEFixedPoint.h"
#include "arm_compute/core/NEON/NEMath.h"
+#include "arm_compute/core/NEON/wrapper/wrapper.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
@@ -44,8 +45,6 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *input_squ
ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_layout() == DataLayout::NHWC && norm_info.type() == NormType::IN_MAP_2D,
- "Only Cross-map and 1D In-map normalization is supported for NHWC layout");
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, input_squared);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, input_squared);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(!(norm_info.norm_size() % 2), "Normalization size should be odd");
@@ -55,6 +54,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *input_squ
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
}
return Status{};
@@ -143,16 +143,26 @@ void NENormalizationLayerKernel::configure(const ITensor *input, const ITensor *
{
if(norm_info.type() == NormType::IN_MAP_2D)
{
- _func = &NENormalizationLayerKernel::normalize_float<DataType::F32, 0, true>;
+ _func = &NENormalizationLayerKernel::normalize_float<float, 4, 0, true>;
}
else
{
- _func = &NENormalizationLayerKernel::normalize_float<DataType::F32, 0, false>;
+ _func = &NENormalizationLayerKernel::normalize_float<float, 4, 0, false>;
}
break;
}
+ case 1:
+ if(norm_info.type() == NormType::IN_MAP_2D)
+ {
+ _func = &NENormalizationLayerKernel::normalize_float<float, 4, 1, true>;
+ }
+ else
+ {
+ _func = &NENormalizationLayerKernel::normalize_float<float, 4, 1, false>;
+ }
+ break;
case 2:
- _func = &NENormalizationLayerKernel::normalize_float<DataType::F32, 2, false>;
+ _func = &NENormalizationLayerKernel::normalize_float<float, 4, 2, false>;
break;
default:
break;
@@ -168,16 +178,26 @@ void NENormalizationLayerKernel::configure(const ITensor *input, const ITensor *
{
if(norm_info.type() == NormType::IN_MAP_2D)
{
- _func = &NENormalizationLayerKernel::normalize_float<DataType::F16, 0, true>;
+ _func = &NENormalizationLayerKernel::normalize_float<float16_t, 8, 0, true>;
}
else
{
- _func = &NENormalizationLayerKernel::normalize_float<DataType::F16, 0, false>;
+ _func = &NENormalizationLayerKernel::normalize_float<float16_t, 8, 0, false>;
}
break;
}
+ case 1:
+ if(norm_info.type() == NormType::IN_MAP_2D)
+ {
+ _func = &NENormalizationLayerKernel::normalize_float<float16_t, 8, 1, true>;
+ }
+ else
+ {
+ _func = &NENormalizationLayerKernel::normalize_float<float16_t, 8, 1, false>;
+ }
+ break;
case 2:
- _func = &NENormalizationLayerKernel::normalize_float<DataType::F16, 2, false>;
+ _func = &NENormalizationLayerKernel::normalize_float<float16_t, 8, 2, false>;
break;
default:
break;
@@ -195,14 +215,17 @@ void NENormalizationLayerKernel::configure(const ITensor *input, const ITensor *
INEKernel::configure(win_config.second);
}
-template <DataType dt, unsigned int dim, bool do_2D_norm>
+template <typename T, unsigned int S, unsigned int dim, bool do_2D_norm>
void NENormalizationLayerKernel::normalize_float(const Window &window)
{
+ /** NEON vector tag type. */
+ using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
+
Iterator input(_input, window);
Iterator input_squared(_input_squared, window);
Iterator output(_output, window);
- const int dim_y = 1;
+ const int dim_y = _input->info()->data_layout() == DataLayout::NCHW ? 1 : 2;
const int radius = _norm_info.norm_size() / 2;
const int input_squared_stride = _input_squared->info()->strides_in_bytes()[dim];
// We account padding across X only and we iterate over rows
@@ -210,83 +233,39 @@ void NENormalizationLayerKernel::normalize_float(const Window &window)
const int max_right = _input->info()->dimension(dim) - 1;
const int max_bottom = _input->info()->dimension(dim_y) - 1;
- if(dt == DataType::F32)
- {
- const float32x4_t coeff_vec = vdupq_n_f32(_norm_info.scale_coeff());
- const float32x4_t beta_vec = vdupq_n_f32(_norm_info.beta());
- const float32x4_t kappa_vec = vdupq_n_f32(_norm_info.kappa());
+ const auto coeff_vec = wrapper::vdup_n(static_cast<T>(_norm_info.scale_coeff()), ExactTagType{});
+ const auto beta_vec = wrapper::vdup_n(static_cast<T>(_norm_info.beta()), ExactTagType{});
+ const auto kappa_vec = wrapper::vdup_n(static_cast<T>(_norm_info.kappa()), ExactTagType{});
- execute_window_loop(window, [&](const Coordinates & id)
- {
- // Get range to normalize
- const int current_row = do_2D_norm ? id[dim_y] : 0;
- const int current_slice = id[dim];
- const int first_row = do_2D_norm ? std::max(current_row - radius, 0) : 0;
- const int last_row = do_2D_norm ? std::min(current_row + radius, max_bottom) : 0;
- const int first_slice = std::max(current_slice - radius, min_left);
- const int last_slice = std::min(current_slice + radius, max_right);
-
- // Accumulate 2D In-Map values
- float32x4_t accu = vdupq_n_f32(0.f);
- for(int j = first_row; j <= last_row; j++)
- {
- // Compute row displacement
- const int row = (j - current_row) * _input_squared->info()->strides_in_bytes()[dim_y];
- const uint8_t *const input_squared_ptr = input_squared.ptr() + row - (current_slice * input_squared_stride);
- for(int i = first_slice; i <= last_slice; ++i)
- {
- accu = vaddq_f32(accu, vld1q_f32(reinterpret_cast<const float *>(input_squared_ptr + i * input_squared_stride)));
- }
- }
-
- // Normalize
- const float32x4_t normalized = vpowq_f32(vmlaq_f32(kappa_vec, coeff_vec, accu), beta_vec);
- const float32x4_t normalized_pixel = vmulq_f32(vld1q_f32(reinterpret_cast<const float *>(input.ptr())), vinvq_f32(normalized));
- vst1q_f32(reinterpret_cast<float *>(output.ptr()), normalized_pixel);
- },
- input, input_squared, output);
- }
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
- else if(dt == DataType::F16)
+ execute_window_loop(window, [&](const Coordinates & id)
{
- const float16x8_t coeff_vec = vdupq_n_f16(_norm_info.scale_coeff());
- const float16x8_t beta_vec_f16 = vdupq_n_f16(_norm_info.beta());
- const float16x8_t kappa_vec = vdupq_n_f16(_norm_info.kappa());
-
- execute_window_loop(window, [&](const Coordinates & id)
+ // Get range to normalize
+ const int current_row = do_2D_norm ? id[dim_y] : 0;
+ const int current_slice = id[dim];
+ const int first_row = do_2D_norm ? std::max(current_row - radius, 0) : 0;
+ const int last_row = do_2D_norm ? std::min(current_row + radius, max_bottom) : 0;
+ const int first_slice = std::max(current_slice - radius, min_left);
+ const int last_slice = std::min(current_slice + radius, max_right);
+
+ // Accumulate 2D In-Map values
+ auto accu = wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{});
+ for(int j = first_row; j <= last_row; j++)
{
- // Get range to normalize
- const int current_row = do_2D_norm ? id[dim_y] : 0;
- const int current_slice = id[dim];
- const int first_row = do_2D_norm ? std::max(current_row - radius, 0) : 0;
- const int last_row = do_2D_norm ? std::min(current_row + radius, max_bottom) : 0;
- const int first_slice = std::max(current_slice - radius, min_left);
- const int last_slice = std::min(current_slice + radius, max_right);
-
- // Accumulate 2D In-Map values
- float16x8_t accu = vdupq_n_f16(0.f);
- for(int j = first_row; j <= last_row; j++)
+ // Compute row displacement
+ const int row = (j - current_row) * _input_squared->info()->strides_in_bytes()[dim_y];
+ const uint8_t *const input_squared_ptr = input_squared.ptr() + row - (current_slice * input_squared_stride);
+ for(int i = first_slice; i <= last_slice; ++i)
{
- // Compute row displacement
- const int row = (j - current_row) * _input_squared->info()->strides_in_bytes()[dim_y];
- const uint8_t *const input_squared_ptr = input_squared.ptr() + row - (current_slice * input_squared_stride);
- for(int i = first_slice; i <= last_slice; ++i)
- {
- accu = vaddq_f16(accu, vld1q_f16(reinterpret_cast<const float16_t *>(input_squared_ptr + i * input_squared_stride)));
- }
+ accu = wrapper::vadd(accu, wrapper::vloadq(reinterpret_cast<const T *>(input_squared_ptr + i * input_squared_stride)));
}
+ }
- const float16x8_t norm_f16 = vpowq_f16(vaddq_f16(kappa_vec, vmulq_f16(coeff_vec, accu)), beta_vec_f16);
- const float16x8_t normalized_pixel = vmulq_f16(vld1q_f16(reinterpret_cast<const float16_t *>(input.ptr())), vinvq_f16(norm_f16));
- vst1q_f16(reinterpret_cast<float16_t *>(output.ptr()), normalized_pixel);
- },
- input, input_squared, output);
- }
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
- else
- {
- ARM_COMPUTE_ERROR("Not supported");
- }
+ // Normalize
+ const auto normalized = wrapper::vpow(wrapper::vmla(kappa_vec, coeff_vec, accu), beta_vec);
+ const auto normalized_pixel = wrapper::vmul(wrapper::vloadq(reinterpret_cast<const T *>(input.ptr())), wrapper::vinv(normalized));
+ wrapper::vstore(reinterpret_cast<T *>(output.ptr()), normalized_pixel);
+ },
+ input, input_squared, output);
}
Status NENormalizationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *input_squared, const ITensorInfo *output, const NormalizationLayerInfo norm_info)
diff --git a/tests/validation/NEON/NormalizationLayer.cpp b/tests/validation/NEON/NormalizationLayer.cpp
index f9b32b9259..20dcafb719 100644
--- a/tests/validation/NEON/NormalizationLayer.cpp
+++ b/tests/validation/NEON/NormalizationLayer.cpp
@@ -104,14 +104,14 @@ TEST_SUITE(Float)
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, NENormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(NormalizationDataset,
framework::dataset::make("DataType", DataType::F16)),
- framework::dataset::make("DataLayout", DataLayout::NCHW)))
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f16);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NENormalizationLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(NormalizationDataset,
framework::dataset::make("DataType", DataType::F16)),
- framework::dataset::make("DataLayout", DataLayout::NCHW)))
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f16);
@@ -122,14 +122,14 @@ TEST_SUITE_END() // FP16
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, NENormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), NormalizationDatasetFP32),
framework::dataset::make("DataType", DataType::F32)),
- framework::dataset::make("DataLayout", DataLayout::NCHW)))
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NENormalizationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), NormalizationDatasetFP32),
framework::dataset::make("DataType", DataType::F32)),
- framework::dataset::make("DataLayout", DataLayout::NCHW)))
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f32);