diff options
-rw-r--r-- src/cpu/kernels/CpuActivationKernel.cpp | 15
-rw-r--r-- tests/validation/fixtures/FFTFixture.h  |  6
2 files changed, 6 insertions(+), 15 deletions(-)
diff --git a/src/cpu/kernels/CpuActivationKernel.cpp b/src/cpu/kernels/CpuActivationKernel.cpp index f1e485883c..e8ece26f17 100644 --- a/src/cpu/kernels/CpuActivationKernel.cpp +++ b/src/cpu/kernels/CpuActivationKernel.cpp @@ -208,18 +208,9 @@ void CpuActivationKernel::configure(const ITensorInfo *src, ITensorInfo *dst, Ac Window win; - if(src->data_layout() != DataLayout::NHWC) - { - // Use squashed window - std::tie(win, _split_dimension) = calculate_squashed_or_max_window(*src); - ICPPKernel::configure(win); - } - else - { - // Configure kernel window - win = calculate_max_window(*src, Steps()); - ICPPKernel::configure(win); - } + // Use squashed window + std::tie(win, _split_dimension) = calculate_squashed_or_max_window(*src); + ICPPKernel::configure(win); } Status CpuActivationKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const ActivationLayerInfo &act_info) diff --git a/tests/validation/fixtures/FFTFixture.h b/tests/validation/fixtures/FFTFixture.h index a70335b6f1..16ac212fcc 100644 --- a/tests/validation/fixtures/FFTFixture.h +++ b/tests/validation/fixtures/FFTFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021 Arm Limited. + * Copyright (c) 2019-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -202,6 +202,8 @@ protected: TensorType bias = create_tensor<TensorType>(bias_shape, _data_type, 1, QuantizationInfo(), _data_layout); TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, QuantizationInfo(), _data_layout); + add_padding_x({ &src, &weights, &bias, &dst }, _data_layout); + // Create and configure function FunctionType conv; conv.configure(&src, &weights, &bias, &dst, info, act_info, _data_type == DataType::F16); @@ -211,8 +213,6 @@ protected: ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); - add_padding_x({ &src, &weights, &bias, &dst }, _data_layout); - // Allocate tensors src.allocator()->allocate(); weights.allocator()->allocate(); |