From 63825e8259508dc7731b6de2e008c5ef8c738d79 Mon Sep 17 00:00:00 2001
From: Giorgio Arena
Date: Thu, 25 Mar 2021 14:54:50 +0000
Subject: Implicit padding testing along the X axis on high priority operators

Add artificial implicit padding testing for the following fixtures:
- Scale
- FullyConnected
- Pooling
- DepthwiseConvolution
- DirectConvolution
- Winograd
- FFT
- GEMM/GEMMLowp

Create a utility function that loops through a list of tensors and adds
random padding based on the global seed (only for the NHWC data layout).

Remove GEMMLowpAssemblyFixture since it wasn't used.
Remove some AssetsLibrary headers since they weren't used.

Resolves COMPMID-4161

Change-Id: Ib6f4f7f113ae69b993d7b2a9e04abbf3de8c99fe
Signed-off-by: Giorgio Arena
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5327
Reviewed-by: Michele Di Giorgio
Reviewed-by: Georgios Pinitas
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
---
 tests/validation/Helpers.cpp                       |  24 ++++
 tests/validation/Helpers.h                         |   9 ++
 tests/validation/NEON/GEMMLowp.cpp                 |   1 -
 tests/validation/UNIT/GPUTarget.cpp                |   3 +-
 tests/validation/UNIT/SafeIntegerOps.cpp           |   3 +-
 .../validation/fixtures/ConvolutionLayerFixture.h  |   6 +-
 .../fixtures/DepthwiseConvolutionLayerFixture.h    |  12 +-
 .../fixtures/DirectConvolutionLayerFixture.h       |   6 +-
 tests/validation/fixtures/FFTFixture.h             |  14 +-
 .../fixtures/FullyConnectedLayerFixture.h          |   5 +-
 tests/validation/fixtures/GEMMFixture.h            |  46 +++++++
 .../validation/fixtures/GEMMLowpAssemblyFixture.h  | 141 ---------------------
 tests/validation/fixtures/GEMMLowpFixture.h        |  14 ++
 .../fixtures/GEMMReshapeLHSMatrixFixture.h         |   4 +-
 .../fixtures/GEMMReshapeRHSMatrixFixture.h         |   4 +-
 tests/validation/fixtures/PoolingLayerFixture.h    |  12 +-
 tests/validation/fixtures/ScaleFixture.h           |   3 +-
 .../fixtures/WinogradConvolutionLayerFixture.h     |  34 ++---
 18 files changed, 158 insertions(+), 183 deletions(-)
 delete mode 100644 tests/validation/fixtures/GEMMLowpAssemblyFixture.h

diff --git a/tests/validation/Helpers.cpp b/tests/validation/Helpers.cpp
index 7ff2ab6219..b53d46fc76 100644
--- a/tests/validation/Helpers.cpp
+++ b/tests/validation/Helpers.cpp
@@ -325,6 +325,30 @@ std::pair<int, int> get_symm_quantized_per_channel_bounds(const QuantizationInfo
     return std::pair<int, int> { min_bound, max_bound };
 }
 
+void add_padding_x(std::initializer_list<ITensor *> tensors, const DataLayout &data_layout)
+{
+    if(data_layout == DataLayout::NHWC)
+    {
+        constexpr unsigned int lower = 1U;
+        constexpr unsigned int upper = 16U;
+
+        std::uniform_int_distribution<unsigned int> distribution(lower, upper);
+        size_t                                      seed_offset = 0;
+
+        for(ITensor *tensor : tensors)
+        {
+            ARM_COMPUTE_ERROR_ON(!tensor->info()->is_resizable());
+
+            std::mt19937 gen(library->seed() + seed_offset++);
+
+            const unsigned int right = distribution(gen);
+            const unsigned int left  = distribution(gen);
+
+            tensor->info()->extend_padding(PaddingSize(0U, right, 0U, left));
+        }
+    }
+}
+
 template void get_tile(const SimpleTensor &in, SimpleTensor &roi, const Coordinates &coord);
 template void get_tile(const SimpleTensor &in, SimpleTensor &roi, const Coordinates &coord);
 template void get_tile(const SimpleTensor &in, SimpleTensor &roi, const Coordinates &coord);
diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
index 30ec14e716..3ba3bd1259 100644
--- a/tests/validation/Helpers.h
+++ b/tests/validation/Helpers.h
@@ -229,6 +229,15 @@ std::pair<int, int> get_quantized_qasymm8_signed_bounds(const QuantizationInfo &
  * @param[in] channel_id Channel id for per channel quantization info.
 */
 std::pair<int, int> get_symm_quantized_per_channel_bounds(const QuantizationInfo &quant_info, float min, float max, size_t channel_id = 0);
+
+/** Add random padding along the X axis (between 1 and 16 columns per side) to all the input tensors
+ *
+ * @param[in] tensors     List of tensors to add padding to
+ * @param[in] data_layout (Optional) Data layout of the operator
+ *
+ * @note This function adds padding to the input tensors only if data_layout == DataLayout::NHWC
+ */
+void add_padding_x(std::initializer_list<ITensor *> tensors, const DataLayout &data_layout = DataLayout::NHWC);
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index 518f4804a0..9d075e12c1 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -37,7 +37,6 @@
 #include "tests/framework/Macros.h"
 #include "tests/framework/datasets/Datasets.h"
 #include "tests/validation/Validation.h"
-#include "tests/validation/fixtures/GEMMLowpAssemblyFixture.h"
 #include "tests/validation/fixtures/GEMMLowpFixture.h"
 
 namespace arm_compute
diff --git a/tests/validation/UNIT/GPUTarget.cpp b/tests/validation/UNIT/GPUTarget.cpp
index e1b7e1fe3f..d2c81cf778 100644
--- a/tests/validation/UNIT/GPUTarget.cpp
+++ b/tests/validation/UNIT/GPUTarget.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -22,7 +22,6 @@
 * SOFTWARE.
 */
 #include "arm_compute/core/GPUTarget.h"
-#include "tests/AssetsLibrary.h"
 #include "tests/Globals.h"
 #include "tests/Utils.h"
 #include "tests/framework/Asserts.h"
diff --git a/tests/validation/UNIT/SafeIntegerOps.cpp b/tests/validation/UNIT/SafeIntegerOps.cpp
index 62f70414f1..13e4ef5125 100644
--- a/tests/validation/UNIT/SafeIntegerOps.cpp
+++ b/tests/validation/UNIT/SafeIntegerOps.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
* * SPDX-License-Identifier: MIT * @@ -23,7 +23,6 @@ */ #include "arm_compute/core/GPUTarget.h" #include "arm_compute/core/utils/math/SafeOps.h" -#include "tests/AssetsLibrary.h" #include "tests/Globals.h" #include "tests/Utils.h" #include "tests/framework/Asserts.h" diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h index 07790e84d9..b649280ae0 100644 --- a/tests/validation/fixtures/ConvolutionLayerFixture.h +++ b/tests/validation/fixtures/ConvolutionLayerFixture.h @@ -69,7 +69,8 @@ public: public: template void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, - DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info, bool mixed_layout = false) + DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info, + bool mixed_layout = false) { _mixed_layout = mixed_layout; _data_type = data_type; @@ -87,7 +88,6 @@ public: } protected: - void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) { // Test Multi DataLayout graph cases, when the data layout changes after configure @@ -214,6 +214,8 @@ protected: ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &src, &weights, &bias, &dst }, _data_layout); + // Allocate tensors src.allocator()->allocate(); weights.allocator()->allocate(); diff --git a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h index 0aa43d82b4..e87e31f97b 100644 --- a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h +++ b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h @@ -116,6 +116,9 @@ public: void allocate_and_run_target() { + // TODO: uncomment after COMPMID-4361 + // add_padding_x({ &_src, &_weights, &_biases, &_target }, _data_layout); + // Allocate tensors _src.allocator()->allocate(); _weights.allocator()->allocate(); @@ -131,7 +134,7 @@ public: fill(AccessorType(_src), 0); fill(AccessorType(_weights), 1); fill(AccessorType(_biases), 2); - + if(_mixed_layout) { mix_layout(_dwc, _src, _target); @@ -158,7 +161,6 @@ public: } protected: - void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) { // Test Multi DataLayout graph cases, when the data layout changes after configure @@ -237,7 +239,7 @@ protected: ActivationLayerInfo _act_info{}; unsigned int _depth_multiplier{}; Size2D _dilation{}; - bool _mixed_layout{false}; + bool _mixed_layout{ false }; }; template @@ -309,6 +311,8 @@ public: void allocate_and_run_target() { + add_padding_x({ &_src, &_weights, &_biases, &_target }, _data_layout); + // Allocate tensors _src.allocator()->allocate(); _weights.allocator()->allocate(); @@ -442,6 +446,8 @@ public: void allocate_and_run_target() { + add_padding_x({ &_src, &_weights, &_biases, &_target }, _data_layout); + // Allocate tensors _src.allocator()->allocate(); _weights.allocator()->allocate(); diff --git a/tests/validation/fixtures/DirectConvolutionLayerFixture.h b/tests/validation/fixtures/DirectConvolutionLayerFixture.h index 5ed0b9f9a3..b79991e3d4 100644 --- 
a/tests/validation/fixtures/DirectConvolutionLayerFixture.h +++ b/tests/validation/fixtures/DirectConvolutionLayerFixture.h @@ -90,7 +90,6 @@ public: } protected: - void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) { DataLayout data_layout = src.info()->data_layout(); @@ -172,6 +171,9 @@ protected: ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + // TODO: uncomment after COMPMID-4341 + // add_padding_x({ &src, &weights, &bias, &dst }, data_layout); + // Allocate tensors src.allocator()->allocate(); weights.allocator()->allocate(); @@ -221,7 +223,7 @@ protected: SimpleTensor _reference{}; QuantizationInfo _quantization_info{}; DataType _data_type{}; - bool _mixed_layout {false}; + bool _mixed_layout{ false }; }; template diff --git a/tests/validation/fixtures/FFTFixture.h b/tests/validation/fixtures/FFTFixture.h index 199730d5d0..3a75135718 100644 --- a/tests/validation/fixtures/FFTFixture.h +++ b/tests/validation/fixtures/FFTFixture.h @@ -91,6 +91,9 @@ protected: ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + // TODO: uncomment after COMPMID-4362 + // add_padding_x({ &src, &dst }); + // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); @@ -137,15 +140,14 @@ public: DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info, bool mixed_layout = false) { _mixed_layout = mixed_layout; - _data_type = data_type; - _data_layout = data_layout; + _data_type = data_type; + _data_layout = data_layout; _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info); _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info); } protected: - void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) { // Test Multi DataLayout graph cases, when the data layout changes after configure @@ -210,6 +212,8 @@ protected: ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &src, &weights, &bias, &dst }, _data_layout); + // Allocate tensors src.allocator()->allocate(); weights.allocator()->allocate(); @@ -225,7 +229,7 @@ protected: fill(AccessorType(src), 0); fill(AccessorType(weights), 1); fill(AccessorType(bias), 2); - + if(_mixed_layout) { mix_layout(conv, src, dst); @@ -261,7 +265,7 @@ protected: SimpleTensor _reference{}; DataType _data_type{}; DataLayout _data_layout{}; - bool _mixed_layout{false}; + bool _mixed_layout{ false }; }; template diff --git a/tests/validation/fixtures/FullyConnectedLayerFixture.h b/tests/validation/fixtures/FullyConnectedLayerFixture.h index 8f38aae187..383d88009f 100644 --- a/tests/validation/fixtures/FullyConnectedLayerFixture.h +++ b/tests/validation/fixtures/FullyConnectedLayerFixture.h @@ -72,7 +72,6 @@ public: } protected: - void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) { const DataLayout data_layout = src.info()->data_layout(); @@ -165,6 +164,8 @@ protected: ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &src, &weights, &bias, &dst }); + // Allocate tensors src.allocator()->allocate(); weights.allocator()->allocate(); @@ -238,7 
+239,7 @@ protected: SimpleTensor _reference{}; DataType _data_type{}; DataType _bias_data_type{}; - bool _mixed_layout{false}; + bool _mixed_layout{ false }; QuantizationInfo _quantization_info{}; ActivationLayerInfo _activation_info{}; }; diff --git a/tests/validation/fixtures/GEMMFixture.h b/tests/validation/fixtures/GEMMFixture.h index 500e094e18..45516d4187 100644 --- a/tests/validation/fixtures/GEMMFixture.h +++ b/tests/validation/fixtures/GEMMFixture.h @@ -105,6 +105,8 @@ protected: ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &a, &b, &c, &dst }); + // Allocate tensors a.allocator()->allocate(); b.allocator()->allocate(); @@ -231,6 +233,8 @@ protected: ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &lhs, &rhs, &bias, &dst }); + // Allocate tensors lhs.allocator()->allocate(); rhs.allocator()->allocate(); @@ -347,6 +351,8 @@ protected: ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &lhs, &rhs, &bias, &dst }); + // Allocate tensors lhs.allocator()->allocate(); rhs.allocator()->allocate(); @@ -483,6 +489,12 @@ protected: ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + // TODO: remove if statement after COMPMID-4368 + if(!rhs_info.export_to_cl_image) + { + add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &bias, &dst }); + } + // Allocate tensors lhs.allocator()->allocate(); rhs.allocator()->allocate(); @@ -625,6 +637,12 @@ protected: ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + // TODO: remove if statement after COMPMID-4368 + if(!rhs_info.export_to_cl_image) + { + add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &bias, &dst }); + } + // Allocate tensors lhs.allocator()->allocate(); rhs.allocator()->allocate(); @@ -787,6 +805,12 @@ protected: ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + // TODO: remove if statement after COMPMID-4368 + if(!rhs_info.export_to_cl_image) + { + add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &bias, &dst }); + } + // Allocate tensors lhs.allocator()->allocate(); rhs.allocator()->allocate(); @@ -955,6 +979,12 @@ protected: ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + // TODO: remove if statement after COMPMID-4368 + if(!rhs_info.export_to_cl_image) + { + add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &bias, &dst }); + } + // Allocate tensors lhs.allocator()->allocate(); rhs.allocator()->allocate(); @@ -1118,6 +1148,12 @@ protected: ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + // TODO: remove if statement after COMPMID-4368 + if(!rhs_info.export_to_cl_image) + { + add_padding_x({ &lhs, &rhs, &rhs_reshaped, &bias, &dst }); + } + // Allocate tensors lhs.allocator()->allocate(); rhs.allocator()->allocate(); 
@@ -1277,6 +1313,12 @@ protected: ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + // TODO: remove if statement after COMPMID-4368 + if(!rhs_info.export_to_cl_image) + { + add_padding_x({ &lhs, &rhs, &rhs_reshaped, &bias, &dst }); + } + // Allocate tensors lhs.allocator()->allocate(); rhs.allocator()->allocate(); @@ -1410,6 +1452,8 @@ protected: ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &lhs, &rhs, &bias, &dst }); + // Allocate tensors lhs.allocator()->allocate(); rhs.allocator()->allocate(); @@ -1539,6 +1583,8 @@ protected: ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &lhs, &rhs, &bias, &dst }); + // Allocate tensors lhs.allocator()->allocate(); rhs.allocator()->allocate(); diff --git a/tests/validation/fixtures/GEMMLowpAssemblyFixture.h b/tests/validation/fixtures/GEMMLowpAssemblyFixture.h deleted file mode 100644 index e9ec1bc365..0000000000 --- a/tests/validation/fixtures/GEMMLowpAssemblyFixture.h +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright (c) 2017-2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#ifndef ARM_COMPUTE_TEST_GEMMLOWP_ASSEMBLY_FIXTURE -#define ARM_COMPUTE_TEST_GEMMLOWP_ASSEMBLY_FIXTURE - -#include "arm_compute/core/TensorShape.h" -#include "arm_compute/core/Types.h" -#include "tests/AssetsLibrary.h" -#include "tests/Globals.h" -#include "tests/IAccessor.h" -#include "tests/framework/Asserts.h" -#include "tests/framework/Fixture.h" -#include "tests/validation/Helpers.h" -#include "tests/validation/reference/GEMMLowp.h" - -#include - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -template -class GEMMLowpAssemblyFixture : public framework::Fixture -{ -public: - template - void setup(size_t m, size_t n, size_t k) - { - const TensorShape shape_a(k, m); - const TensorShape shape_b(n, k); - const TensorShape shape_c(n, m); - _target = compute_target(shape_a, shape_b, shape_c); - _reference = compute_reference(shape_a, shape_b, shape_c); - } - -protected: - template - void fill(U &&tensor, int i, int lo, int hi) - { - std::uniform_int_distribution<> distribution(lo, hi); - library->fill(tensor, distribution, i); - } - - TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c) - { - DataType dt_in = std::is_same::value ? DataType::S8 : DataType::U8; - - // Create tensors - TensorType a = create_tensor(shape_a, dt_in, 1); - TensorType b = create_tensor(shape_b, dt_in, 1); - TensorType c = create_tensor(shape_c, DataType::S32, 1); - - // Create and configure function - FunctionType gemmlowp; - gemmlowp.configure(&a, &b, nullptr, &c); - - ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS); - - // Allocate tensors - a.allocator()->allocate(); - b.allocator()->allocate(); - c.allocator()->allocate(); - - ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS); - - // Fill tensors - if(dt_in == DataType::S8) - { - fill(AccessorType(a), 0, -128, 127); - fill(AccessorType(b), 1, -128, 127); - } - else - { - fill(AccessorType(a), 0, 0, 255); - fill(AccessorType(b), 1, 0, 255); - } - fill(AccessorType(c), 2, 0, 0); - - // Compute GEMM function - gemmlowp.run(); - return c; - } - - SimpleTensor compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c) - { - DataType dt = std::is_same::value ? 
DataType::S8 : DataType::U8; - - // Create reference - SimpleTensor a{ shape_a, dt, 1 }; - SimpleTensor b{ shape_b, dt, 1 }; - - // Fill reference - if(dt == DataType::S8) - { - fill(a, 0, -128, 127); - fill(b, 1, -128, 127); - } - else - { - fill(a, 0, 0, 255); - fill(b, 1, 0, 255); - } - - return reference::gemmlowp(a, b, shape_c); - } - - TensorType _target{}; - SimpleTensor _reference{}; -}; - -} // namespace validation -} // namespace test -} // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE */ diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h index 95f49601a5..c3da2e261d 100644 --- a/tests/validation/fixtures/GEMMLowpFixture.h +++ b/tests/validation/fixtures/GEMMLowpFixture.h @@ -133,6 +133,8 @@ TensorType compute_gemmlowp_target(const TensorShape &shape_a, const TensorShape ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &a, &b, &output }); + // Allocate tensors a.allocator()->allocate(); b.allocator()->allocate(); @@ -948,6 +950,8 @@ protected: ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &dst }); + // Allocate tensors lhs.allocator()->allocate(); rhs.allocator()->allocate(); @@ -1098,6 +1102,8 @@ protected: ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &dst }); + // Allocate tensors lhs.allocator()->allocate(); rhs.allocator()->allocate(); @@ -1247,6 +1253,8 @@ protected: ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &lhs, &rhs, &rhs_reshaped, &dst }); + // Allocate tensors lhs.allocator()->allocate(); rhs.allocator()->allocate(); @@ -1390,6 +1398,8 @@ protected: ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &lhs, &rhs, &rhs_reshaped, &dst }); + // Allocate tensors lhs.allocator()->allocate(); rhs.allocator()->allocate(); @@ -1502,6 +1512,8 @@ protected: ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &lhs, &rhs, &dst }); + // Allocate tensors lhs.allocator()->allocate(); rhs.allocator()->allocate(); @@ -1597,6 +1609,8 @@ protected: ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &lhs, &rhs, &dst }); + // Allocate tensors lhs.allocator()->allocate(); rhs.allocator()->allocate(); diff --git a/tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h b/tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h index d0855093a7..4ee493b2fd 100644 --- a/tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h +++ b/tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 Arm Limited. + * Copyright (c) 2018-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -91,6 +91,8 @@ protected: ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &src, &dst }); + // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); diff --git a/tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h b/tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h index 99bfa3bced..3f73912373 100644 --- a/tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h +++ b/tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 Arm Limited. + * Copyright (c) 2018-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -90,6 +90,8 @@ protected: ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &src, &dst }); + // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); diff --git a/tests/validation/fixtures/PoolingLayerFixture.h b/tests/validation/fixtures/PoolingLayerFixture.h index ee81ff5538..66e09d5bdb 100644 --- a/tests/validation/fixtures/PoolingLayerFixture.h +++ b/tests/validation/fixtures/PoolingLayerFixture.h @@ -50,13 +50,12 @@ public: QuantizationInfo input_qinfo = QuantizationInfo(), QuantizationInfo output_qinfo = QuantizationInfo(), bool mixed_layout = false) { _mixed_layout = mixed_layout; - _pool_info = pool_info; - _target = compute_target(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo, indices); - _reference = compute_reference(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo, indices); + _pool_info = pool_info; + _target = compute_target(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo, indices); + _reference = compute_reference(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo, indices); } protected: - void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) { const DataLayout data_layout = src.info()->data_layout(); @@ -115,6 +114,9 @@ protected: ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(_target_indices.info()->is_resizable(), framework::LogLevel::ERRORS); + // TODO: uncomment after COMPMID-4363 + // add_padding_x({ &src, &dst, &_target_indices }, data_layout); + // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); @@ -152,7 +154,7 @@ protected: TensorType _target{}; SimpleTensor _reference{}; PoolingLayerInfo _pool_info{}; - bool _mixed_layout{false}; + bool _mixed_layout{ false }; TensorType _target_indices{}; SimpleTensor _ref_indices{}; }; diff --git a/tests/validation/fixtures/ScaleFixture.h b/tests/validation/fixtures/ScaleFixture.h index 9e0f620abe..a40cfdab10 100644 --- a/tests/validation/fixtures/ScaleFixture.h +++ b/tests/validation/fixtures/ScaleFixture.h @@ -68,7 +68,6 @@ public: } protected: - void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) { const DataLayout data_layout = src.info()->data_layout(); @@ -163,6 +162,8 @@ protected: ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &src, &dst }, data_layout); + // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); diff --git a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h index f956963e14..a1433e9115 100644 --- a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h +++ 
b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h @@ -62,12 +62,11 @@ public: { ARM_COMPUTE_UNUSED(dilation); _mixed_layout = mixed_layout; - _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout); - _reference = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info); + _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout); + _reference = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info); } protected: - void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) { const DataLayout data_layout = src.info()->data_layout(); @@ -134,6 +133,8 @@ protected: ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &src, &weights, &bias, &dst }, data_layout); + // Allocate tensors src.allocator()->allocate(); weights.allocator()->allocate(); @@ -235,7 +236,7 @@ protected: TensorType _target{}; SimpleTensor _reference{}; - bool _mixed_layout{false}; + bool _mixed_layout{ false }; }; template @@ -246,13 +247,12 @@ public: void setup(TensorShape input_shape, WinogradInfo winograd_info, DataLayout data_layout, DataType data_type) { TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info); - _mixed_layout = mixed_layout; - _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type); - _reference = compute_reference(input_shape, output_shape, winograd_info, data_type); + _mixed_layout = mixed_layout; + _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type); + _reference = compute_reference(input_shape, output_shape, winograd_info, data_type); } protected: - void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) { const DataLayout data_layout_src = src.info()->data_layout(); @@ -311,6 +311,8 @@ protected: ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &src, &dst }, data_layout); + // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); @@ -344,7 +346,7 @@ protected: return reference::winograd_input_transform(src, output_shape, winograd_info); } - bool _mixed_layout {false}; + bool _mixed_layout{ false }; TensorType _target{}; SimpleTensor _reference{}; }; @@ -360,12 +362,11 @@ public: TensorShape output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info); _mixed_layout = mixed_layout; - _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type); - _reference = compute_reference(input_shape, output_shape, winograd_info, data_type); + _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type); + _reference = compute_reference(input_shape, output_shape, winograd_info, data_type); } protected: - void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) { const DataLayout data_layout_src = src.info()->data_layout(); @@ -425,6 +426,8 @@ protected: ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &src, &dst }, data_layout); + // Allocate tensors 
src.allocator()->allocate(); dst.allocator()->allocate(); @@ -458,7 +461,7 @@ protected: return reference::winograd_filter_transform(src, output_shape, winograd_info); } - bool _mixed_layout {false}; + bool _mixed_layout{ false }; TensorType _target{}; SimpleTensor _reference{}; }; @@ -475,7 +478,6 @@ public: } protected: - void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) { const DataLayout data_layout_src = src.info()->data_layout(); @@ -534,6 +536,8 @@ protected: ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &src, &bias, &dst }, winograd_info.output_data_layout); + // Allocate tensors src.allocator()->allocate(); bias.allocator()->allocate(); @@ -577,7 +581,7 @@ protected: return (act_info.enabled()) ? reference::activation_layer(winograd_output, act_info) : winograd_output; } - bool _mixed_layout {false}; + bool _mixed_layout{ false }; TensorType _target{}; SimpleTensor _reference{}; }; -- cgit v1.2.1
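
For context, every fixture touched above applies the helper the same way: create and configure the tensors while they are still resizable, extend their X padding, then allocate. Below is a minimal sketch of that flow, not a definitive fixture implementation; TensorType, AccessorType and FunctionType stand in for any backend tensor/accessor/operator triple, and a hypothetical one-input operator with a configure(&src, &dst) signature is assumed.

    // Sketch only: drives a hypothetical operator with implicit padding, mirroring
    // the compute_target()/allocate_and_run_target() pattern in the fixtures above.
    template <typename TensorType, typename AccessorType, typename FunctionType>
    TensorType run_with_implicit_padding(const TensorShape &shape, DataType data_type, DataLayout data_layout)
    {
        // Tensors are created without backing memory, so their padding can still change
        TensorType src = create_tensor<TensorType>(shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType dst = create_tensor<TensorType>(shape, data_type, 1, QuantizationInfo(), data_layout);

        // Hypothetical operator; the real fixtures configure their actual function here
        FunctionType func;
        func.configure(&src, &dst);

        // Extend each tensor's X padding by a random 1-16 elements per side.
        // This must happen after configure() and before allocate(), and it is
        // a no-op for any layout other than DataLayout::NHWC.
        add_padding_x({ &src, &dst }, data_layout);

        // Allocation freezes the padded strides the kernel now has to cope with
        src.allocator()->allocate();
        dst.allocator()->allocate();

        library->fill_tensor_uniform(AccessorType(src), 0);
        func.run();
        return dst;
    }

Two properties of the helper are worth noting. Determinism: each tensor draws its padding from an std::mt19937 seeded with library->seed() plus a per-tensor offset, so a failing padded run reproduces under the same framework seed. Axis selection: PaddingSize follows BorderSize's (top, right, bottom, left) constructor order, so PaddingSize(0U, right, 0U, left) extends only the X axis and leaves Y untouched.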