From 45198c8fe5c262cf7fba6f22cfc03ccf194e8bca Mon Sep 17 00:00:00 2001
From: Sheri Zhang
Date: Tue, 14 Apr 2020 22:29:36 +0100
Subject: COMPMID-3239: Implement QSYMM16 LayerNormalizationKernel for CL

Use NE/CLSynthetizeFunction instead of
NE/CLQLSTMLayerNormalizationValidationFixture

Signed-off-by: Sheri Zhang
Change-Id: I62ace213a5261f2d307da6953d0521492aa05292
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3019
Reviewed-by: Michele Di Giorgio
Tested-by: Arm Jenkins
Comments-Addressed: Arm Jenkins
---
 tests/NEON/Helper.h                               |  9 ++++++
 tests/validation/CL/QLSTMLayerNormalization.cpp   |  6 ++--
 tests/validation/NEON/QLSTMLayerNormalization.cpp |  7 +++--
 .../fixtures/QLSTMLayerNormalizationFixture.h     | 35 +---------------------
 4 files changed, 19 insertions(+), 38 deletions(-)

diff --git a/tests/NEON/Helper.h b/tests/NEON/Helper.h
index a56b4adc34..18e400d542 100644
--- a/tests/NEON/Helper.h
+++ b/tests/NEON/Helper.h
@@ -66,6 +66,15 @@ public:
         k->configure(std::forward<Args>(args)...);
         _kernel = std::move(k);
     }
+    /** Validate input arguments
+     *
+     * @param[in] args Configuration arguments.
+     */
+    template <typename... Args>
+    static Status validate(Args &&... args)
+    {
+        return K::validate(std::forward<Args>(args)...);
+    }
 };
 
 /** As above but this also setups a Zero border on the input tensor of the specified bordersize */
diff --git a/tests/validation/CL/QLSTMLayerNormalization.cpp b/tests/validation/CL/QLSTMLayerNormalization.cpp
index ea5eca6261..17f431cbbf 100644
--- a/tests/validation/CL/QLSTMLayerNormalization.cpp
+++ b/tests/validation/CL/QLSTMLayerNormalization.cpp
@@ -23,6 +23,7 @@
  */
 #include "arm_compute/core/CL/kernels/CLQLSTMLayerNormalizationKernel.h"
 #include "tests/CL/CLAccessor.h"
+#include "tests/CL/Helper.h"
 #include "tests/PaddingCalculator.h"
 #include "tests/datasets/ShapeDatasets.h"
 #include "tests/framework/Asserts.h"
@@ -44,6 +45,7 @@ constexpr AbsoluteTolerance<int16_t> tolerance_s16(0); /**< Tolerance value for
 constexpr uint32_t vector_size_byte = 16;
 
 using test::datasets::ShapeDataset;
+using CLQLSTMLayerNormalization = CLSynthetizeFunction<CLQLSTMLayerNormalizationKernel>;
 template
 class QLSTMLayerNormShapeDataSet : public ShapeDataset
 {
@@ -127,7 +129,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL,
                ),
                input_info, weight_info, bias_info)
 {
     TensorInfo dummy_output{};
-    const Status s = CLQLSTMLayerNormalizationKernel::validate(&input_info, &dummy_output, &weight_info, &bias_info);
+    const Status s = CLQLSTMLayerNormalization::validate(&input_info, &dummy_output, &weight_info, &bias_info);
     ARM_COMPUTE_EXPECT(!bool(s), framework::LogLevel::ERRORS);
 }
 
@@ -135,7 +137,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL,
 // *INDENT-ON*
 
 template <typename T>
-using CLQLSTMLayerNormalizationFixture = CLQLSTMLayerNormalizationValidationFixture;
+using CLQLSTMLayerNormalizationFixture = QLSTMLayerNormalizationValidationFixture;
 
 TEST_SUITE(Quantized)
 TEST_SUITE(QSYMM16)
diff --git a/tests/validation/NEON/QLSTMLayerNormalization.cpp b/tests/validation/NEON/QLSTMLayerNormalization.cpp
index 248bf5cf78..3d71175a6f 100644
--- a/tests/validation/NEON/QLSTMLayerNormalization.cpp
+++ b/tests/validation/NEON/QLSTMLayerNormalization.cpp
@@ -26,6 +26,7 @@
 #include "arm_compute/runtime/Tensor.h"
 #include "arm_compute/runtime/TensorAllocator.h"
 #include "tests/NEON/Accessor.h"
+#include "tests/NEON/Helper.h"
 #include "tests/PaddingCalculator.h"
 #include "tests/datasets/ShapeDatasets.h"
 #include "tests/framework/Asserts.h"
@@ -46,6 +47,8 @@ namespace
 constexpr uint32_t vector_size_byte = 16;
 
 using test::datasets::ShapeDataset;
+using NEQLSTMLayerNormalization = NESynthetizeFunction<NEQLSTMLayerNormalizationKernel>;
+
 template
 class QLSTMLayerNormShapeDataSet : public ShapeDataset
 {
@@ -150,7 +153,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL,
                ),
                input_info, weight_info, bias_info, output_info)
 {
-    const Status s = NEQLSTMLayerNormalizationKernel::validate(&input_info, &output_info, &weight_info, &bias_info);
+    const Status s = NEQLSTMLayerNormalization::validate(&input_info, &output_info, &weight_info, &bias_info);
     ARM_COMPUTE_EXPECT(!bool(s), framework::LogLevel::ERRORS);
 }
 
@@ -158,7 +161,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL,
 // *INDENT-ON*
 
 template <typename T>
-using NEQLSTMLayerNormalizationFixture = NEQLSTMLayerNormalizationValidationFixture;
+using NEQLSTMLayerNormalizationFixture = QLSTMLayerNormalizationValidationFixture;
 
 TEST_SUITE(Quantized)
 TEST_SUITE(QSYMM16)
diff --git a/tests/validation/fixtures/QLSTMLayerNormalizationFixture.h b/tests/validation/fixtures/QLSTMLayerNormalizationFixture.h
index 72af9d9241..cee39c2c82 100644
--- a/tests/validation/fixtures/QLSTMLayerNormalizationFixture.h
+++ b/tests/validation/fixtures/QLSTMLayerNormalizationFixture.h
@@ -26,10 +26,6 @@
 #include "arm_compute/core/TensorShape.h"
 #include "arm_compute/core/Types.h"
-#ifdef ARM_COMPUTE_CL
-#include "arm_compute/runtime/CL/CLScheduler.h"
-#endif /* ARM_COMPUTE_CL */
-#include "arm_compute/runtime/NEON/NEScheduler.h"
 #include "tests/AssetsLibrary.h"
 #include "tests/Globals.h"
 #include "tests/IAccessor.h"
@@ -101,8 +97,6 @@ protected:
         }
     }
 
-    virtual void run_target(FunctionType &fn) = 0;
-
     TensorType compute_target(const TensorShape &input_shape, const TensorShape &weight_shape, const TensorShape &bias_shape)
     {
         TensorType input = create_tensor<TensorType>(input_shape, _data_type, 1);
@@ -114,8 +108,7 @@ protected:
         fn.configure(&input, &output, &weight, &bias);
         allocate_tensors({ &input, &weight, &bias, &output });
         fill(AccessorType(input), AccessorType(weight), AccessorType(bias));
-
-        run_target(fn);
+        fn.run();
 
         return output;
     }
@@ -138,32 +131,6 @@ protected:
     DataType _data_type{};
     QuantizationInfo _qinfo{};
 };
-
-template
-class NEQLSTMLayerNormalizationValidationFixture : public QLSTMLayerNormalizationValidationFixture
-{
-protected:
-    void run_target(FunctionType &fn) override
-    {
-        ThreadInfo tinfo;
-        tinfo.cpu_info = &NEScheduler::get().cpu_info();
-        fn.run(fn.window(), tinfo);
-    }
-};
-
-#ifdef ARM_COMPUTE_CL
-template
-class CLQLSTMLayerNormalizationValidationFixture : public QLSTMLayerNormalizationValidationFixture
-{
-protected:
-    void run_target(FunctionType &fn) override
-    {
-        CLScheduler::get().default_init();
-        fn.run(fn.window(), CLScheduler::get().queue());
-    }
-};
-#endif /* ARM_COMPUTE_CL */
-
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
-- 
cgit v1.2.1
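
Note: the sketch below is an editorial illustration of the "synthetize function" pattern the patch relies on, using hypothetical names (ToyKernel, SynthetizeFunction, Status) rather than the real Compute Library classes. It shows why adding the static validate() forwarder to the test Helper lets one backend-agnostic fixture call fn.configure(), fn.run() and Wrapper::validate() and drop the per-backend run_target() fixtures; it is not the library's actual implementation.

```cpp
#include <iostream>
#include <memory>
#include <utility>

// Stand-in for the library's Status type (hypothetical, simplified).
struct Status
{
    bool ok{ true };
};

// Stand-in for a kernel class with the usual static validate / configure / run trio.
struct ToyKernel
{
    static Status validate(int input_size) // hypothetical argument list
    {
        return Status{ input_size > 0 };
    }
    void configure(int input_size)
    {
        _size = input_size;
    }
    void run() const
    {
        std::cout << "running kernel on " << _size << " elements\n";
    }
    int _size{ 0 };
};

// Wrapper analogous to NE/CLSynthetizeFunction<K>: turns a kernel into a
// function-like object the shared fixture can drive without backend knowledge.
template <typename K>
class SynthetizeFunction
{
public:
    // Forward configure() to a freshly created kernel instance.
    template <typename... Args>
    void configure(Args &&... args)
    {
        auto k = std::make_unique<K>();
        k->configure(std::forward<Args>(args)...);
        _kernel = std::move(k);
    }
    // Static forwarder in the spirit of the one the patch adds to tests/NEON/Helper.h:
    // tests can call Wrapper::validate(...) instead of naming the kernel type.
    template <typename... Args>
    static Status validate(Args &&... args)
    {
        return K::validate(std::forward<Args>(args)...);
    }
    void run() const
    {
        _kernel->run();
    }

private:
    std::unique_ptr<K> _kernel{};
};

int main()
{
    using ToyFunction = SynthetizeFunction<ToyKernel>;

    // A backend-agnostic fixture only needs this interface.
    Status s = ToyFunction::validate(16);
    ToyFunction fn;
    fn.configure(16);
    fn.run();
    return s.ok ? 0 : 1;
}
```

With this shape, the CL and NEON test files each define a single alias (CLQLSTMLayerNormalization / NEQLSTMLayerNormalization) over their kernel, and the common QLSTMLayerNormalizationValidationFixture simply calls fn.run(), which is why the scheduler-specific run_target() overrides could be deleted from the fixture header.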