From 668ccdcfb81bfab3a2d44cd1ddd956e83a2dfb09 Mon Sep 17 00:00:00 2001 From: Sang-Hoon Park Date: Wed, 3 Feb 2021 10:32:59 +0000 Subject: Add dynamic tensor support to CpuElementwise The kernels and operators for binary and unary operations are now capable of being configured with dynamic shapes and computing windows at run-time. Additionally, changing arguments' names is done for consistency. Partially Implements: COMPMID-4127 Change-Id: I48e5038692db667dec7cb2b2906fe5683214fe19 Signed-off-by: Sang-Hoon Park Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4973 Tested-by: Arm Jenkins Reviewed-by: Pablo Marquez Tello Comments-Addressed: Arm Jenkins --- tests/Utils.h | 53 ++++++++++++++++++++++ tests/validation/NEON/ElementwiseDivision.cpp | 28 ++++++++++++ tests/validation/NEON/ElementwiseRsqrtLayer.cpp | 19 ++++++++ .../validation/fixtures/ElementWiseUnaryFixture.h | 35 ++++++++++++-- .../fixtures/ElementwiseOperationsFixture.h | 51 +++++++++++++++++++-- 5 files changed, 179 insertions(+), 7 deletions(-) (limited to 'tests') diff --git a/tests/Utils.h b/tests/Utils.h index 2569c41a9e..fe9fe712cf 100644 --- a/tests/Utils.h +++ b/tests/Utils.h @@ -814,6 +814,59 @@ inline void sync_tensor_if_necessary(TensorType &tensor) { ARM_COMPUTE_UNUSED(tensor); } + +/** Construct and return object for dimensions' state filled with the given value + * + * @param[in] value The value to fill + * + * @return Constructed class + */ +inline ITensorInfo::TensorDimsState construct_dims_state(int32_t value) +{ + auto states = ITensorInfo::TensorDimsState{}; + std::fill(states.begin(), states.end(), value); + return states; +} + +/** Construct and return object for dimensions' state filled with the value for dynamic state + * + * @return Constructed class filled with the value for dynamic state + */ +inline ITensorInfo::TensorDimsState construct_dynamic_dims_state() +{ + return construct_dims_state(ITensorInfo::get_dynamic_state_value()); +} + +/** Construct and return 
object for dimensions' state filled with the value for non-dynamic state + * + * @return Constructed class filled with the value for non-dynamic state + */ +inline ITensorInfo::TensorDimsState construct_static_dims_state() +{ + return construct_dims_state(ITensorInfo::get_static_state_value()); +} + +/** Set the dimension states of the given tensor to dynamic + * + * @param[in] t The tensor to set to dynamic state + * + */ +template +void set_tensor_dynamic(TensorType &t) +{ + t.info()->set_tensor_dims_state(construct_dynamic_dims_state()); +} + +/** Set the dimension states of the given tensor to static state + * + * @param[in] t The tensor to set to static state + * + */ +template +void set_tensor_static(TensorType &t) +{ + t.info()->set_tensor_dims_state(construct_static_dims_state()); +} } // namespace test } // namespace arm_compute #endif /* ARM_COMPUTE_TEST_UTILS_H */ diff --git a/tests/validation/NEON/ElementwiseDivision.cpp b/tests/validation/NEON/ElementwiseDivision.cpp index 3656560281..8abccb2ed6 100644 --- a/tests/validation/NEON/ElementwiseDivision.cpp +++ b/tests/validation/NEON/ElementwiseDivision.cpp @@ -93,6 +93,34 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( // clang-format on // *INDENT-ON* +// These test cases will execute the function with dynamic-stated shapes +// Since other elementwise operations share the same kernel, these tests are added only here. +// Also, only FP32 is tested since data type doesn't/shouldn't matter with dynamic shapes. 
+TEST_SUITE(DynamicShape) +template +using CpuElementwiseDivisionDynamicShapeFixture = ArithmeticDivisionDynamicShapeValidationFixture; + +template +using CpuElementwiseDivisionBroadcastDynamicShapeFixture = ArithmeticDivisionBroadcastDynamicShapeValidationFixture; + +TEST_SUITE(F32) + +FIXTURE_DATA_TEST_CASE(RunSmall, CpuElementwiseDivisionDynamicShapeFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), ElementwiseDivisionFP32Dataset)) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_fp32, 0.01); +} + +FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CpuElementwiseDivisionBroadcastDynamicShapeFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapesBroadcast(), + ElementwiseDivisionFP32Dataset)) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_fp32, 0.01); +} + +TEST_SUITE_END() // F32 +TEST_SUITE_END() // DynamicShape + TEST_SUITE(Float) #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC TEST_SUITE(F16) diff --git a/tests/validation/NEON/ElementwiseRsqrtLayer.cpp b/tests/validation/NEON/ElementwiseRsqrtLayer.cpp index f41500cc0b..1591b76cd7 100644 --- a/tests/validation/NEON/ElementwiseRsqrtLayer.cpp +++ b/tests/validation/NEON/ElementwiseRsqrtLayer.cpp @@ -50,6 +50,25 @@ RelativeTolerance tolerance_fp16(0.01f); TEST_SUITE(NEON) TEST_SUITE(RsqrtLayer) +// These test cases will execute the function with dynamic-stated shapes +// Since other elementwise unary operations share the same kernel, these tests are added only here. +// Also, only FP32 is tested since data type doesn't/shouldn't matter with dynamic shapes. 
+TEST_SUITE(DynamicShape) +TEST_SUITE(FP32) + +template +using CpuRsqrtDynamicShapeFixture = RsqrtDynamicShapeValidationFixture; + +FIXTURE_DATA_TEST_CASE(RunSmall, CpuRsqrtDynamicShapeFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("DataType", + DataType::F32))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_fp32); +} + +TEST_SUITE_END() // FP32 +TEST_SUITE_END() // DynamicShape + template using NERsqrtLayerFixture = RsqrtValidationFixture; diff --git a/tests/validation/fixtures/ElementWiseUnaryFixture.h b/tests/validation/fixtures/ElementWiseUnaryFixture.h index 8cffef48f6..9729907630 100644 --- a/tests/validation/fixtures/ElementWiseUnaryFixture.h +++ b/tests/validation/fixtures/ElementWiseUnaryFixture.h @@ -44,11 +44,12 @@ class ElementWiseUnaryValidationFixture : public framework::Fixture { public: template - void setup(TensorShape input_shape, DataType input_data_type, bool in_place, ElementWiseUnary op) + void setup(TensorShape input_shape, DataType input_data_type, bool in_place, ElementWiseUnary op, bool use_dynamic_shape = false) { - _op = op; - _target = compute_target(input_shape, input_data_type, in_place); - _reference = compute_reference(input_shape, input_data_type); + _op = op; + _target = compute_target(input_shape, input_data_type, in_place); + _reference = compute_reference(input_shape, input_data_type); + _use_dynamic_shape = use_dynamic_shape; } protected: @@ -131,10 +132,24 @@ protected: TensorType *actual_dst = in_place ? &src : &dst; + // if _use_dynamic_shape is true, this fixture will test scenario for dynamic shapes. 
+ // - At configure time, all input tensors are marked as dynamic using set_tensor_dynamic() + // - After configure, tensors are marked as static for run using set_tensor_static() + // - The tensors with static shape are given to run() + if(_use_dynamic_shape) + { + set_tensor_dynamic(src); + } + // Create and configure function FunctionType elwiseunary_layer; elwiseunary_layer.configure(&src, actual_dst); + if(_use_dynamic_shape) + { + set_tensor_static(src); + } + ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); src.allocator()->allocate(); ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); @@ -175,6 +190,7 @@ protected: TensorType _target{}; SimpleTensor _reference{}; ElementWiseUnary _op{}; + bool _use_dynamic_shape{ false }; }; template @@ -188,6 +204,17 @@ public: } }; +template +class RsqrtDynamicShapeValidationFixture : public ElementWiseUnaryValidationFixture +{ +public: + template + void setup(const TensorShape &shape, DataType data_type) + { + ElementWiseUnaryValidationFixture::setup(shape, data_type, false, ElementWiseUnary::RSQRT, true); + } +}; + template class ExpValidationFixture : public ElementWiseUnaryValidationFixture { diff --git a/tests/validation/fixtures/ElementwiseOperationsFixture.h b/tests/validation/fixtures/ElementwiseOperationsFixture.h index dcb408c801..bf51c7e69b 100644 --- a/tests/validation/fixtures/ElementwiseOperationsFixture.h +++ b/tests/validation/fixtures/ElementwiseOperationsFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Arm Limited. + * Copyright (c) 2018-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -48,9 +48,11 @@ public: template void setup(ArithmeticOperation op, const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, - QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out) + QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool use_dyanmic_shape = false) { - _op = op; + _op = op; + _use_dynamic_shape = use_dyanmic_shape; + _target = compute_target(shape0, shape1, data_type0, data_type1, output_data_type, qinfo0, qinfo1, qinfo_out); _reference = compute_reference(shape0, shape1, data_type0, data_type1, output_data_type, qinfo0, qinfo1, qinfo_out); } @@ -87,10 +89,26 @@ protected: TensorType ref_src2 = create_tensor(shape1, data_type1, 1, qinfo1); TensorType dst = create_tensor(TensorShape::broadcast_shape(shape0, shape1), output_data_type, 1, qinfo_out); + // if _use_dynamic_shape is true, this fixture will test scenario for dynamic shapes. 
+ // - At configure time, all input tensors are marked as dynamic using set_tensor_dynamic() + // - After configure, tensors are marked as static for run using set_tensor_static() + // - The tensors with static shape are given to run() + if(_use_dynamic_shape) + { + set_tensor_dynamic(ref_src1); + set_tensor_dynamic(ref_src2); + } + // Create and configure function FunctionType elem_op; elem_op.configure(&ref_src1, &ref_src2, &dst); + if(_use_dynamic_shape) + { + set_tensor_static(ref_src1); + set_tensor_static(ref_src2); + } + ARM_COMPUTE_EXPECT(ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); @@ -133,6 +151,7 @@ protected: TensorType _target{}; SimpleTensor _reference{}; ArithmeticOperation _op{ ArithmeticOperation::ADD }; + bool _use_dynamic_shape{ false }; }; // Arithmetic operation fused with activation function @@ -225,6 +244,32 @@ public: } }; +template +class ArithmeticDivisionBroadcastDynamicShapeValidationFixture : public ArithmeticOperationsGenericFixture +{ +public: + template + void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type) + { + ArithmeticOperationsGenericFixture::setup(ArithmeticOperation::DIV, shape0, shape1, + data_type0, data_type1, output_data_type, + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), true); + } +}; + +template +class ArithmeticDivisionDynamicShapeValidationFixture : public ArithmeticOperationsGenericFixture +{ +public: + template + void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type) + { + ArithmeticOperationsGenericFixture::setup(ArithmeticOperation::DIV, shape, shape, + data_type0, data_type1, output_data_type, + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), true); + } +}; + template class 
ArithmeticDivisionBroadcastValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture { -- cgit v1.2.1